max_stars_repo_path (string, 4-245 chars) | max_stars_repo_name (string, 7-115 chars) | max_stars_count (int64, 101-368k) | id (string, 2-8 chars) | content (string, 6-1.03M chars)
---|---|---|---|---|
tests/loading/definition/responses/headers/single/test_items_validation.py | maroux/flex | 160 | 12618414 |
import pytest
from flex.constants import (
ARRAY,
)
from flex.exceptions import (
ValidationError,
)
from flex.loading.definitions.responses.single.headers.single import (
single_header_validator,
)
def test_items_is_not_required(msg_assertions):
try:
single_header_validator({})
except ValidationError as err:
errors = err.detail
else:
errors = {}
msg_assertions.assert_path_not_in_errors('headers', errors)
@pytest.mark.parametrize(
'value',
(None, True, 1, 1.1),
)
def test_items_type_validation(value, MESSAGES, msg_assertions):
with pytest.raises(ValidationError) as err:
single_header_validator({'items': value})
msg_assertions.assert_message_in_errors(
MESSAGES['type']['invalid'],
err.value.detail,
'items.type',
)
def test_items_is_required_if_type_array(msg_assertions, MESSAGES):
with pytest.raises(ValidationError) as err:
single_header_validator({'type': ARRAY})
msg_assertions.assert_message_in_errors(
MESSAGES['required']['required'],
err.value.detail,
'items',
)
|
tests/posting/test_log2k8s.py | MarkusH/kopf | 1,038 | 12618417 | import logging
import pytest
from kopf.engines.logging import ObjectLogger, LocalObjectLogger
OBJ1 = {'apiVersion': 'group1/version1', 'kind': 'Kind1',
'metadata': {'uid': 'uid1', 'name': 'name1', 'namespace': 'ns1'}}
REF1 = {'apiVersion': 'group1/version1', 'kind': 'Kind1',
'uid': 'uid1', 'name': 'name1', 'namespace': 'ns1'}
@pytest.mark.parametrize('logfn, event_type', [
['info', "Normal"],
['warning', "Warning"],
['error', "Error"],
['critical', "Fatal"],
])
async def test_posting_normal_levels(settings, caplog, logstream, logfn, event_type,
event_queue, event_queue_loop):
logger = ObjectLogger(body=OBJ1, settings=settings)
logger_fn = getattr(logger, logfn)
logger_fn("hello %s", "world")
assert event_queue.qsize() == 1
event1 = event_queue.get_nowait()
assert event1.ref == REF1
assert event1.type == event_type
assert event1.reason == "Logging"
assert event1.message == "hello world"
assert caplog.messages == ["hello world"]
@pytest.mark.parametrize('logfn, event_type, min_levelno', [
['debug', "Debug", logging.DEBUG],
['info', "Normal", logging.INFO],
['warning', "Warning", logging.WARNING],
['error', "Error", logging.ERROR],
['critical', "Fatal", logging.CRITICAL],
])
async def test_posting_above_config(settings, caplog, logstream, logfn, event_type, min_levelno,
event_queue, event_queue_loop, mocker):
logger = ObjectLogger(body=OBJ1, settings=settings)
logger_fn = getattr(logger, logfn)
settings.posting.level = min_levelno
logger_fn("hello %s", "world")
settings.posting.level = min_levelno + 1
logger_fn("must not be posted")
assert event_queue.qsize() == 1
event1 = event_queue.get_nowait()
assert event1.ref == REF1
assert event1.type == event_type
assert event1.reason == "Logging"
assert event1.message == "hello world"
assert caplog.messages == ["hello world", "must not be posted"]
@pytest.mark.parametrize('logfn', [
'debug',
])
async def test_skipping_hidden_levels(settings, caplog, logstream, logfn,
event_queue, event_queue_loop):
logger = ObjectLogger(body=OBJ1, settings=settings)
logger_fn = getattr(logger, logfn)
logger_fn("hello %s", "world")
logger.info("must be here")
assert event_queue.qsize() == 1 # not 2!
assert caplog.messages == ["hello world", "must be here"]
@pytest.mark.parametrize('logfn', [
'debug',
'info',
'warning',
'error',
'critical',
])
async def test_skipping_below_config(settings, caplog, logstream, logfn,
event_queue, event_queue_loop, mocker):
logger = ObjectLogger(body=OBJ1, settings=settings)
logger_fn = getattr(logger, logfn)
settings.posting.level = 666
logger_fn("hello %s", "world")
settings.posting.level = 0
logger.info("must be here")
assert event_queue.qsize() == 1 # not 2!
assert caplog.messages == ["hello world", "must be here"]
@pytest.mark.parametrize('logfn', [
'debug',
'info',
'warning',
'error',
'critical',
])
async def test_skipping_when_disabled(settings, caplog, logstream, logfn,
event_queue, event_queue_loop):
logger = LocalObjectLogger(body=OBJ1, settings=settings)
logger_fn = getattr(logger, logfn)
settings.posting.enabled = False
settings.posting.level = 0
logger_fn("hello %s", "world")
assert event_queue.qsize() == 0
assert caplog.messages == ["hello world"]
@pytest.mark.parametrize('logfn', [
'debug',
'info',
'warning',
'error',
'critical',
])
async def test_skipping_when_local_with_all_levels(settings, caplog, logstream, logfn,
event_queue, event_queue_loop):
logger = LocalObjectLogger(body=OBJ1, settings=settings)
logger_fn = getattr(logger, logfn)
logger_fn("hello %s", "world")
assert event_queue.qsize() == 0
assert caplog.messages == ["hello world"]
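# For context, an illustrative sketch of where such a per-object logger normally
# comes from (hypothetical resource name; not part of this test module): kopf
# hands a logger into each handler, and messages at or above
# settings.posting.level are also posted as Kubernetes events for the object.
#
#   import kopf
#
#   @kopf.on.create('kopfexamples')
#   def create_fn(logger, **kwargs):
#       logger.info("hello %s", "world")   # logged locally and posted as an event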
|
mindsdb/api/mongo/responders/update_range_deletions.py | yarenty/mindsdb | 261 | 12618422 |
from mindsdb.api.mongo.classes import Responder
class Responce(Responder):
when = {'update': 'rangeDeletions'}
result = {
"ok": 1
}
responder = Responce()
|
sharppy/sharptab/profile.py | skovic/SHARPpy | 163 | 12618429 | ''' Create the Sounding (Profile) Object '''
from __future__ import division
import numpy as np
import numpy.ma as ma
import getpass
from datetime import datetime
from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire
import sharppy.io.qc_tools as qc_tools
from sharppy.databases.sars import hail, supercell
from sharppy.databases.pwv import pwv_climo
from sharppy.sharptab.constants import MISSING
import logging
import warnings
def create_profile(**kwargs):
'''
This is a wrapper function for constructing Profile objects
and objects that inherit from the Profile class. This will
construct and return the appropriate Profile object
based on the supplied keyword argument. If no profile keyword
is supplied, it defaults to a basic Profile. This also requires
that you pass through all the relevant keyword arguments for
the constructors to the Profile objects and the objects that
inherit from Profile.
Parameters
----------
Mandatory Keywords
pres : array_like
The pressure values (Hectopascals)
hght : array_like
The corresponding height values (Meters)
tmpc : array_like
The corresponding temperature values (Celsius)
dwpc : array_like
The corresponding dewpoint temperature values (Celsius)
Optional Keyword Pairs (must use one or the other)
wdir : array_like
The direction from which the wind is blowing in meteorological degrees
wspd : array_like
The speed of the wind (kts)
OR
u : array_like
The U-component of the direction from which the wind is blowing. (kts)
v : array_like
The V-component of the direction from which the wind is blowing. (kts)
Optional Keywords
missing : number, optional (default: sharppy.sharptab.constants.MISSING)
The value of the missing flag used in the Profile objects
profile : string, optional (default: 'default')
The text identifier for the Profile to be generated. Valid options
include ('default' | 'basic' | 'convective'). Default will construct a basic
Profile, and convective will construct a ConvectiveProfile used for
the SPC style GUI.
omeg: array_like
The corresponding vertical velocity values (Pa/s)
Returns
-------
Profile : a basic Profile object
This is the most basic and default object.
OR
ConvectiveProfile : a child of Profile
This is the class used for the SPC GUI.
'''
## Get the user's input for which Profile to construct.
## Make the default the 'default' profile.
profile = kwargs.get('profile', 'default')
## if the profile is default, pass the rest of the keyword
## arguments to the BasicProfile object and return it
if profile == 'default':
return BasicProfile(**kwargs)
## if the profile is raw, return a base profile object
elif profile == 'raw':
return Profile(**kwargs)
## if the profile is convective, pass the rest of the keyword
## arguments to the ConvectiveProfile object and return it
elif profile == 'convective':
return ConvectiveProfile(**kwargs)
class Profile(object):
def __init__(self, **kwargs):
## set the missing variable
self.missing = kwargs.get('missing', MISSING)
self.profile = kwargs.get('profile')
self.latitude = kwargs.get('latitude', ma.masked)
self.strictQC = kwargs.get('strictQC', False)
## get the data and turn them into arrays
self.pres = ma.asanyarray(kwargs.get('pres'), dtype=float)
self.hght = ma.asanyarray(kwargs.get('hght'), dtype=float)
self.tmpc = ma.asanyarray(kwargs.get('tmpc'), dtype=float)
self.dwpc = ma.asanyarray(kwargs.get('dwpc'), dtype=float)
assert self.pres.ndim == 1 and self.hght.ndim == 1 and self.tmpc.ndim == 1 and self.dwpc.ndim == 1,\
"The dimensions of the pres, hght, tmpc, and dwpc arrays passed to the Profile object constructor are not all one dimensional."
assert len(self.pres) > 1 and len(self.hght) > 1 and len(self.tmpc) > 1 and len(self.dwpc) > 1,\
"The length of the pres, hght, tmpc, and dwpc arrays passed to Profile object constructor must all have a length greater than 1."
assert len(self.pres) == len(self.hght) == len(self.tmpc) == len(self.dwpc),\
"The pres, hght, tmpc, or dwpc arrays passed to the Profile object constructor must all have the same length."
if np.ma.max(self.pres) <= 100:
warnings.warn("The pressure values passed to the profile object are below 100 mb. This may cause some the SHARPpy routines not to behave as expected.")
if 'wdir' in kwargs and 'wspd' in kwargs:
self.wdir = ma.asanyarray(kwargs.get('wdir'), dtype=float)
self.wspd = ma.asanyarray(kwargs.get('wspd'), dtype=float)
assert len(self.wdir) == len(self.wspd) == len(self.pres), "The wdir and wspd arrays passed to the Profile constructor must have the same length as the pres array."
assert self.wdir.ndim == 1 and self.wspd.ndim == 1, "The wdir and wspd arrays passed to the Profile constructor are not one dimensional."
#self.u, self.v = utils.vec2comp(self.wdir, self.wspd)
self.u = None
self.v = None
## did the user provide the wind in u,v form?
elif 'u' in kwargs and 'v' in kwargs:
self.u = ma.asanyarray(kwargs.get('u'), dtype=float)
self.v = ma.asanyarray(kwargs.get('v'), dtype=float)
assert len(self.u) == len(self.v) == len(self.pres), "The u and v arrays passed to the Profile constructor must have the same length as the pres array."
assert self.u.ndim == 1 and self.v.ndim == 1, "The u and v arrays passed to the Profile constructor are not one dimensional."
#self.wdir, self.wspd = utils.comp2vec(self.u, self.v)
self.wdir = None
self.wspd = None
else:
warnings.warn("No wind data (wdir/wspd or u/v) passed to the Profile object constructor. This may cause some of the SHARPpy routines to not behave as expected.")
## check if any standard deviation data was supplied
if 'tmp_stdev' in kwargs:
self.dew_stdev = ma.asanyarray(kwargs.get('dew_stdev'), dtype=float)
self.tmp_stdev = ma.asanyarray(kwargs.get('tmp_stdev'), dtype=float)
else:
self.dew_stdev = None
self.tmp_stdev = None
if kwargs.get('omeg', None) is not None:
## get the omega data and turn into arrays
self.omeg = ma.asanyarray(kwargs.get('omeg'))
assert len(self.omeg) == len(self.pres), "Length of omeg array passed to constructor is not the same length as the pres array."
assert self.omeg.ndim == 1, "omeg array is not one dimensional."
assert len(self.omeg) > 1, "omeg array length must have a length greater than 1."
else:
self.omeg = None
## optional keyword argument for location
self.location = kwargs.get('location', None)
self.date = kwargs.get('date', None)
if self.strictQC is True:
self.checkDataIntegrity()
@classmethod
def copy(cls, prof, strictQC=False, **kwargs):
'''
Copies a profile object.
'''
new_kwargs = dict( (k, prof.__dict__[k]) for k in [ 'pres', 'hght', 'tmpc', 'dwpc', 'omeg', 'location', 'date', 'latitude', 'strictQC', 'missing' ])
if prof.u is not None and prof.v is not None:
new_kwargs.update({'u':prof.u, 'v':prof.v})
else:
new_kwargs.update({'wspd':prof.wspd, 'wdir':prof.wdir})
new_kwargs.update({'strictQC':strictQC})
# Create a new profile object using the old profile object's data; cls is the class type (e.g., ConvectiveProfile)
new_kwargs.update(kwargs)
new_prof = cls(**new_kwargs)
if hasattr(prof, 'srwind'):
rmu, rmv, lmu, lmv = prof.srwind
new_prof.set_srright(rmu, rmv)
new_prof.set_srleft(lmu, lmv)
return new_prof
def toFile(self, file_name):
snd_file = open(file_name, 'w')
def qc(val):
return -9999. if not utils.QC(val) else val
snd_loc = (" " * (4 - len(self.location))) + self.location
now = datetime.utcnow()
#print(now, self.date)
user = getpass.getuser()
snd_file.write("%TITLE%\n")
snd_file.write("%s %s\n Saved by user: %s on %s UTC\n" % (snd_loc, self.date.strftime("%y%m%d/%H%M"), user, now.strftime('%Y%m%d/%H%M')))
snd_file.write(" LEVEL HGHT TEMP DWPT WDIR WSPD\n")
snd_file.write("-------------------------------------------------------------------\n")
snd_file.write("%RAW%\n")
for idx in range(self.pres.shape[0]):
str = ""
for col in ['pres', 'hght', 'tmpc', 'dwpc', 'wdir', 'wspd']:
str += "%8.2f, " % qc(self.__dict__[col][idx])
snd_file.write(str[:-3] + "\n")
snd_file.write("%END%\n")
snd_file.close()
def checkDataIntegrity(self):
if not qc_tools.isHGHTValid(self.hght):
qc_tools.raiseError("Invalid height data. Data has repeat height values or height does not increase as pressure decreases.", qc_tools.DataQualityException)
if not qc_tools.isTMPCValid(self.tmpc):
qc_tools.raiseError("Invalid temperature data. Profile contains a temperature value < -273.15 Celsius.", qc_tools.DataQualityException)
if not qc_tools.isDWPCValid(self.dwpc):
qc_tools.raiseError("Invalid dewpoint data. Profile contains a dewpoint value < -273.15 Celsius.", qc_tools.DataQualityException)
if not qc_tools.isWSPDValid(self.wspd):
qc_tools.raiseError("Invalid wind speed data. Profile contains a wind speed value < 0 knots.", qc_tools.DataQualityException)
if not qc_tools.isWDIRValid(self.wdir):
qc_tools.raiseError("Invalid wind direction data. Profile contains a wind direction < 0 degrees or >= 360 degrees.", qc_tools.DataQualityException)
class BasicProfile(Profile):
'''
The default data class for SHARPpy.
All other data classes inherit from this class.
This class holds the vertical data for pressure,
height, temperature, dewpoint, and winds. This class
has no indices computed.
'''
def __init__(self, **kwargs):
'''
Create the sounding data object
Parameters
----------
Mandatory Keywords
pres : array_like
The pressure values (Hectopascals)
hght : array_like
The corresponding height values (Meters)
tmpc : array_like
The corresponding temperature values (Celsius)
dwpc : array_like
The corresponding dewpoint temperature values (Celsius)
Optional Keyword Pairs (must use one or the other)
wdir : array_like
The direction from which the wind is blowing in
meteorological degrees
wspd : array_like
The speed of the wind (kts)
OR
u : array_like
The U-component of the direction from which the wind
is blowing (kts)
v : array_like
The V-component of the direction from which the wind
is blowing. (kts)
Optional Keywords
missing : number (default: sharppy.sharptab.constants.MISSING)
The value of the missing flag
location : string (default: None)
The 3 character station identifier or 4 character
WMO station ID for radiosonde locations. Used for
the PWV database.
strictQC : boolean
A flag that indicates whether or not the strict quality control
routines should be run on the profile upon construction.
Returns
-------
prof: Profile object
'''
super(BasicProfile, self).__init__(**kwargs)
self.strictQC = kwargs.get('strictQC', True)
## did the user provide the wind in vector form?
if self.wdir is not None:
self.wdir[self.wdir == self.missing] = ma.masked
self.wspd[self.wspd == self.missing] = ma.masked
self.wdir[self.wspd.mask] = ma.masked
self.wspd[self.wdir.mask] = ma.masked
self.u, self.v = utils.vec2comp(self.wdir, self.wspd)
## did the user provide the wind in u,v form?
elif self.u is not None:
self.u[self.u == self.missing] = ma.masked
self.v[self.v == self.missing] = ma.masked
self.u[self.v.mask] = ma.masked
self.v[self.u.mask] = ma.masked
self.wdir, self.wspd = utils.comp2vec(self.u, self.v)
## check if any standard deviation data was supplied
if self.tmp_stdev is not None:
self.dew_stdev[self.dew_stdev == self.missing] = ma.masked
self.tmp_stdev[self.tmp_stdev == self.missing] = ma.masked
self.dew_stdev.set_fill_value(self.missing)
self.tmp_stdev.set_fill_value(self.missing)
if self.omeg is not None:
## get the omega data and turn into arrays
self.omeg[self.omeg == self.missing] = ma.masked
else:
self.omeg = ma.masked_all(len(self.hght))
# QC Checks on the arrays passed to the constructor.
qc_tools.areProfileArrayLengthEqual(self)
## mask the missing values
self.pres[self.pres == self.missing] = ma.masked
self.hght[self.hght == self.missing] = ma.masked
self.tmpc[self.tmpc == self.missing] = ma.masked
self.dwpc[self.dwpc == self.missing] = ma.masked
self.logp = np.log10(self.pres.copy())
self.vtmp = thermo.virtemp( self.pres, self.tmpc, self.dwpc )
idx = np.ma.where(self.pres > 0)[0]
self.vtmp[self.dwpc.mask[idx]] = self.tmpc[self.dwpc.mask[idx]] # Where the dewpoint is masked, fall back to the dry-bulb temperature for the virtual temperature
## get the index of the top and bottom of the profile
self.sfc = self.get_sfc()
self.top = self.get_top()
if self.strictQC is True:
self.checkDataIntegrity()
## generate the wetbulb profile
self.wetbulb = self.get_wetbulb_profile()
## generate theta-e profile
self.thetae = self.get_thetae_profile()
## generate theta profile
self.theta = self.get_theta_profile()
## generate water vapor mixing ratio profile
self.wvmr = self.get_wvmr_profile()
## generate rh profile
self.relh = self.get_rh_profile()
def get_sfc(self):
'''
Convenience function to get the index of the surface. It is
determined by finding the lowest level in which a temperature is
reported.
Parameters
----------
None
Returns
-------
Index of the surface
'''
return np.where(~self.tmpc.mask)[0].min()
def get_top(self):
'''
Convenience function to get the index of the top of the profile. It is
determined by finding the highest level in which a temperature is
reported.
Parameters
----------
None
Returns
-------
Index of the top of the profile
'''
return np.where(~self.tmpc.mask)[0].max()
def get_wvmr_profile(self):
'''
Function to calculate the water vapor mixing ratio profile.
Parameters
----------
None
Returns
-------
Array of water vapor mixing ratio profile
'''
#wvmr = ma.empty(self.pres.shape[0])
#for i in range(len(self.v)):
wvmr = thermo.mixratio( self.pres, self.dwpc )
wvmr[wvmr == self.missing] = ma.masked
wvmr.set_fill_value(self.missing)
return wvmr
def get_wetbulb_profile(self):
'''
Function to calculate the wetbulb profile.
Parameters
----------
None
Returns
-------
Array of wet bulb profile
'''
wetbulb = ma.empty(self.pres.shape[0])
for i in range(len(self.v)):
wetbulb[i] = thermo.wetbulb( self.pres[i], self.tmpc[i], self.dwpc[i] )
wetbulb[wetbulb == self.missing] = ma.masked
wetbulb.set_fill_value(self.missing)
return wetbulb
def get_theta_profile(self):
'''
Function to calculate the theta profile.
Parameters
----------
None
Returns
-------
Array of theta profile
'''
theta = ma.empty(self.pres.shape[0])
for i in range(len(self.v)):
theta[i] = thermo.theta(self.pres[i], self.tmpc[i])
theta[theta == self.missing] = ma.masked
theta.set_fill_value(self.missing)
theta = thermo.ctok(theta)
return theta
def get_thetae_profile(self):
'''
Function to calculate the theta-e profile.
Parameters
----------
None
Returns
-------
Array of theta-e profile
'''
thetae = ma.empty(self.pres.shape[0])
for i in range(len(self.v)):
thetae[i] = thermo.ctok( thermo.thetae(self.pres[i], self.tmpc[i], self.dwpc[i]) )
thetae[thetae == self.missing] = ma.masked
thetae.set_fill_value(self.missing)
return thetae
def get_rh_profile(self):
'''
Function to calculate the relative humidity profile
Parameters
----------
None
Returns
-------
Array of the relative humidity profile
'''
rh = thermo.relh(self.pres, self.tmpc, self.dwpc)
rh[rh == self.missing] = ma.masked
rh.set_fill_value(self.missing)
return rh
class ConvectiveProfile(BasicProfile):
'''
The Convective data class for SHARPPy. This is the class used
to generate the indices that are default for the SPC NSHARP display.
This class inherits from the Profile object.
'''
def __init__(self, **kwargs):
'''
Create the sounding data object
Parameters
----------
Mandatory Keywords
pres : array_like
The pressure values (Hectopascals)
hght : array_like
The corresponding height values (Meters)
tmpc : array_like
The corresponding temperature values (Celsius)
dwpc : array_like
The corresponding dewpoint temperature values (Celsius)
Optional Keyword Pairs (must use one or the other)
wdir : array_like
The direction from which the wind is blowing in
meteorological degrees
wspd : array_like
The speed of the wind (kts)
OR
u : array_like
The U-component of the direction from which the wind
is blowing
v : array_like
The V-component of the direction from which the wind
is blowing.
missing : number, optional (default: sharppy.sharptab.constants.MISSING)
The value of the missing flag
location : string, optional (default: None)
The 3 character station identifier or 4 character
WMO station ID for radiosonde locations. Used for
the PWV database.
omeg : array_like, optional
List of the vertical velocity in pressure coordinates with height (Pascals/second)
Returns
-------
A profile object
'''
## call the constructor for Profile
super(ConvectiveProfile, self).__init__(**kwargs)
assert np.ma.max(self.pres) > 100, "ConvectiveProfile objects require that the maximum pressure in the data array is greater than 100 mb."
self.user_srwind = None
# Generate the fire weather parameters
logging.debug("Calling get_fire().")
dt = datetime.now()
self.get_fire()
logging.debug("get_fire() took: " + str((datetime.now() - dt)))
# Generate the winter inset/precipitation types
logging.debug("Calling get_precip().")
dt = datetime.now()
self.get_precip()
logging.debug("get_precip() took: " + str((datetime.now() - dt)))
## generate various parcels
logging.debug("Calling get_parcels().")
dt = datetime.now()
self.get_parcels()
logging.debug("get_parcels() took: " + str((datetime.now() - dt)))
## calculate thermodynamic window indices
logging.debug("Calling get_thermo().")
dt = datetime.now()
self.get_thermo()
logging.debug("get_thermo() took: " + str((datetime.now() - dt)))
## generate wind indices
logging.debug("Calling get_kinematics().")
dt = datetime.now()
self.get_kinematics()
logging.debug("get_kinematics() took: " + str((datetime.now() - dt)))
## get SCP, STP(cin), STP(fixed), SHIP
logging.debug("Calling get_severe().")
dt = datetime.now()
self.get_severe()
logging.debug("get_severe() took: " + str((datetime.now() - dt)))
## calculate the SARS database matches
logging.debug("Calling get_sars().")
dt = datetime.now()
self.get_sars()
logging.debug("get_sars() took: " + str((datetime.now() - dt)))
## get the precipitable water climatology
logging.debug("Calling get_PWV_loc().")
dt = datetime.now()
self.get_PWV_loc()
logging.debug("get_PWV_loc() took: " + str((datetime.now() - dt)))
## get the parcel trajectory
logging.debug("Calling get_traj().")
dt = datetime.now()
self.get_traj()
logging.debug("get_traj() took: " + str((datetime.now() - dt)))
## miscellaneous indices I didn't know where to put
logging.debug("Calling get_indices().")
dt = datetime.now()
self.get_indices()
logging.debug("get_indices() took: " + str((datetime.now() - dt)))
## get the possible watch type
logging.debug("Calling get_watch().")
dt = datetime.now()
self.get_watch()
logging.debug("get_watch() took: " + str((datetime.now() - dt)))
def get_fire(self):
'''
Function to generate different indices and information
regarding any fire weather in the sounding. This helps fill
the data shown in the FIRE inset.
Parameters
----------
None
Returns
-------
None
'''
self.fosberg = fire.fosberg(self)
self.haines_hght = fire.haines_height(self)
self.haines_low = fire.haines_low(self)
self.haines_mid = fire.haines_mid(self)
self.haines_high = fire.haines_high(self)
self.ppbl_top = params.pbl_top(self)
self.sfc_rh = thermo.relh(self.pres[self.sfc], self.tmpc[self.sfc], self.dwpc[self.sfc])
pres_sfc = self.pres[self.sfc]
pres_1km = interp.pres(self, interp.to_msl(self, 1000.))
self.pbl_h = interp.to_agl(self, interp.hght(self, self.ppbl_top))
self.rh01km = params.mean_relh(self, pbot=pres_sfc, ptop=pres_1km)
self.pblrh = params.mean_relh(self, pbot=pres_sfc, ptop=self.ppbl_top)
self.meanwind01km = winds.mean_wind(self, pbot=pres_sfc, ptop=pres_1km)
self.meanwindpbl = winds.mean_wind(self, pbot=pres_sfc, ptop=self.ppbl_top)
self.pblmaxwind = winds.max_wind(self, lower=0, upper=self.pbl_h)
#self.pblmaxwind = [np.ma.masked, np.ma.masked]
mulplvals = params.DefineParcel(self, flag=3, pres=500)
mupcl = params.cape(self, lplvals=mulplvals)
self.bplus_fire = mupcl.bplus
def get_precip(self):
'''
Function to generate different indices and information
regarding any precipitation in the sounding. This helps fill
the data shown in the WINTER inset.
Returns nothing, but sets the following
variables:
self.dgz_pbot, self.dgz_ptop : the dendritic growth zone (DGZ) top and bottom (mb)
self.dgz_meanrh : DGZ mean relative humidity (%)
self.dgz_pw : the precipitable water vapor in the DGZ (inches)
self.dgz_meanq : the mean water vapor mixing ratio in the DGZ (g/kg)
self.dgz_meanomeg : the mean omega in the DGZ (microbars/second)
self.oprh : the OPRH variable (units don't mean anything)
self.plevel, self.phase, self.tmp, self.st : the initial phase, level, temperature, and state of any precip in the sounding
self.tpos, self.tneg, self.ttop, self.tbot : positive and negative temperature layers in the sounding
self.wpos, self.wneg, self.wtop, self.wbot : positive and negative wetbulb layers in the soundings
self.precip_type : the best guess precipitation type
Parameters
----------
None
Returns
-------
None
'''
self.dgz_pbot, self.dgz_ptop = params.dgz(self)
self.dgz_meanrh = params.mean_relh(self, pbot=self.dgz_pbot, ptop=self.dgz_ptop)
self.dgz_pw = params.precip_water(self, pbot=self.dgz_pbot, ptop=self.dgz_ptop)
self.dgz_meanq = params.mean_mixratio(self, pbot=self.dgz_pbot, ptop=self.dgz_ptop)
self.dgz_meanomeg = params.mean_omega(self, pbot=self.dgz_pbot, ptop=self.dgz_ptop) * 10 # to microbars/sec
self.oprh = self.dgz_meanomeg * self.dgz_pw * (self.dgz_meanrh/100.)
self.plevel, self.phase, self.tmp, self.st = watch_type.init_phase(self)
self.tpos, self.tneg, self.ttop, self.tbot = watch_type.posneg_temperature(self, start=self.plevel)
self.wpos, self.wneg, self.wtop, self.wbot = watch_type.posneg_wetbulb(self, start=self.plevel)
self.precip_type = watch_type.best_guess_precip(self, self.phase, self.plevel, self.tmp, self.tpos, self.tneg)
def get_parcels(self):
'''
Function to generate various parcels and parcel
traces.
Returns nothing, but sets the following
variables:
self.mupcl : Most Unstable Parcel
self.sfcpcl : Surface Based Parcel
self.mlpcl : Mixed Layer Parcel
self.fcstpcl : Forecast Surface Parcel
self.ebottom : The bottom pressure level of the effective inflow layer
self.etop : the top pressure level of the effective inflow layer
self.ebotm : The bottom, meters (agl), of the effective inflow layer
self.etopm : The top, meters (agl), of the effective inflow layer
Parameters
----------
None
Returns
-------
None
'''
self.mupcl = params.parcelx( self, flag=3 )
if self.mupcl.lplvals.pres == self.pres[self.sfc]:
self.sfcpcl = self.mupcl
else:
self.sfcpcl = params.parcelx( self, flag=1 )
self.fcstpcl = params.parcelx( self, flag=2 )
self.mlpcl = params.parcelx( self, flag=4 )
self.usrpcl = params.Parcel()
## get the effective inflow layer data
self.ebottom, self.etop = params.effective_inflow_layer( self, mupcl=self.mupcl )
## if there was no effective inflow layer, set the values to masked
if self.etop is ma.masked or self.ebottom is ma.masked:
self.ebotm = ma.masked; self.etopm = ma.masked
self.effpcl = self.sfcpcl # Default to surface parcel, as in params.DefineProfile().
## otherwise, interpolate the heights given to above ground level
else:
self.ebotm = interp.to_agl(self, interp.hght(self, self.ebottom))
self.etopm = interp.to_agl(self, interp.hght(self, self.etop))
# The below code was adapted from params.DefineProfile()
# Lifting one additional parcel probably won't slow the program too much.
# It's just one more lift compared to all the lifts in the params.effective_inflow_layer() call.
mtha = params.mean_theta(self, self.ebottom, self.etop)
mmr = params.mean_mixratio(self, self.ebottom, self.etop)
effpres = (self.ebottom+self.etop)/2.
efftmpc = thermo.theta(1000., mtha, effpres)
effdwpc = thermo.temp_at_mixrat(mmr, effpres)
self.effpcl = params.parcelx(self, flag=5, pres=effpres, tmpc=efftmpc, dwpc=effdwpc) #This is the effective parcel.
def get_kinematics(self):
'''
Function to generate the numerous kinematic quantities
used for display and calculations. It requires that the
parcel calculations have already been called for the lcl
to el shear and mean wind vectors, as well as indices
that require an effective inflow layer.
Parameters
----------
None
Returns
-------
None
'''
sfc = self.pres[self.sfc]
heights = np.array([1000., 3000., 4000., 5000., 6000., 8000., 9000.])
p1km, p3km, p4km, p5km, p6km, p8km, p9km = interp.pres(self, interp.to_msl(self, heights))
## 1km and 6km winds
self.wind1km = interp.vec(self, p1km)
self.wind6km = interp.vec(self, p6km)
## calculate wind shear
self.sfc_1km_shear = winds.wind_shear(self, pbot=sfc, ptop=p1km)
self.sfc_3km_shear = winds.wind_shear(self, pbot=sfc, ptop=p3km)
self.sfc_6km_shear = winds.wind_shear(self, pbot=sfc, ptop=p6km)
self.sfc_8km_shear = winds.wind_shear(self, pbot=sfc, ptop=p8km)
self.sfc_9km_shear = winds.wind_shear(self, pbot=sfc, ptop=p9km)
self.lcl_el_shear = winds.wind_shear(self, pbot=self.mupcl.lclpres, ptop=self.mupcl.elpres)
## calculate mean wind
self.mean_1km = utils.comp2vec(*winds.mean_wind(self, pbot=sfc, ptop=p1km))
self.mean_3km = utils.comp2vec(*winds.mean_wind(self, pbot=sfc, ptop=p3km))
self.mean_6km = utils.comp2vec(*winds.mean_wind(self, pbot=sfc, ptop=p6km))
self.mean_8km = utils.comp2vec(*winds.mean_wind(self, pbot=sfc, ptop=p8km))
self.mean_lcl_el = utils.comp2vec(*winds.mean_wind(self, pbot=self.mupcl.lclpres, ptop=self.mupcl.elpres))
## parameters that depend on the presence of an effective inflow layer
if self.etop is ma.masked or self.ebottom is ma.masked:
self.etopm = ma.masked; self.ebotm = ma.masked
self.bunkers = winds.non_parcel_bunkers_motion( self )
if self.user_srwind is None:
self.user_srwind = self.bunkers
self.srwind = self.user_srwind
self.eff_shear = [MISSING, MISSING]
self.ebwd = [MISSING, MISSING, MISSING]
self.ebwspd = MISSING
self.mean_eff = [MISSING, MISSING, MISSING]
self.mean_ebw = [MISSING, MISSING, MISSING]
self.right_srw_eff = [MISSING, MISSING, MISSING]
self.right_srw_ebw = [MISSING, MISSING, MISSING]
self.right_esrh = [ma.masked, ma.masked, ma.masked]
self.right_critical_angle = ma.masked
self.left_srw_eff = [MISSING, MISSING, MISSING]
self.left_srw_ebw = [MISSING, MISSING, MISSING]
self.left_esrh = [ma.masked, ma.masked, ma.masked]
self.left_critical_angle = ma.masked
else:
self.bunkers = params.bunkers_storm_motion(self, mupcl=self.mupcl, pbot=self.ebottom)
if self.user_srwind is None:
self.user_srwind = self.bunkers
self.srwind = self.user_srwind
depth = ( self.mupcl.elhght - self.ebotm ) / 2
elh = interp.pres(self, interp.to_msl(self, self.ebotm + depth))
## calculate mean wind
self.mean_eff = winds.mean_wind(self, self.ebottom, self.etop )
self.mean_ebw = winds.mean_wind(self, pbot=self.ebottom, ptop=elh )
## calculate wind shear of the effective layer
self.eff_shear = winds.wind_shear(self, pbot=self.ebottom, ptop=self.etop)
self.ebwd = winds.wind_shear(self, pbot=self.ebottom, ptop=elh)
self.ebwspd = utils.mag( self.ebwd[0], self.ebwd[1] )
## calculate quantities relative to the right-mover vector
self.right_srw_eff = winds.sr_wind(self, pbot=self.ebottom, ptop=self.etop, stu=self.srwind[0], stv=self.srwind[1] )
self.right_srw_ebw = winds.sr_wind(self, pbot=self.ebottom, ptop=elh, stu=self.srwind[0], stv=self.srwind[1] )
self.right_esrh = winds.helicity(self, self.ebotm, self.etopm, stu=self.srwind[0], stv=self.srwind[1])
self.right_critical_angle = winds.critical_angle(self, stu=self.srwind[0], stv=self.srwind[1])
## calculate quantities relative to the left-mover vector
self.left_srw_eff = winds.sr_wind(self, pbot=self.ebottom, ptop=self.etop, stu=self.srwind[2], stv=self.srwind[3] )
self.left_srw_ebw = winds.sr_wind(self, pbot=self.ebottom, ptop=elh, stu=self.srwind[2], stv=self.srwind[3] )
self.left_esrh = winds.helicity(self, self.ebotm, self.etopm, stu=self.srwind[2], stv=self.srwind[3])
self.left_critical_angle = winds.critical_angle(self, stu=self.srwind[2], stv=self.srwind[3])
## calculate quantities relative to the right-mover vector
self.right_srw_1km = utils.comp2vec(*winds.sr_wind(self, pbot=sfc, ptop=p1km, stu=self.srwind[0], stv=self.srwind[1] ))
self.right_srw_3km = utils.comp2vec(*winds.sr_wind(self, pbot=sfc, ptop=p3km, stu=self.srwind[0], stv=self.srwind[1] ))
self.right_srw_6km = utils.comp2vec(*winds.sr_wind(self, pbot=sfc, ptop=p6km, stu=self.srwind[0], stv=self.srwind[1] ))
self.right_srw_8km = utils.comp2vec(*winds.sr_wind(self, pbot=sfc, ptop=p8km, stu=self.srwind[0], stv=self.srwind[1] ))
self.right_srw_4_5km = utils.comp2vec(*winds.sr_wind(self, pbot=p4km, ptop=p5km, stu=self.srwind[0], stv=self.srwind[1] ))
self.right_srw_lcl_el = utils.comp2vec(*winds.sr_wind(self, pbot=self.mupcl.lclpres, ptop=self.mupcl.elpres, stu=self.srwind[0], stv=self.srwind[1] ))
# This is for the red, blue, and purple bars that appear on the SR Winds vs. Height plot
self.right_srw_0_2km = winds.sr_wind(self, pbot=sfc, ptop=interp.pres(self, interp.to_msl(self, 2000.)), stu=self.srwind[0], stv=self.srwind[1])
self.right_srw_4_6km = winds.sr_wind(self, pbot=interp.pres(self, interp.to_msl(self, 4000.)), ptop=p6km, stu=self.srwind[0], stv=self.srwind[1])
self.right_srw_9_11km = winds.sr_wind(self, pbot=interp.pres(self, interp.to_msl(self, 9000.)), ptop=interp.pres(self, interp.to_msl(self, 11000.)), stu=self.srwind[0], stv=self.srwind[1])
## calculate quantities relative to the left-mover vector
self.left_srw_1km = utils.comp2vec(*winds.sr_wind(self, pbot=sfc, ptop=p1km, stu=self.srwind[2], stv=self.srwind[3] ))
self.left_srw_3km = utils.comp2vec(*winds.sr_wind(self, pbot=sfc, ptop=p3km, stu=self.srwind[2], stv=self.srwind[3] ))
self.left_srw_6km = utils.comp2vec(*winds.sr_wind(self, pbot=sfc, ptop=p6km, stu=self.srwind[2], stv=self.srwind[3] ))
self.left_srw_8km = utils.comp2vec(*winds.sr_wind(self, pbot=sfc, ptop=p8km, stu=self.srwind[2], stv=self.srwind[3] ))
self.left_srw_4_5km = utils.comp2vec(*winds.sr_wind(self, pbot=p4km, ptop=p5km, stu=self.srwind[2], stv=self.srwind[3] ))
self.left_srw_lcl_el = utils.comp2vec(*winds.sr_wind(self, pbot=self.mupcl.lclpres, ptop=self.mupcl.elpres, stu=self.srwind[2], stv=self.srwind[3] ))
# This is for the red, blue, and purple bars that appear on the SR Winds vs. Height plot
self.left_srw_0_2km = winds.sr_wind(self, pbot=sfc, ptop=interp.pres(self, interp.to_msl(self, 2000.)), stu=self.srwind[2], stv=self.srwind[3])
self.left_srw_4_6km = winds.sr_wind(self, pbot=interp.pres(self, interp.to_msl(self, 4000.)), ptop=p6km, stu=self.srwind[2], stv=self.srwind[3])
self.left_srw_9_11km = winds.sr_wind(self, pbot=interp.pres(self, interp.to_msl(self, 9000.)), ptop=interp.pres(self, interp.to_msl(self, 11000.)), stu=self.srwind[2], stv=self.srwind[3])
## calculate upshear and downshear
self.upshear_downshear = winds.mbe_vectors(self)
self.right_srh1km = winds.helicity(self, 0, 1000., stu=self.srwind[0], stv=self.srwind[1])
self.right_srh3km = winds.helicity(self, 0, 3000., stu=self.srwind[0], stv=self.srwind[1])
self.left_srh1km = winds.helicity(self, 0, 1000., stu=self.srwind[2], stv=self.srwind[3])
self.left_srh3km = winds.helicity(self, 0, 3000., stu=self.srwind[2], stv=self.srwind[3])
self.srw_eff = self.right_srw_eff
self.srw_ebw = self.right_srw_ebw
self.esrh = self.right_esrh
self.critical_angle = self.right_critical_angle
self.srw_1km = self.right_srw_1km
self.srw_3km = self.right_srw_3km
self.srw_6km = self.right_srw_6km
self.srw_8km = self.right_srw_8km
self.srw_4_5km = self.right_srw_4_5km
self.srw_lcl_el = self.right_srw_lcl_el
self.srw_0_2km = self.right_srw_0_2km
self.srw_4_6km = self.right_srw_4_6km
self.srw_9_11km = self.right_srw_9_11km
self.srh1km = self.right_srh1km
self.srh3km = self.right_srh3km
def get_thermo(self):
'''
Function to generate thermodynamic indices.
Function returns nothing, but sets the following
variables:
self.k_idx - K Index, a severe weather index
self.pwat - Precipitable Water Vapor (inches)
self.lapserate_3km - 0 to 3km AGL lapse rate (C/km)
self.lapserate_3_6km - 3 to 6km AGL lapse rate (C/km)
self.lapserate_850_500 - 850 to 500mb lapse rate (C/km)
self.lapserate_700_500 - 700 to 500mb lapse rate (C/km)
self.convT - The Convective Temperature (F)
self.maxT - The Maximum Forecast Surface Temp (F)
self.mean_mixr - Mean Mixing Ratio
self.low_rh - low level mean relative humidity
self.mid_rh - mid level mean relative humidity
self.totals_totals - Totals Totals index, a severe weather index
Parameters
----------
None
Returns
-------
None
'''
## either get or calculate the indices, round to the nearest int, and
## convert them to strings.
## K Index
self.k_idx = params.k_index( self )
## precipitable water
self.pwat = params.precip_water( self )
## 0-3km agl lapse rate
self.lapserate_3km = params.lapse_rate( self, 0., 3000., pres=False )
## 3-6km agl lapse rate
self.lapserate_3_6km = params.lapse_rate( self, 3000., 6000., pres=False )
## 850-500mb lapse rate
self.lapserate_850_500 = params.lapse_rate( self, 850., 500., pres=True )
## 700-500mb lapse rate
self.lapserate_700_500 = params.lapse_rate( self, 700., 500., pres=True )
## 2-6 km max lapse rate
self.max_lapse_rate_2_6 = params.max_lapse_rate( self )
## convective temperature
self.convT = thermo.ctof( params.convective_temp( self ) )
## sounding forecast surface temperature
self.maxT = thermo.ctof( params.max_temp( self ) )
#fzl = str(int(self.sfcparcel.hght0c))
## 100mb mean mixing ratio
self.mean_mixr = params.mean_mixratio( self )
## 150mb mean rh
self.low_rh = params.mean_relh( self )
self.mid_rh = params.mean_relh( self, pbot=(self.pres[self.sfc] - 150),
ptop=(self.pres[self.sfc] - 350) )
## calculate the totals totals index
self.totals_totals = params.t_totals( self )
## calculate the inferred temperature advection
self.inf_temp_adv = params.inferred_temp_adv(self, lat=self.latitude)
def get_severe(self):
'''
Function to calculate special severe weather indices.
Requires calling get_parcels() and get_kinematics().
Returns nothing, but sets the following variables:
self.right_stp_fixed - fixed layer significant tornado parameter (computed with SRH relative to the right-mover vector)
self.left_stp_fixed - fixed layer significant tornado parameter (computed with SRH relative to the left-mover vector)
self.right_stp_cin - effective layer significant tornado parameter (computed with SRH relative to the right-mover vector)
self.left_stp_cin - effective layer significant tornado parameter (computed with SRH relative to the left-mover vector)
self.right_scp - right moving supercell composite parameter
self.left_scp - left moving supercell composite parameter
Parameters
----------
None
Returns
-------
None
'''
wspd = utils.mag(self.sfc_6km_shear[0], self.sfc_6km_shear[1])
self.right_stp_fixed = params.stp_fixed(self.sfcpcl.bplus, self.sfcpcl.lclhght, self.right_srh1km[0], utils.KTS2MS(wspd))
self.left_stp_fixed = params.stp_fixed(self.sfcpcl.bplus, self.sfcpcl.lclhght, self.left_srh1km[0], utils.KTS2MS(wspd))
self.sherbe = params.sherb(self, effective=True)
if self.etop is np.ma.masked or self.ebottom is np.ma.masked:
self.right_scp = 0.0; self.left_scp = 0.0
self.right_stp_cin = 0.0; self.left_stp_cin = 0.0
else:
self.right_scp = params.scp( self.mupcl.bplus, self.right_esrh[0], utils.KTS2MS(self.ebwspd))
self.left_scp = params.scp( self.mupcl.bplus, self.left_esrh[0], utils.KTS2MS(self.ebwspd))
right_esrh = self.right_esrh[0]
left_esrh = self.left_esrh[0]
if self.latitude < 0:
right_esrh = -right_esrh
left_esrh = -left_esrh
self.right_stp_cin = params.stp_cin(self.mlpcl.bplus, right_esrh, utils.KTS2MS(self.ebwspd),
self.mlpcl.lclhght, self.mlpcl.bminus)
self.left_stp_cin = params.stp_cin(self.mlpcl.bplus, left_esrh, utils.KTS2MS(self.ebwspd),
self.mlpcl.lclhght, self.mlpcl.bminus)
if self.latitude < 0:
self.right_stp_cin = -self.right_stp_cin
self.left_stp_cin = -self.left_stp_cin
if self.latitude < 0:
self.stp_fixed = self.left_stp_fixed
self.stp_cin = self.left_stp_cin
self.scp = self.left_scp
else:
self.stp_fixed = self.right_stp_fixed
self.stp_cin = self.right_stp_cin
self.scp = self.right_scp
def get_sars(self):
'''
Function to get the SARS analogues from the hail and
supercell databases. Requires calling get_kinematics()
and get_parcels() first. Also calculates the significant
hail parameter.
Function returns nothing, but sets the following variables:
self.matches - the matches from SARS HAIL
self.ship - significant hail parameter
self.supercell_matches - the matches from SARS SUPERCELL
Parameters
----------
None
Returns
-------
None
'''
sfc_6km_shear = utils.KTS2MS( utils.mag( self.sfc_6km_shear[0], self.sfc_6km_shear[1]) )
sfc_3km_shear = utils.KTS2MS( utils.mag( self.sfc_3km_shear[0], self.sfc_3km_shear[1]) )
sfc_9km_shear = utils.KTS2MS( utils.mag( self.sfc_9km_shear[0], self.sfc_9km_shear[1]) )
h500t = interp.temp(self, 500.)
lapse_rate = params.lapse_rate( self, 700., 500., pres=True )
right_srh3km = self.right_srh3km[0]
right_srh1km = self.right_srh1km[0]
left_srh3km = self.left_srh3km[0]
left_srh1km = self.left_srh1km[0]
mucape = self.mupcl.bplus
mlcape = self.mlpcl.bplus
mllcl = self.mlpcl.lclhght
mumr = thermo.mixratio(self.mupcl.pres, self.mupcl.dwpc)
self.ship = params.ship(self)
self.hail_database = 'sars_hail.txt'
self.supercell_database = 'sars_supercell.txt'
try:
self.right_matches = hail(self.hail_database, mumr, mucape, h500t, lapse_rate, sfc_6km_shear,
sfc_9km_shear, sfc_3km_shear, right_srh3km)
except:
self.right_matches = ([], [], 0, 0, 0)
try:
self.left_matches = hail(self.hail_database, mumr, mucape, h500t, lapse_rate, sfc_6km_shear,
sfc_9km_shear, sfc_3km_shear, -left_srh3km)
except:
self.left_matches = ([], [], 0, 0, 0)
try:
self.right_supercell_matches = supercell(self.supercell_database, mlcape, mllcl, h500t, lapse_rate,
utils.MS2KTS(sfc_6km_shear), right_srh1km, utils.MS2KTS(sfc_3km_shear), utils.MS2KTS(sfc_9km_shear),
right_srh3km)
except:
self.right_supercell_matches = ([], [], 0, 0, 0)
try:
self.left_supercell_matches = supercell(self.supercell_database, mlcape, mllcl, h500t, lapse_rate,
utils.MS2KTS(sfc_6km_shear), -left_srh1km, utils.MS2KTS(sfc_3km_shear), utils.MS2KTS(sfc_9km_shear),
-left_srh3km)
except Exception as e:
self.left_supercell_matches = ([], [], 0, 0, 0)
if self.latitude < 0:
self.supercell_matches = self.left_supercell_matches
self.matches = self.left_matches
else:
self.supercell_matches = self.right_supercell_matches
self.matches = self.right_matches
def get_watch(self):
'''
Function to get the possible watch type.
Function returns nothing, but sets the following
variables:
self.watch_type - possible watch type
Parameters
----------
None
Returns
-------
None
'''
watch_types = watch_type.possible_watch(self, use_left=False)
self.right_watch_type = watch_types[0]
watch_types = watch_type.possible_watch(self, use_left=True)
self.left_watch_type = watch_types[0]
if self.latitude < 0:
self.watch_type = self.left_watch_type
else:
self.watch_type = self.right_watch_type
def get_traj(self):
'''
Function to compute the storm slinky profile using
the trajectory model.
self.slinky_traj - the list containing the position vector for the updraft
self.updraft_tilt - the updraft tilt (an angle) with respect to the horizon
Parameters
----------
None
Returns
-------
None
'''
parcel = self.mupcl
slinky = params.parcelTraj(self, parcel)
if slinky is None:
self.slinky_traj = ma.masked
self.updraft_tilt = ma.masked
else:
self.slinky_traj = slinky[0]
self.updraft_tilt = slinky[1]
def get_PWV_loc(self):
'''
Function to compute the location of the current PWV with respect to
its sounding climatology from Bunkers.
Parameters
----------
None
Returns
-------
None
'''
self.pwv_flag = pwv_climo(self, self.location, month=int(self.date.strftime('%m')))
def get_indices(self):
'''
Function to set any additional indices that are included in the
thermo window.
Parameters
----------
None
Returns
-------
None
'''
self.tei = params.tei(self)
self.esp = params.esp(self)
self.mmp = params.mmp(self)
self.wndg = params.wndg(self)
self.sig_severe = params.sig_severe(self)
self.dcape, self.dpcl_ttrace, self.dpcl_ptrace = params.dcape(self)
self.drush = thermo.ctof(self.dpcl_ttrace[-1])
self.mburst = params.mburst(self)
def set_srleft(self, lm_u, lm_v):
'''
Sets the u and v values of the left mover supercell storm motion vector.
Parameters
----------
lm_u : number
Left mover u-component of the storm motion vector
lm_v : number
Left mover v-component of the storm motion vector
Returns
-------
None
'''
self.user_srwind = self.user_srwind[:2] + (lm_u, lm_v)
self.get_kinematics()
self.get_severe()
def set_srright(self, rm_u, rm_v):
'''
Sets the u and v values of the right mover supercell storm motion vector.
Parameters
----------
rm_u : number
Right mover u-component of the storm motion vector
rm_v : number
Right mover v-component of the storm motion vector
Returns
-------
None
'''
self.user_srwind = (rm_u, rm_v) + self.user_srwind[2:]
self.get_kinematics()
self.get_severe()
def reset_srm(self):
'''
Resets the storm motion vector to those found by the Bunkers algorithm
Parameters
----------
None
Returns
-------
None
'''
self.user_srwind = self.bunkers
self.get_kinematics()
self.get_severe()
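if __name__ == '__main__':
    # Minimal usage sketch for create_profile() documented above.  The sounding
    # values below are arbitrary made-up numbers (not real observations), and
    # profile='raw' is used so that no derived indices are computed.
    _pres = np.array([1000., 925., 850., 700., 500.])
    _hght = np.array([100., 760., 1450., 3000., 5700.])
    _tmpc = np.array([25., 20., 15., 5., -15.])
    _dwpc = np.array([20., 17., 12., -5., -30.])
    _wdir = np.array([180., 190., 200., 220., 240.])
    _wspd = np.array([10., 15., 20., 30., 45.])
    _prof = create_profile(profile='raw', pres=_pres, hght=_hght, tmpc=_tmpc,
                           dwpc=_dwpc, wdir=_wdir, wspd=_wspd)
    print(_prof.pres, _prof.missing)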
|
10_pipeline/sagemaker_mlops/sagemaker-project-modelbuild/tests/test_pipelines.py | dpai/workshop | 2,327 | 12618430 | <reponame>dpai/workshop
import pytest
@pytest.mark.xfail
def test_that_you_wrote_tests():
assert False, "No tests written"
def test_pipelines_importable():
import pipelines # noqa: F401
|
interop_2016/ex2_conditionals/test_condition3.py | fallenfuzz/pynet | 528 | 12618478 | # Testing truth
if 1:
print "1 is True"
if not "":
print "Null string is False"
if not None:
print "None is False"
if ['whatever']:
print "List with one element is True"
|
integrations/wandb/scripts/sweeps_using_config.py | apjanco/projects | 823 | 12618481 | import typer
from pathlib import Path
from spacy.training.loop import train
from spacy.training.initialize import init_nlp
from spacy import util
from thinc.api import Config
import wandb
def main(default_config: Path, output_path: Path):
loaded_local_config = util.load_config(default_config)
with wandb.init() as run:
sweeps_config = Config(util.dot_to_dict(run.config))
merged_config = Config(loaded_local_config).merge(sweeps_config)
nlp = init_nlp(merged_config)
train(nlp, output_path, use_gpu=True)
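# Illustrative sketch of a sweep configuration that could drive this script
# (a hypothetical sweep.yaml; the dotted parameter names must match entries in
# your own spaCy training config and are resolved by dot_to_dict above):
#
#   method: bayes
#   metric:
#     name: score
#     goal: maximize
#   parameters:
#     training.dropout:
#       values: [0.1, 0.2, 0.35]
#     training.optimizer.learn_rate:
#       min: 0.001
#       max: 0.01
#   command:
#     - ${env}
#     - python
#     - scripts/sweeps_using_config.py
#     - configs/default_config.cfg
#     - training/output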
if __name__ == "__main__":
typer.run(main)
|
problog/nnf_formula.py | HEmile/problog | 189 | 12618486 | import warnings
warnings.warn(
"The class nnf_formula.NNF has been renamed to ddnnf_formula.DDNNF. Please update your code!"
)
|
djstripe/models/__init__.py | ExtraE113/dj-stripe | 937 | 12618492 |
from .account import Account
from .api import APIKey
from .base import IdempotencyKey, StripeModel
from .billing import (
Coupon,
Invoice,
InvoiceItem,
Plan,
Subscription,
SubscriptionItem,
SubscriptionSchedule,
TaxId,
TaxRate,
UpcomingInvoice,
UsageRecord,
UsageRecordSummary,
)
from .checkout import Session
from .connect import (
ApplicationFee,
ApplicationFeeRefund,
CountrySpec,
Transfer,
TransferReversal,
)
from .core import (
BalanceTransaction,
Charge,
Customer,
Dispute,
Event,
File,
FileLink,
FileUpload,
PaymentIntent,
Payout,
Price,
Product,
Refund,
SetupIntent,
)
from .payment_methods import (
BankAccount,
Card,
DjstripePaymentMethod,
PaymentMethod,
Source,
)
from .sigma import ScheduledQueryRun
from .webhooks import WebhookEventTrigger
__all__ = [
"Account",
"APIKey",
"ApplicationFee",
"ApplicationFeeRefund",
"BalanceTransaction",
"BankAccount",
"Card",
"Charge",
"CountrySpec",
"Coupon",
"Customer",
"Dispute",
"DjstripePaymentMethod",
"Event",
"File",
"FileLink",
"FileUpload",
"IdempotencyKey",
"Invoice",
"InvoiceItem",
"PaymentIntent",
"PaymentMethod",
"Payout",
"Plan",
"Price",
"Product",
"Refund",
"SetupIntent",
"Session",
"ScheduledQueryRun",
"Source",
"StripeModel",
"Subscription",
"SubscriptionItem",
"SubscriptionSchedule",
"TaxId",
"TaxRate",
"Transfer",
"TransferReversal",
"UpcomingInvoice",
"UsageRecord",
"UsageRecordSummary",
"WebhookEventTrigger",
]
|
var/spack/repos/builtin/packages/camellia/package.py | BenWibking/spack | 2,360 | 12618497 |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Camellia(CMakePackage):
"""Camellia: user-friendly MPI-parallel adaptive finite element package,
with support for DPG and other hybrid methods, built atop Trilinos.
"""
homepage = "https://bitbucket.org/nateroberts/Camellia"
git = "https://bitbucket.org/nateroberts/camellia.git"
maintainers = ['CamelliaDPG']
version('master', branch='master')
variant('moab', default=True, description='Compile with MOAB to include support for reading standard mesh formats')
depends_on('trilinos+amesos+amesos2+belos+epetra+epetraext+exodus+ifpack+ifpack2+intrepid+intrepid2+kokkos+ml+muelu+sacado+shards+tpetra+zoltan+mumps+superlu-dist+hdf5+mpi@master,12.12.1:')
depends_on('moab@:4', when='+moab')
# Camellia needs hdf5, but the looser constraint "hdf5@:1.8" was found
# not to match versions such as "1.8.10" or "1.8.21".
# See https://github.com/spack/spack/pull/8337
depends_on('hdf5@:1.8.21')
depends_on('mpi')
def cmake_args(self):
spec = self.spec
options = [
'-DTrilinos_PATH:PATH=%s' % spec['trilinos'].prefix,
'-DMPI_DIR:PATH=%s' % spec['mpi'].prefix,
'-DBUILD_FOR_INSTALL:BOOL=ON'
]
if '+moab' in spec:
options.extend([
'-DENABLE_MOAB:BOOL=ON',
'-DMOAB_PATH:PATH=%s' % spec['moab'].prefix
])
else:
options.append('-DENABLE_MOAB:BOOL=OFF')
return options
|
anuga/culvert_flows/tests/run_culvert_flat_water_lev.py | samcom12/anuga_core | 136 | 12618500 | """ Testing CULVERT (Changing from Horizontal Abstraction to Vertical Abstraction)
This example includes a Model Topography that shows a TYPICAL Headwall Configuration
The aim is to change the Culvert Routine to Model more precisely the abstraction
from a vertical face.
The inflow must include the impact of Approach velocity.
Similarly, the outflow has momentum, not just upwelling as in the horizontal-style
abstraction.
"""
from __future__ import print_function
from __future__ import division
from builtins import range
from past.utils import old_div
print('Starting.... Importing Modules...')
#------------------------------------------------------------------------------
# Import necessary modules
#------------------------------------------------------------------------------
from anuga.abstract_2d_finite_volumes.mesh_factory import rectangular_cross
from anuga.shallow_water import Domain, Reflective_boundary,\
Dirichlet_boundary,\
Transmissive_boundary, Time_boundary
from anuga.culvert_flows.culvert_class import Culvert_flow
from anuga.culvert_flows.culvert_routines import boyd_generalised_culvert_model
from math import pi,pow,sqrt
import numpy as num
#------------------------------------------------------------------------------
# Setup computational domain
#------------------------------------------------------------------------------
print('Setting up domain')
length = 40.
width = 5.
dx = dy = 1 # Resolution: Length of subdivisions on both axes
#dx = dy = .5 # Resolution: Length of subdivisions on both axes
#dx = dy = .5 # Resolution: Length of subdivisions on both axes
#dx = dy = .1 # Resolution: Length of subdivisions on both axes
points, vertices, boundary = rectangular_cross(int(old_div(length,dx)), int(old_div(width,dy)),
len1=length, len2=width)
domain = Domain(points, vertices, boundary)
domain.set_name('Test_Culv_Flat_WL') # Output name
domain.set_default_order(2)
domain.H0 = 0.01
domain.tight_slope_limiters = 1
print('Size', len(domain))
#------------------------------------------------------------------------------
# Setup initial conditions
#------------------------------------------------------------------------------
def topography(x, y):
"""Set up a weir
A culvert will connect either side
"""
# General Slope of Topography
z=old_div(-x,1000)
# NOW Add bits and Pieces to topography
N = len(x)
for i in range(N):
# Sloping Embankment Across Channel
if 5.0 < x[i] < 10.1:
if 1.0+(x[i]-5.0)/5.0 < y[i] < 4.0 - (x[i]-5.0)/5.0: # Cut Out Segment for Culvert FACE
z[i]=z[i]
else:
z[i] += 0.5*(x[i] -5.0) # Sloping Segment U/S Face
if 10.0 < x[i] < 12.1:
z[i] += 2.5 # Flat Crest of Embankment
if 12.0 < x[i] < 14.5:
if 2.0-(x[i]-12.0)/2.5 < y[i] < 3.0 + (x[i]-12.0)/2.5: # Cut Out Segment for Culvert FACE
z[i]=z[i]
else:
z[i] += 2.5-1.0*(x[i] -12.0) # Sloping D/S Face
return z
print('Setting Quantities....')
domain.set_quantity('elevation', topography) # Use function for elevation
domain.set_quantity('friction', 0.01) # Constant friction
domain.set_quantity('stage',
expression='elevation') # Dry initial condition
#------------------------------------------------------------------------------
# Setup specialised forcing terms
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# Setup CULVERT INLETS and OUTLETS in Current Topography
#------------------------------------------------------------------------------
print('DEFINING any Structures if Required')
# DEFINE CULVERT INLET AND OUTLETS
culvert_rating = Culvert_flow(domain,
culvert_description_filename='example_rating_curve.csv',
end_point0=[9.0, 2.5],
end_point1=[13.0, 2.5],
verbose=True)
culvert_energy = Culvert_flow(domain,
label='Culvert No. 1',
description='This culvert is a test unit 1.2m Wide by 0.75m High',
end_point0=[9.0, 2.5],
end_point1=[13.0, 2.5],
width=1.20,height=0.75,
culvert_routine=boyd_generalised_culvert_model,
number_of_barrels=1,
update_interval=2,
log_file=True,
discharge_hydrograph=True,
verbose=True)
domain.forcing_terms.append(culvert_energy)
#------------------------------------------------------------------------------
# Setup boundary conditions
#------------------------------------------------------------------------------
print('Setting Boundary Conditions')
Bi = Dirichlet_boundary([0.0, 0.0, 0.0]) # Inflow based on Flow Depth and Approaching Momentum !!!
Br = Reflective_boundary(domain) # Solid reflective wall
Bo = Dirichlet_boundary([-5, 0, 0]) # Outflow
Btus = Time_boundary(domain, lambda t: [0.0+ 1.25*(1+num.sin(old_div(2*pi*(t-4),10))), 0.0, 0.0])
Btds = Time_boundary(domain, lambda t: [0.0+ 0.75*(1+num.sin(old_div(2*pi*(t-4),20))), 0.0, 0.0])
domain.set_boundary({'left': Btus, 'right': Btds, 'top': Br, 'bottom': Br})
#------------------------------------------------------------------------------
# Evolve system through time
#------------------------------------------------------------------------------
#for t in domain.evolve(yieldstep = 1, finaltime = 25):
# print domain.timestepping_statistics()
#import sys; sys.exit()
# Profiling code
import time
t0 = time.time()
s = 'for t in domain.evolve(yieldstep = 1, finaltime = 25): domain.write_time()'
import profile, pstats
FN = 'profile.dat'
profile.run(s, FN)
print('That took %.2f seconds' %(time.time()-t0))
S = pstats.Stats(FN)
#S.sort_stats('time').print_stats(20)
s = S.sort_stats('cumulative').print_stats(30)
print(s)
|
rainbow/4-multistep/memory.py | g6ling/Pytorch-Cartpole | 116 | 12618525 | import random
from collections import namedtuple, deque
from config import n_step, gamma
Transition = namedtuple('Transition', ('state', 'next_state', 'action', 'reward', 'mask'))
class Memory(object):
def __init__(self, capacity):
self.memory = deque(maxlen=capacity)
self.capacity = capacity
self.reset_local()
def reset_local(self):
self.local_step = 0
self.local_state = None
self.local_action = None
self.local_rewards = []
    def push(self, state, next_state, action, reward, mask):
        # Accumulate rewards until n_step transitions have been observed, then
        # store a single transition whose reward is the discounted n-step return.
        self.local_step += 1
        self.local_rewards.append(reward)
        if self.local_step == 1:
            self.local_state = state
            self.local_action = action
        if self.local_step == n_step:
            reward = 0
            for idx, local_reward in enumerate(self.local_rewards):
                reward += (gamma ** idx) * local_reward
            self.memory.append(Transition(self.local_state, next_state, self.local_action, reward, mask))
            self.reset_local()
        if mask == 0:
            # Episode ended: reset the accumulator; any partial (< n_step) window
            # is discarded rather than stored with a shorter return.
            self.reset_local()
def sample(self, batch_size):
transitions = random.sample(self.memory, batch_size)
batch = Transition(*zip(*transitions))
return batch
def __len__(self):
return len(self.memory)
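# Illustrative usage sketch (not part of the original file): how this n-step
# buffer is typically driven by a training loop. `env`, `agent` and `batch_size`
# are hypothetical names; only `n_step` and `gamma` come from the config above.
#
# memory = Memory(capacity=10000)
# state = env.reset()
# while True:
#     action = agent.get_action(state)
#     next_state, reward, done, _ = env.step(action)
#     mask = 0 if done else 1
#     memory.push(state, next_state, action, reward, mask)
#     state = env.reset() if done else next_state
#     if len(memory) > batch_size:
#         batch = memory.sample(batch_size)   # Transition of batched fields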
|
cython/setup.py | KKobuszewski/cuda | 126 | 12618529 | # from future.utils import iteritems
import os
from os.path import join as pjoin
from setuptools import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
import numpy
def find_in_path(name, path):
"""Find a file in a search path"""
    # Adapted from http://code.activestate.com/recipes/52224
for dir in path.split(os.pathsep):
binpath = pjoin(dir, name)
if os.path.exists(binpath):
return os.path.abspath(binpath)
return None
def locate_cuda():
"""Locate the CUDA environment on the system
Returns a dict with keys 'home', 'nvcc', 'include', and 'lib64'
and values giving the absolute path to each directory.
Starts by looking for the CUDAHOME env variable. If not found,
everything is based on finding 'nvcc' in the PATH.
"""
# First check if the CUDAHOME env variable is in use
if 'CUDAHOME' in os.environ:
home = os.environ['CUDAHOME']
nvcc = pjoin(home, 'bin', 'nvcc')
else:
# Otherwise, search the PATH for NVCC
nvcc = find_in_path('nvcc', os.environ['PATH'])
if nvcc is None:
raise EnvironmentError('The nvcc binary could not be '
'located in your $PATH. Either add it to your path, '
'or set $CUDAHOME')
home = os.path.dirname(os.path.dirname(nvcc))
cudaconfig = {'home': home, 'nvcc': nvcc,
'include': pjoin(home, 'include'),
'lib64': pjoin(home, 'lib64')}
for k, v in iter(cudaconfig.items()):
if not os.path.exists(v):
raise EnvironmentError('The CUDA %s path could not be '
'located in %s' % (k, v))
return cudaconfig
def customize_compiler_for_nvcc(self):
"""Inject deep into distutils to customize how the dispatch
to gcc/nvcc works.
If you subclass UnixCCompiler, it's not trivial to get your subclass
injected in, and still have the right customizations (i.e.
distutils.sysconfig.customize_compiler) run on it. So instead of going
    the OO route, I have this. Note, it's kind of like a weird functional
subclassing going on.
"""
    # Tell the compiler it can process .cu source files
self.src_extensions.append('.cu')
    # Save references to the default compiler_so and _compile methods
default_compiler_so = self.compiler_so
super = self._compile
# Now redefine the _compile method. This gets executed for each
# object but distutils doesn't have the ability to change compilers
# based on source extension: we add it.
def _compile(obj, src, ext, cc_args, extra_postargs, pp_opts):
if os.path.splitext(src)[1] == '.cu':
# use the cuda for .cu files
self.set_executable('compiler_so', CUDA['nvcc'])
# use only a subset of the extra_postargs, which are 1-1
# translated from the extra_compile_args in the Extension class
postargs = extra_postargs['nvcc']
else:
postargs = extra_postargs['gcc']
super(obj, src, ext, cc_args, postargs, pp_opts)
# Reset the default compiler_so, which we might have changed for cuda
self.compiler_so = default_compiler_so
# Inject our redefined _compile method into the class
self._compile = _compile
# Run the customize_compiler
class custom_build_ext(build_ext):
def build_extensions(self):
customize_compiler_for_nvcc(self.compiler)
build_ext.build_extensions(self)
CUDA = locate_cuda()
# Obtain the numpy include directory. This logic works across numpy versions.
try:
numpy_include = numpy.get_include()
except AttributeError:
numpy_include = numpy.get_numpy_include()
ext = Extension('gpuadder',
sources = ['src/manager.cu', 'wrapper.pyx'],
library_dirs = [CUDA['lib64']],
libraries = ['cudart'],
language = 'c++',
runtime_library_dirs = [CUDA['lib64']],
# This syntax is specific to this build system
# we're only going to use certain compiler args with nvcc
# and not with gcc the implementation of this trick is in
# customize_compiler()
extra_compile_args= {
'gcc': [],
'nvcc': [
'-arch=sm_30', '--ptxas-options=-v', '-c',
'--compiler-options', "'-fPIC'"
]
},
include_dirs = [numpy_include, CUDA['include'], 'src']
)
setup(name = 'gpuadder',
# Random metadata. there's more you can supply
author = '<NAME>',
version = '0.1',
ext_modules = [ext],
# Inject our custom trigger
cmdclass = {'build_ext': custom_build_ext},
# Since the package has c code, the egg cannot be zipped
zip_safe = False)
|
fs/tests/zipfs_binary_test.py | jwilk-forks/pyfilesystem | 314 | 12618541 | <filename>fs/tests/zipfs_binary_test.py<gh_stars>100-1000
"""
Test case for ZipFS binary file reading/writing
Passes ok on Linux, fails on Windows (tested: Win7, 64-bit):
AssertionError: ' \r\n' != ' \n'
"""
import unittest
from fs.zipfs import ZipFS
import os
from six import b
class ZipFsBinaryWriteRead(unittest.TestCase):
test_content = b(chr(32) + chr(10))
def setUp(self):
self.z = ZipFS('test.zip', 'w')
def tearDown(self):
try:
os.remove('test.zip')
except:
pass
def test_binary_write_read(self):
# GIVEN zipfs
z = self.z
# WHEN binary data is written to a test file in zipfs
f = z.open('test.data', 'wb')
f.write(self.test_content)
f.close()
z.close()
# THEN the same binary data is retrieved when opened again
z = ZipFS('test.zip', 'r')
f = z.open('test.data', 'rb')
content = f.read()
f.close()
z.close()
self.assertEqual(content, self.test_content)
if __name__ == '__main__':
unittest.main()
|
tests/test_transchex.py | dylanbuchi/MONAI | 2,971 | 12618543 | <filename>tests/test_transchex.py
# Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
from parameterized import parameterized
from monai.networks import eval_mode
from monai.networks.nets.transchex import Transchex
TEST_CASE_TRANSCHEX = []
for drop_out in [0.4]:
for in_channels in [3]:
for img_size in [224]:
for patch_size in [16, 32]:
for num_language_layers in [2]:
for num_vision_layers in [4]:
for num_mixed_layers in [3]:
for num_classes in [8]:
test_case = [
{
"in_channels": in_channels,
"img_size": (img_size,) * 2,
"patch_size": (patch_size,) * 2,
"num_vision_layers": num_vision_layers,
"num_mixed_layers": num_mixed_layers,
"num_language_layers": num_language_layers,
"num_classes": num_classes,
"drop_out": drop_out,
},
(2, num_classes), # type: ignore
]
TEST_CASE_TRANSCHEX.append(test_case)
class TestPatchEmbeddingBlock(unittest.TestCase):
@parameterized.expand(TEST_CASE_TRANSCHEX)
def test_shape(self, input_param, expected_shape):
net = Transchex(**input_param)
with eval_mode(net):
result = net(torch.randint(2, (2, 512)), torch.randint(2, (2, 512)), torch.randn((2, 3, 224, 224)))
self.assertEqual(result.shape, expected_shape)
def test_ill_arg(self):
with self.assertRaises(ValueError):
Transchex(
in_channels=3,
img_size=(128, 128),
patch_size=(16, 16),
num_language_layers=2,
num_mixed_layers=4,
num_vision_layers=2,
num_classes=2,
drop_out=5.0,
)
with self.assertRaises(ValueError):
Transchex(
in_channels=1,
img_size=(97, 97),
patch_size=(16, 16),
num_language_layers=6,
num_mixed_layers=6,
num_vision_layers=8,
num_classes=8,
drop_out=0.4,
)
if __name__ == "__main__":
unittest.main()
|
Allura/allura/lib/decorators.py | rohankumardubey/allura | 113 | 12618590 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import unicode_literals
from __future__ import absolute_import
import inspect
import sys
import json
import logging
import six
if six.PY3:
from http.cookies import SimpleCookie as Cookie
else:
from Cookie import Cookie
from collections import defaultdict
from six.moves.urllib.parse import unquote
from datetime import datetime
from datetime import timedelta
from decorator import decorator
import wrapt
from paste.deploy.converters import asint
from tg.decorators import before_validate
from tg import request, redirect, session, config
from tg.render import render
from webob import exc
from tg import tmpl_context as c
from tg import response
from webob.exc import HTTPFound, WSGIHTTPException
from allura.lib import helpers as h
from allura.lib import utils
log = logging.getLogger(__name__)
def task(*args, **kw):
"""Decorator that adds a ``.post()`` function to the decorated callable.
Calling ``<original_callable>.post(*args, **kw)`` queues the callable for
execution by a background worker process. All parameters must be
BSON-serializable.
Example usage::
@task
def myfunc():
pass
@task(notifications_disabled=True)
def myotherfunc():
# No email notifications will be sent for c.project during this task
pass
"""
def task_(func):
def post(*args, **kwargs):
delay = kwargs.pop('delay', 0)
flush_immediately = kwargs.pop('flush_immediately', True)
project = getattr(c, 'project', None)
cm = (h.notifications_disabled if project and
kw.get('notifications_disabled') else h.null_contextmanager)
with cm(project):
from allura import model as M
return M.MonQTask.post(func, args, kwargs, delay=delay, flush_immediately=flush_immediately)
# if decorating a class, have to make it a staticmethod
# or it gets a spurious cls argument
func.post = staticmethod(post) if inspect.isclass(func) else post
return func
if len(args) == 1 and callable(args[0]):
return task_(args[0])
return task_
class event_handler(object):
'''Decorator to register event handlers'''
listeners = defaultdict(set)
def __init__(self, *topics):
self.topics = topics
def __call__(self, func):
for t in self.topics:
self.listeners[t].add(func)
return func
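# Illustrative sketch (not part of the original file): registering a listener
# with the decorator above and dispatching to it. The topic name and handler
# body are hypothetical; the registry is the class-level `listeners` defaultdict.
#
# @event_handler('project.created')
# def on_project_created(topic, **kwargs):
#     log.info('handling %s: %s', topic, kwargs)
#
# for listener in event_handler.listeners['project.created']:
#     listener('project.created', project_id=42)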
class require_post(object):
'''
    A decorator to require that a controller action be accessed with a POST only. Use whenever data will be modified by a
controller, since that's what POST is good for. We have CSRF protection middleware on POSTs, too.
'''
def __init__(self, redir=None):
self.redir = redir
def __call__(self, func):
def check_method(remainder, params):
if request.method != 'POST':
if self.redir is not None:
redirect(self.redir)
raise exc.HTTPMethodNotAllowed(headers={str('Allow'): str('POST')})
before_validate(check_method)(func)
return func
@decorator
def reconfirm_auth(func, *args, **kwargs):
'''
A decorator to require the user to reconfirm their login. Useful for sensitive pages.
'''
from allura.lib.plugin import AuthenticationProvider
if request.POST.get('password'):
if AuthenticationProvider.get(request).validate_password(c.user, request.POST['password']):
session['auth-reconfirmed'] = datetime.utcnow()
session.save()
kwargs.pop('password', None)
else:
c.form_errors['password'] = 'Invalid password.'
allowed_timedelta = timedelta(seconds=asint(config.get('auth.reconfirm.seconds', 60)))
last_reconfirm = session.get('auth-reconfirmed', datetime.min)
if datetime.utcnow() - last_reconfirm <= allowed_timedelta:
return func(*args, **kwargs)
else:
return render({}, 'jinja', "allura:templates/reconfirm_auth.html")
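# Illustrative sketch (not part of the original file): guarding a sensitive
# controller action with the decorator above. The controller class and the
# @expose template argument are hypothetical.
#
# class AdminController(object):
#     @expose('jinja:allura:templates/admin_secrets.html')
#     @reconfirm_auth
#     def secrets(self, **kw):
#         return dict(keys=load_secret_keys())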
def getattr_(obj, name, default_thunk):
"Similar to .setdefault in dictionaries."
try:
return getattr(obj, name)
except AttributeError:
default = default_thunk()
setattr(obj, name, default)
return default
@wrapt.decorator
def memoize(func, instance, args, kwargs):
"""
    Cache the method's result for the given args
"""
if instance is None:
# decorating a simple function
dic = getattr_(func, "_memoize_dic", dict)
else:
# decorating a method
dic = getattr_(instance, "_memoize_dic__{}".format(func.__name__), dict)
cache_key = (args, frozenset(list(kwargs.items())))
if cache_key in dic:
return dic[cache_key]
else:
result = func(*args, **kwargs)
dic[cache_key] = result
return result
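# Illustrative sketch (not part of the original file): results are cached per
# call signature, and for bound methods the cache lives on the instance as
# `_memoize_dic__<method name>`. The class below is hypothetical.
#
# class PriceLookup(object):
#     @memoize
#     def rate(self, currency):
#         return expensive_remote_call(currency)
#
# lookup = PriceLookup()
# lookup.rate('EUR')   # computed once
# lookup.rate('EUR')   # returned from lookup._memoize_dic__rate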
def memoize_cleanup(obj):
"""
Remove any _memoize_dic_* keys (if obj is a dict/obj hybrid) that were created by @memoize on methods
"""
for k in list(obj.keys()):
if k.startswith('_memoize_dic'):
del obj[k]
def memorable_forget():
"""
Decorator to mark a controller action as needing to "forget" remembered input values on the next
page render, if we detect that the form post was processed successfully
"""
def _ok_to_forget(response, controller_result, raised):
"""
Look for signals that say it's probably ok to forget remembered inputs for the current form.
Checks here will need to be expanded for controller actions that behave differently
than others upon successful processing of their particular request
"""
# if there is a flash message with status "ok", then we can forget. If it is "error" we cannot.
if response.headers:
cookies = Cookie(response.headers.get('Set-Cookie', ''))
if cookies and 'webflash' in cookies:
webflash_value = json.loads(unquote(cookies['webflash'].value))
if webflash_value['status'] == 'ok':
return True
elif webflash_value['status'] == 'error':
return False
# if the controller raised a 302, we can assume the value will be remembered by the app
# if needed, and forget.
if raised and isinstance(raised, HTTPFound):
return True
return False
def forget(controller_result, raised=None):
"""
Check if the form's inputs can be forgotten, and set the cookie to forget if so.
:param res: the result of the controller action
:param raised: any error (redirect or exception) raised by the controller action
"""
if _ok_to_forget(response, controller_result, raised):
response.set_cookie('memorable_forget', request.path, secure=request.environ['beaker.session'].secure)
@decorator
def _inner(func, *args, **kwargs):
res, raised = (None, None)
try:
res = func(*args, **kwargs)
forget(res)
return res
except WSGIHTTPException as ex:
forget(None, ex)
raise ex
return _inner |
tests/system_health/test_system_status.py | lolyu/sonic-mgmt | 132 | 12618596 | <gh_stars>100-1000
import time
import pytest
from tests.common.utilities import wait_until
pytestmark = [
pytest.mark.topology('any')
]
def test_system_is_running(duthost):
def is_system_ready(duthost):
status = duthost.shell('sudo systemctl is-system-running', module_ignore_errors=True)['stdout']
return status != "starting"
if not wait_until(180, 10, 0, is_system_ready, duthost):
        pytest.fail('System is still "starting" after 180s; it never reached a running state')
|
src/args.py | imatge-upc/rsis | 132 | 12618614 | import argparse
def get_parser():
parser = argparse.ArgumentParser(description='RIASS')
## TRAINING parameters ##
parser.add_argument('--resume', dest='resume',action='store_true',
help=('whether to resume training an existing model '
'(the one with name model_name will be used)'))
parser.set_defaults(resume=False)
# set epoch_resume if you want flags --finetune_after and --update_encoder to be properly
# activated (eg if you stop training for whatever reason at epoch 15, set epoch_resume to 15)
parser.add_argument('-epoch_resume', dest='epoch_resume',default= 0,type=int,
help=('set epoch_resume if you want flags '
'--finetune_after and --update_encoder to be properly '
'activated (eg if you stop training for whatever reason '
'at epoch 15, set epoch_resume to 15)'))
parser.add_argument('-seed', dest='seed',default = 123, type=int)
parser.add_argument('-batch_size', dest='batch_size', default = 28, type=int)
parser.add_argument('-lr', dest='lr', default = 1e-3,type=float)
parser.add_argument('-lr_cnn', dest='lr_cnn', default = 1e-6,type=float)
parser.add_argument('-optim_cnn', dest='optim_cnn', default = 'adam',
choices=['adam','sgd','rmsprop'])
parser.add_argument('-momentum', dest='momentum', default =0.9,type=float)
parser.add_argument('-weight_decay', dest='weight_decay', default = 1e-6, type=float)
parser.add_argument('-weight_decay_cnn', dest='weight_decay_cnn', default = 1e-6, type=float)
parser.add_argument('-optim', dest='optim', default = 'adam',
choices=['adam','sgd','rmsprop'])
parser.add_argument('-maxseqlen', dest='maxseqlen', default = 10, type=int)
parser.add_argument('-gt_maxseqlen', dest='gt_maxseqlen', default = 20, type=int)
parser.add_argument('-best_val_loss', dest='best_val_loss', default = 1000, type=float)
parser.add_argument('--crop', dest='crop', action='store_true')
parser.set_defaults(crop=False)
parser.add_argument('--smooth_curves',dest='smooth_curves', action='store_true')
parser.set_defaults(smooth_curves=False)
# base model fine tuning
parser.add_argument('-finetune_after', dest='finetune_after', default = 0, type=int,
help=('epoch number to start finetuning. set -1 to not finetune.'
'there is a patience term that can allow starting to fine tune '
'earlier (does not apply if value is -1)'))
parser.add_argument('--update_encoder', dest='update_encoder', action='store_true',
help='used in sync with finetune_after. no need to activate.')
parser.set_defaults(update_encoder=False)
parser.add_argument('--transfer',dest='transfer', action='store_true')
parser.set_defaults(transfer=False)
parser.add_argument('-transfer_from', dest='transfer_from', default = 'model')
parser.add_argument('--curriculum_learning',dest='curriculum_learning', action='store_true')
parser.set_defaults(curriculum_learning=False)
parser.add_argument('-steps_cl', dest='steps_cl', default=1, type=int)
parser.add_argument('-min_steps', dest='min_steps', default=1, type=int)
parser.add_argument('-min_delta', dest='min_delta', default=0.0, type=float)
# Cross entropy loss
parser.add_argument('-class_loss_after', dest='class_loss_after', default=20, type=int,
help=('epoch number to start training the classification loss. '
'set to -1 to not do it. A patience term can allow to start '
'training with this loss (does not apply if value is -1)'))
parser.add_argument('--use_class_loss', dest='use_class_loss', action='store_true')
parser.set_defaults(use_class_loss=False)
parser.add_argument('-stop_loss_after', dest='stop_loss_after', default = 3000, type=int,
help=('epoch number to start training the stopping loss. '
'set to -1 to not do it. A patience term can allow to start '
'training with this loss (does not apply if value is -1)'))
parser.add_argument('--use_stop_loss', dest='use_stop_loss', action = 'store_true')
parser.set_defaults(use_stop_loss=False)
# stopping criterion
parser.add_argument('-patience', dest='patience', default = 15, type=int,
help=('patience term to activate flags such as '
'use_class_loss, feed_prediction and update_encoder if '
'their matching vars are not -1'))
parser.add_argument('-patience_stop', dest='patience_stop', default = 60, type=int,
help='patience to stop training.')
parser.add_argument('-max_epoch', dest='max_epoch', default = 4000, type=int)
# visualization and logging
parser.add_argument('-print_every', dest='print_every', default = 10, type=int)
parser.add_argument('--log_term', dest='log_term', action='store_true',
help='if activated, will show logs in stdout instead of log file.')
parser.set_defaults(log_term=False)
parser.add_argument('--visdom', dest='visdom', action='store_true')
parser.set_defaults(visdom=False)
parser.add_argument('-port',dest='port',default=8097, type=int, help='visdom port')
parser.add_argument('-server',dest='server',default='http://localhost', help='visdom server')
# loss weights
parser.add_argument('-class_weight',dest='class_weight',default=0.1, type=float)
parser.add_argument('-iou_weight',dest='iou_weight',default=1.0, type=float)
parser.add_argument('-stop_weight',dest='stop_weight',default=0.5, type=float)
parser.add_argument('-stop_balance_weight',dest='stop_balance_weight',default=0.5, type=float)
# augmentation
parser.add_argument('--augment', dest='augment', action='store_true')
parser.set_defaults(augment=False)
parser.add_argument('-rotation', dest='rotation', default = 10, type=int)
parser.add_argument('-translation', dest='translation', default = 0.1, type=float)
parser.add_argument('-shear', dest='shear', default = 0.1, type=float)
parser.add_argument('-zoom', dest='zoom', default = 0.7, type=float)
# GPU
parser.add_argument('--cpu', dest='use_gpu', action='store_false')
parser.set_defaults(use_gpu=True)
parser.add_argument('-ngpus', dest='ngpus', default=1,type=int)
parser.add_argument('-base_model', dest='base_model', default = 'resnet101',
choices=['resnet101','resnet50','resnet34','vgg16'])
parser.add_argument('-skip_mode', dest='skip_mode', default = 'concat',
choices=['sum','concat','mul','none'])
parser.add_argument('-model_name', dest='model_name', default='model')
parser.add_argument('-log_file', dest='log_file', default='train.log')
parser.add_argument('-hidden_size', dest='hidden_size', default = 128, type=int)
parser.add_argument('-kernel_size', dest='kernel_size', default = 3, type=int)
parser.add_argument('-dropout', dest='dropout', default = 0.0, type=float)
parser.add_argument('-dropout_stop', dest='dropout_stop', default = 0.0, type=float)
parser.add_argument('-dropout_cls', dest='dropout_cls', default = 0.0, type=float)
# dataset parameters
parser.add_argument('-imsize',dest='imsize', default=256, type=int)
parser.add_argument('--resize',dest='resize', action='store_true')
parser.set_defaults(resize=False)
parser.add_argument('-num_classes', dest='num_classes', default = 21, type=int)
parser.add_argument('-dataset', dest='dataset', default = 'pascal',choices=['pascal','cityscapes', 'leaves'])
parser.add_argument('-pascal_dir', dest='pascal_dir',
default = '/work/asalvador/dev/data/rsis/VOCAug/')
parser.add_argument('-cityscapes_dir', dest='cityscapes_dir',
default='/gpfs/scratch/bsc31/bsc31429/CityScapes/')
parser.add_argument('-leaves_dir', dest='leaves_dir',
default='/gpfs/scratch/bsc31/bsc31429/LeavesDataset/A1/')
parser.add_argument('-leaves_test_dir', dest='leaves_test_dir',
default = '/gpfs/scratch/bsc31/bsc31429/CVPPP2014_LSC_testing_data/A1/')
parser.add_argument('-num_workers', dest='num_workers', default = 4, type=int)
# testing
parser.add_argument('-eval_split',dest='eval_split', default='test')
parser.add_argument('-mask_th',dest='mask_th', default=0.5, type=float)
parser.add_argument('-stop_th',dest='stop_th', default=0.5, type=float)
parser.add_argument('-class_th',dest='class_th', default=0.5, type=float)
parser.add_argument('-max_dets',dest='max_dets', default=100, type=int)
parser.add_argument('-min_size',dest='min_size', default=0.001, type=float)
parser.add_argument('-cat_id',dest='cat_id', default=-1,type=int)
parser.add_argument('--ignore_cats',dest='use_cats', action='store_false')
parser.add_argument('--display', dest='display', action='store_true')
parser.add_argument('--no_display_text', dest='no_display_text', action='store_true')
parser.add_argument('--all_classes',dest='all_classes', action='store_true')
parser.add_argument('--no_run_coco_eval',dest='no_run_coco_eval', action='store_true')
parser.add_argument('--display_route', dest='display_route', action='store_true')
parser.set_defaults(display=False)
parser.set_defaults(display_route=False)
parser.set_defaults(use_cats=True)
parser.set_defaults(all_classes=False)
parser.set_defaults(no_display_text=False)
parser.set_defaults(use_gt_cats=False)
parser.set_defaults(use_gt_masks=False)
parser.set_defaults(use_gt_stop=False)
return parser
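# Illustrative invocation (not part of the original file); the script name and
# flag values are hypothetical, but every flag shown is defined above:
#
#   python train.py -dataset pascal -batch_size 16 -lr 1e-3 --augment --resize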
if __name__ =="__main__":
parser = get_parser()
args_dict = parser.parse_args()
|
twisted/test/test_defgen.py | hawkowl/twisted | 9,953 | 12618677 | <gh_stars>1000+
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.internet.defer.deferredGenerator} and related APIs.
"""
from __future__ import division, absolute_import
from twisted.internet import reactor
from twisted.trial import unittest
from twisted.internet.defer import waitForDeferred, deferredGenerator, Deferred
from twisted.internet.defer import inlineCallbacks, returnValue
from twisted.internet import defer
from twisted.trial.util import suppress as SUPPRESS
from twisted.python.util import runWithWarningsSuppressed
def getThing():
d = Deferred()
reactor.callLater(0, d.callback, "hi")
return d
def getOwie():
d = Deferred()
def CRAP():
d.errback(ZeroDivisionError('OMG'))
reactor.callLater(0, CRAP)
return d
# NOTE: most of the tests in DeferredGeneratorTests are duplicated
# with slightly different syntax for the InlineCallbacksTests below.
class TerminalException(Exception):
pass
class BaseDefgenTests:
"""
This class sets up a bunch of test cases which will test both
deferredGenerator and inlineCallbacks based generators. The subclasses
DeferredGeneratorTests and InlineCallbacksTests each provide the actual
generator implementations tested.
"""
def testBasics(self):
"""
Test that a normal deferredGenerator works. Tests yielding a
deferred which callbacks, as well as a deferred errbacks. Also
ensures returning a final value works.
"""
return self._genBasics().addCallback(self.assertEqual, 'WOOSH')
def testBuggy(self):
"""
Ensure that a buggy generator properly signals a Failure
condition on result deferred.
"""
return self.assertFailure(self._genBuggy(), ZeroDivisionError)
def testNothing(self):
"""Test that a generator which never yields results in None."""
return self._genNothing().addCallback(self.assertEqual, None)
def testHandledTerminalFailure(self):
"""
Create a Deferred Generator which yields a Deferred which fails and
handles the exception which results. Assert that the Deferred
Generator does not errback its Deferred.
"""
return self._genHandledTerminalFailure().addCallback(self.assertEqual, None)
def testHandledTerminalAsyncFailure(self):
"""
Just like testHandledTerminalFailure, only with a Deferred which fires
asynchronously with an error.
"""
d = defer.Deferred()
deferredGeneratorResultDeferred = self._genHandledTerminalAsyncFailure(d)
d.errback(TerminalException("Handled Terminal Failure"))
return deferredGeneratorResultDeferred.addCallback(
self.assertEqual, None)
def testStackUsage(self):
"""
Make sure we don't blow the stack when yielding immediately
available deferreds.
"""
return self._genStackUsage().addCallback(self.assertEqual, 0)
def testStackUsage2(self):
"""
Make sure we don't blow the stack when yielding immediately
available values.
"""
return self._genStackUsage2().addCallback(self.assertEqual, 0)
def deprecatedDeferredGenerator(f):
"""
Calls L{deferredGenerator} while suppressing the deprecation warning.
@param f: Function to call
@return: Return value of function.
"""
return runWithWarningsSuppressed(
[ SUPPRESS(message="twisted.internet.defer.deferredGenerator was "
"deprecated") ],
deferredGenerator, f)
class DeferredGeneratorTests(BaseDefgenTests, unittest.TestCase):
# First provide all the generator impls necessary for BaseDefgenTests
@deprecatedDeferredGenerator
def _genBasics(self):
x = waitForDeferred(getThing())
yield x
x = x.getResult()
self.assertEqual(x, "hi")
ow = waitForDeferred(getOwie())
yield ow
try:
ow.getResult()
except ZeroDivisionError as e:
self.assertEqual(str(e), 'OMG')
yield "WOOSH"
return
@deprecatedDeferredGenerator
def _genBuggy(self):
yield waitForDeferred(getThing())
1//0
@deprecatedDeferredGenerator
def _genNothing(self):
if 0: yield 1
@deprecatedDeferredGenerator
def _genHandledTerminalFailure(self):
x = waitForDeferred(defer.fail(TerminalException("Handled Terminal Failure")))
yield x
try:
x.getResult()
except TerminalException:
pass
@deprecatedDeferredGenerator
def _genHandledTerminalAsyncFailure(self, d):
x = waitForDeferred(d)
yield x
try:
x.getResult()
except TerminalException:
pass
def _genStackUsage(self):
for x in range(5000):
# Test with yielding a deferred
x = waitForDeferred(defer.succeed(1))
yield x
x = x.getResult()
yield 0
_genStackUsage = deprecatedDeferredGenerator(_genStackUsage)
def _genStackUsage2(self):
for x in range(5000):
# Test with yielding a random value
yield 1
yield 0
_genStackUsage2 = deprecatedDeferredGenerator(_genStackUsage2)
# Tests unique to deferredGenerator
def testDeferredYielding(self):
"""
Ensure that yielding a Deferred directly is trapped as an
error.
"""
# See the comment _deferGenerator about d.callback(Deferred).
def _genDeferred():
yield getThing()
_genDeferred = deprecatedDeferredGenerator(_genDeferred)
return self.assertFailure(_genDeferred(), TypeError)
suppress = [
SUPPRESS(message='twisted.internet.defer.waitForDeferred was '
'deprecated')
]
class InlineCallbacksTests(BaseDefgenTests, unittest.TestCase):
# First provide all the generator impls necessary for BaseDefgenTests
def _genBasics(self):
x = yield getThing()
self.assertEqual(x, "hi")
try:
yield getOwie()
except ZeroDivisionError as e:
self.assertEqual(str(e), 'OMG')
returnValue("WOOSH")
_genBasics = inlineCallbacks(_genBasics)
def _genBuggy(self):
yield getThing()
1/0
_genBuggy = inlineCallbacks(_genBuggy)
def _genNothing(self):
if 0: yield 1
_genNothing = inlineCallbacks(_genNothing)
def _genHandledTerminalFailure(self):
try:
yield defer.fail(TerminalException("Handled Terminal Failure"))
except TerminalException:
pass
_genHandledTerminalFailure = inlineCallbacks(_genHandledTerminalFailure)
def _genHandledTerminalAsyncFailure(self, d):
try:
yield d
except TerminalException:
pass
_genHandledTerminalAsyncFailure = inlineCallbacks(
_genHandledTerminalAsyncFailure)
def _genStackUsage(self):
for x in range(5000):
# Test with yielding a deferred
yield defer.succeed(1)
returnValue(0)
_genStackUsage = inlineCallbacks(_genStackUsage)
def _genStackUsage2(self):
for x in range(5000):
# Test with yielding a random value
yield 1
returnValue(0)
_genStackUsage2 = inlineCallbacks(_genStackUsage2)
# Tests unique to inlineCallbacks
def testYieldNonDeferred(self):
"""
Ensure that yielding a non-deferred passes it back as the
result of the yield expression.
@return: A L{twisted.internet.defer.Deferred}
@rtype: L{twisted.internet.defer.Deferred}
"""
def _test():
yield 5
returnValue(5)
_test = inlineCallbacks(_test)
return _test().addCallback(self.assertEqual, 5)
def testReturnNoValue(self):
"""Ensure a standard python return results in a None result."""
def _noReturn():
yield 5
return
_noReturn = inlineCallbacks(_noReturn)
return _noReturn().addCallback(self.assertEqual, None)
def testReturnValue(self):
"""Ensure that returnValue works."""
def _return():
yield 5
returnValue(6)
_return = inlineCallbacks(_return)
return _return().addCallback(self.assertEqual, 6)
def test_nonGeneratorReturn(self):
"""
Ensure that C{TypeError} with a message about L{inlineCallbacks} is
raised when a non-generator returns something other than a generator.
"""
def _noYield():
return 5
_noYield = inlineCallbacks(_noYield)
self.assertIn("inlineCallbacks",
str(self.assertRaises(TypeError, _noYield)))
def test_nonGeneratorReturnValue(self):
"""
Ensure that C{TypeError} with a message about L{inlineCallbacks} is
raised when a non-generator calls L{returnValue}.
"""
def _noYield():
returnValue(5)
_noYield = inlineCallbacks(_noYield)
self.assertIn("inlineCallbacks",
str(self.assertRaises(TypeError, _noYield)))
class DeprecateDeferredGeneratorTests(unittest.SynchronousTestCase):
"""
Tests that L{DeferredGeneratorTests} and L{waitForDeferred} are
deprecated.
"""
def test_deferredGeneratorDeprecated(self):
"""
L{deferredGenerator} is deprecated.
"""
@deferredGenerator
def decoratedFunction():
yield None
warnings = self.flushWarnings([self.test_deferredGeneratorDeprecated])
self.assertEqual(len(warnings), 1)
self.assertEqual(warnings[0]['category'], DeprecationWarning)
self.assertEqual(
warnings[0]['message'],
"twisted.internet.defer.deferredGenerator was deprecated in "
"Twisted 15.0.0; please use "
"twisted.internet.defer.inlineCallbacks instead")
def test_waitForDeferredDeprecated(self):
"""
L{waitForDeferred} is deprecated.
"""
d = Deferred()
waitForDeferred(d)
warnings = self.flushWarnings([self.test_waitForDeferredDeprecated])
self.assertEqual(len(warnings), 1)
self.assertEqual(warnings[0]['category'], DeprecationWarning)
self.assertEqual(
warnings[0]['message'],
"twisted.internet.defer.waitForDeferred was deprecated in "
"Twisted 15.0.0; please use "
"twisted.internet.defer.inlineCallbacks instead")
|
deeppavlov/models/go_bot/nlu/dto/text_vectorization_response.py | xbodx/DeepPavlov | 5,893 | 12618683 | class TextVectorizationResponse:
"""
    Stores the BOW encoding of a text together with its token embeddings,
    both padded and aggregated (e.g. averaged).
"""
def __init__(self, tokens_bow_encoded, tokens_aggregated_embedding, tokens_embeddings_padded):
self.tokens_bow_encoded = tokens_bow_encoded
self.tokens_aggregated_embedding = tokens_aggregated_embedding
self.tokens_embeddings_padded = tokens_embeddings_padded
|
src/sage/categories/commutative_rings.py | UCD4IDS/sage | 1,742 | 12618688 | <reponame>UCD4IDS/sage
r"""
Commutative rings
"""
# ****************************************************************************
# Copyright (C) 2005 <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# 2008 <NAME> (CNRS) <<EMAIL>>
# 2008-2013 <NAME> <nthiery at users.sf.net>
#
# Distributed under the terms of the GNU General Public License (GPL)
# https://www.gnu.org/licenses/
# *****************************************************************************
from sage.categories.category_with_axiom import CategoryWithAxiom
from sage.categories.cartesian_product import CartesianProductsCategory
class CommutativeRings(CategoryWithAxiom):
"""
The category of commutative rings
commutative rings with unity, i.e. rings with commutative * and
a multiplicative identity
EXAMPLES::
sage: C = CommutativeRings(); C
Category of commutative rings
sage: C.super_categories()
[Category of rings, Category of commutative monoids]
TESTS::
sage: TestSuite(C).run()
sage: QQ['x,y,z'] in CommutativeRings()
True
sage: GroupAlgebra(DihedralGroup(3), QQ) in CommutativeRings()
False
sage: MatrixSpace(QQ,2,2) in CommutativeRings()
False
GroupAlgebra should be fixed::
sage: GroupAlgebra(CyclicPermutationGroup(3), QQ) in CommutativeRings() # todo: not implemented
True
"""
class ParentMethods:
def _test_divides(self, **options):
r"""
Run generic tests on the method :meth:`divides`.
EXAMPLES::
sage: ZZ._test_divides()
"""
tester = self._tester(**options)
# 1. is there a divides method ?
a = self.an_element()
try:
a.divides
except AttributeError:
return
# 2. divisibility of 0 and 1
z = self.zero()
o = self.one()
tester.assertTrue(z.divides(z))
tester.assertTrue(o.divides(o))
tester.assertTrue(o.divides(z))
tester.assertIs(z.divides(o), self.is_zero())
if not self.is_exact():
return
# 3. divisibility of some elements
for a, b in tester.some_elements(repeat=2):
try:
test = a.divides(a * b)
except NotImplementedError:
pass
else:
tester.assertTrue(test)
def over(self, base=None, gen=None, gens=None, name=None, names=None):
r"""
Return this ring, considered as an extension of ``base``.
INPUT:
- ``base`` -- a commutative ring or a morphism or ``None``
(default: ``None``); the base of this extension or its defining
morphism
- ``gen`` -- a generator of this extension (over its base) or ``None``
(default: ``None``);
- ``gens`` -- a list of generators of this extension (over its base)
or ``None`` (default: ``None``);
- ``name`` -- a variable name or ``None`` (default: ``None``)
- ``names`` -- a list or a tuple of variable names or ``None``
(default: ``None``)
EXAMPLES:
We construct an extension of finite fields::
sage: F = GF(5^2)
sage: k = GF(5^4)
sage: z4 = k.gen()
sage: K = k.over(F)
sage: K
Field in z4 with defining polynomial x^2 + (4*z2 + 3)*x + z2 over its base
If not explicitly given, the default generator of the top ring
(here k) is used and the same name is kept::
sage: K.gen()
z4
sage: K(z4)
z4
However, it is possible to specify another generator and/or
another name. For example::
sage: Ka = k.over(F, name='a')
sage: Ka
Field in a with defining polynomial x^2 + (4*z2 + 3)*x + z2 over its base
sage: Ka.gen()
a
sage: Ka(z4)
a
sage: Kb = k.over(F, gen=-z4+1, name='b')
sage: Kb
Field in b with defining polynomial x^2 + z2*x + 4 over its base
sage: Kb.gen()
b
sage: Kb(-z4+1)
b
Note that the shortcut ``K.<a>`` is also available::
sage: KKa.<a> = k.over(F)
sage: KKa is Ka
True
Building an extension on top of another extension is allowed::
sage: L = GF(5^12).over(K)
sage: L
Field in z12 with defining polynomial x^3 + (1 + (4*z2 + 2)*z4)*x^2 + (2 + 2*z4)*x - z4 over its base
sage: L.base_ring()
Field in z4 with defining polynomial x^2 + (4*z2 + 3)*x + z2 over its base
The successive bases of an extension are accessible via the
method :meth:`sage.rings.ring_extension.RingExtension_generic.bases`::
sage: L.bases()
[Field in z12 with defining polynomial x^3 + (1 + (4*z2 + 2)*z4)*x^2 + (2 + 2*z4)*x - z4 over its base,
Field in z4 with defining polynomial x^2 + (4*z2 + 3)*x + z2 over its base,
Finite Field in z2 of size 5^2]
When ``base`` is omitted, the canonical base of the ring is used::
sage: S.<x> = QQ[]
sage: E = S.over()
sage: E
Univariate Polynomial Ring in x over Rational Field over its base
sage: E.base_ring()
Rational Field
Here is an example where ``base`` is a defining morphism::
sage: k.<a> = QQ.extension(x^2 - 2)
sage: l.<b> = QQ.extension(x^4 - 2)
sage: f = k.hom([b^2])
sage: L = l.over(f)
sage: L
Field in b with defining polynomial x^2 - a over its base
sage: L.base_ring()
Number Field in a with defining polynomial x^2 - 2
Similarly, one can create a tower of extensions::
sage: K = k.over()
sage: L = l.over(Hom(K,l)(f))
sage: L
Field in b with defining polynomial x^2 - a over its base
sage: L.base_ring()
Field in a with defining polynomial x^2 - 2 over its base
sage: L.bases()
[Field in b with defining polynomial x^2 - a over its base,
Field in a with defining polynomial x^2 - 2 over its base,
Rational Field]
"""
from sage.rings.ring_extension import RingExtension
if name is not None:
if names is not None:
raise ValueError("keyword argument 'name' cannot be combined with 'names'")
names = (name,)
if gen is not None:
if gens is not None:
raise ValueError("keyword argument 'gen' cannot be combined with 'gens'")
gens = (gen,)
return RingExtension(self, base, gens, names)
class ElementMethods:
pass
class Finite(CategoryWithAxiom):
r"""
Check that Sage knows that Cartesian products of finite commutative
rings is a finite commutative ring.
EXAMPLES::
sage: cartesian_product([Zmod(34), GF(5)]) in Rings().Commutative().Finite()
True
"""
class ParentMethods:
def cyclotomic_cosets(self, q, cosets=None):
r"""
Return the (multiplicative) orbits of ``q`` in the ring.
Let `R` be a finite commutative ring. The group of invertible
elements `R^*` in `R` gives rise to a group action on `R` by
multiplication. An orbit of the subgroup generated by an
invertible element `q` is called a `q`-*cyclotomic coset* (since
in a finite ring, each invertible element is a root of unity).
These cosets arise in the theory of minimal polynomials of
finite fields, duadic codes and combinatorial designs. Fix a
primitive element `z` of `GF(q^k)`. The minimal polynomial of
`z^s` over `GF(q)` is given by
.. MATH::
M_s(x) = \prod_{i \in C_s} (x - z^i),
where `C_s` is the `q`-cyclotomic coset mod `n` containing `s`,
`n = q^k - 1`.
.. NOTE::
When `R = \ZZ / n \ZZ` the smallest element of each coset is
sometimes called a *coset leader*. This function returns
sorted lists so that the coset leader will always be the
first element of the coset.
INPUT:
- ``q`` -- an invertible element of the ring
- ``cosets`` -- an optional lists of elements of ``self``. If
provided, the function only return the list of cosets that
contain some element from ``cosets``.
OUTPUT:
A list of lists.
EXAMPLES::
sage: Zmod(11).cyclotomic_cosets(2)
[[0], [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]]
sage: Zmod(15).cyclotomic_cosets(2)
[[0], [1, 2, 4, 8], [3, 6, 9, 12], [5, 10], [7, 11, 13, 14]]
Since the group of invertible elements of a finite field is
cyclic, the set of squares is a particular case of cyclotomic
coset::
sage: K = GF(25,'z')
sage: a = K.multiplicative_generator()
sage: K.cyclotomic_cosets(a**2,cosets=[1])
[[1, 2, 3, 4, z + 1, z + 3,
2*z + 1, 2*z + 2, 3*z + 3,
3*z + 4, 4*z + 2, 4*z + 4]]
sage: sorted(b for b in K if not b.is_zero() and b.is_square())
[1, 2, 3, 4, z + 1, z + 3,
2*z + 1, 2*z + 2, 3*z + 3,
3*z + 4, 4*z + 2, 4*z + 4]
We compute some examples of minimal polynomials::
sage: K = GF(27,'z')
sage: a = K.multiplicative_generator()
sage: R.<X> = PolynomialRing(K, 'X')
sage: a.minimal_polynomial('X')
X^3 + 2*X + 1
sage: cyc3 = Zmod(26).cyclotomic_cosets(3,cosets=[1]); cyc3
[[1, 3, 9]]
sage: prod(X - a**i for i in cyc3[0])
X^3 + 2*X + 1
sage: (a**7).minimal_polynomial('X')
X^3 + X^2 + 2*X + 1
sage: cyc7 = Zmod(26).cyclotomic_cosets(3,cosets=[7]); cyc7
[[7, 11, 21]]
sage: prod(X - a**i for i in cyc7[0])
X^3 + X^2 + 2*X + 1
Cyclotomic cosets of fields are useful in combinatorial design
theory to provide so called difference families (see
:wikipedia:`Difference_set` and
:mod:`~sage.combinat.designs.difference_family`). This is
illustrated on the following examples::
sage: K = GF(5)
sage: a = K.multiplicative_generator()
sage: H = K.cyclotomic_cosets(a**2, cosets=[1,2]); H
[[1, 4], [2, 3]]
sage: sorted(x-y for D in H for x in D for y in D if x != y)
[1, 2, 3, 4]
sage: K = GF(37)
sage: a = K.multiplicative_generator()
sage: H = K.cyclotomic_cosets(a**4, cosets=[1]); H
[[1, 7, 9, 10, 12, 16, 26, 33, 34]]
sage: sorted(x-y for D in H for x in D for y in D if x != y)
[1, 1, 2, 2, 3, 3, 4, 4, 5, 5, ..., 33, 34, 34, 35, 35, 36, 36]
The method ``cyclotomic_cosets`` works on any finite commutative
ring::
sage: R = cartesian_product([GF(7), Zmod(14)])
sage: a = R((3,5))
sage: R.cyclotomic_cosets((3,5), [(1,1)])
[[(1, 1), (2, 11), (3, 5), (4, 9), (5, 3), (6, 13)]]
"""
q = self(q)
try:
~q
except ZeroDivisionError:
                    raise ValueError("%s is not invertible in %s" % (q, self))
if cosets is None:
rest = set(self)
else:
rest = set(self(x) for x in cosets)
orbits = []
while rest:
x0 = rest.pop()
o = [x0]
x = q*x0
while x != x0:
o.append(x)
rest.discard(x)
x *= q
o.sort()
orbits.append(o)
orbits.sort()
return orbits
class CartesianProducts(CartesianProductsCategory):
def extra_super_categories(self):
r"""
Let Sage knows that Cartesian products of commutative rings is a
commutative ring.
EXAMPLES::
sage: CommutativeRings().Commutative().CartesianProducts().extra_super_categories()
[Category of commutative rings]
sage: cartesian_product([ZZ, Zmod(34), QQ, GF(5)]) in CommutativeRings()
True
"""
return [CommutativeRings()]
|
device/usb/tools/usb_ids.py | kjthegod/chromium | 231 | 12618697 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import itertools
import optparse
import re
VENDOR_PATTERN = re.compile("^(?P<id>[0-9a-fA-F]{4})\s+(?P<name>.+)$")
PRODUCT_PATTERN = re.compile("^\t(?P<id>[0-9a-fA-F]{4})\s+(?P<name>.+)$")
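# For reference, the usb.ids layout these patterns expect looks like the
# illustrative fragment below: a vendor line is four hex digits, whitespace and
# the vendor name; each of that vendor's products follows on a tab-indented line.
#
#   1d6b  Linux Foundation
#   <TAB>0002  2.0 root hub
#   <TAB>0003  3.0 root hub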
def EscapeName(name):
name = name.replace("\\", "\\\\")
name = name.replace("\"", "\\\"")
name = name.replace("?", "\?")
return name
def ParseTable(input_path):
input_file = open(input_path, "r")
input = input_file.read().split("\n")
input_file.close()
table = {}
vendor = None
for line in input:
vendor_match = VENDOR_PATTERN.match(line)
if vendor_match:
if vendor:
table[vendor["id"]] = vendor
vendor = {}
vendor["id"] = int(vendor_match.group("id"), 16)
vendor["name"] = vendor_match.group("name")
vendor["products"] = []
continue
product_match = PRODUCT_PATTERN.match(line)
if product_match:
if not vendor:
raise Exception("Product seems to appear before vendor.")
product = {}
product["id"] = int(product_match.group("id"), 16)
product["name"] = product_match.group("name")
vendor["products"].append(product)
  # Flush the last vendor parsed; without this the final vendor block in the
  # input file would be silently dropped from the table.
  if vendor:
    table[vendor["id"]] = vendor
  return table
def GenerateDeviceDefinitions(table):
output = ""
for vendor_id in sorted(table.keys()):
vendor = table[vendor_id]
if len(vendor["products"]) == 0:
continue
output += "static const UsbProduct vendor_%.4x_products[] = {\n" % \
vendor["id"]
for product in vendor["products"]:
output += " {0x%.4x, \"%s\"},\n" % (product["id"],
EscapeName(product["name"]))
output += "};\n"
return output
def GenerateVendorDefinitions(table):
output = "const size_t UsbIds::vendor_size_ = %d;\n" % len(table.keys())
output += "const UsbVendor UsbIds::vendors_[] = {\n"
for vendor_id in sorted(table.keys()):
vendor = table[vendor_id]
product_table = "NULL"
if len(vendor["products"]) != 0:
product_table = "vendor_%.4x_products" % (vendor["id"])
output += " {0x%.4x, \"%s\", %d, %s},\n" % (vendor["id"],
EscapeName(vendor["name"]), len(vendor["products"]), product_table)
output += "};\n"
return output
if __name__ == "__main__":
parser = optparse.OptionParser(
description="Generates a C++ USB ID lookup table.")
parser.add_option("-i", "--input", help="Path to usb.ids")
parser.add_option("-o", "--output", help="Output file path")
(opts, args) = parser.parse_args()
table = ParseTable(opts.input)
output = """// Generated from %s
#ifndef GENERATED_USB_IDS_H_
#define GENERATED_USB_IDS_H_
#include "device/usb/usb_ids.h"
namespace device {
""" % (opts.input)
output += GenerateDeviceDefinitions(table)
output += GenerateVendorDefinitions(table)
output += """
} // namespace device
#endif // GENERATED_USB_IDS_H_
"""
output_file = open(opts.output, "w+")
output_file.write(output)
output_file.close()
|
django/CVE-2021-35042/web/vuln/models.py | nobgr/vulhub | 9,681 | 12618700 | from django.db import models
# Create your models here.
class Collection(models.Model):
name = models.CharField(max_length=128)
|
ansible/roles/lib_git/build/lib/base.py | fahlmant/openshift-tools | 164 | 12618720 | # pylint: skip-file
# pylint: disable=too-many-lines
# these are already imported inside of the ssh library
#import os
#import subprocess
class GitCLIError(Exception):
'''Exception class for openshiftcli'''
pass
# pylint: disable=too-few-public-methods
class GitCLI(object):
''' Class to wrap the command line tools '''
def __init__(self,
path,
verbose=False,
ssh_key=None,
author=None):
''' Constructor for GitCLI '''
self.path = path
self.verbose = verbose
self.ssh_key = ssh_key
self.author = author
self.environment_vars = os.environ.copy()
if self.author:
author_dict = {}
author_list = author.split('<')
            author_dict['GIT_COMMITTER_NAME'] = author_list[0].strip()
            # Email is the '<...>' portion when present (not a copy of the name).
            email_part = author_list[1] if len(author_list) > 1 else author_list[0]
            author_dict['GIT_COMMITTER_EMAIL'] = email_part.strip().rstrip('>')
self.environment_vars.update(author_dict)
def _add(self, files_to_add=None):
''' git add '''
cmd = ["add", "--no-ignore-removal"]
if files_to_add:
cmd.extend(files_to_add)
else:
cmd.append('.')
results = self.git_cmd(cmd)
return results
def _commit(self, msg, author=None):
''' git commit with message '''
cmd = ["commit", "-m", msg]
if author:
cmd += ["--author", author]
results = self.git_cmd(cmd)
return results
def _clone(self, repo, dest, bare=False):
''' git clone '''
cmd = ["clone"]
if bare:
cmd += ["--bare"]
cmd += [repo, dest]
results = self.git_cmd(cmd)
return results
def _fetch(self, remote):
''' git fetch '''
cmd = ["fetch"]
cmd += [remote]
results = self.git_cmd(cmd, output=True, output_type='raw')
return results
def _status(self, porcelain=False, show_untracked=True):
''' Do a git status '''
cmd = ["status"]
if porcelain:
cmd.append('--porcelain')
if show_untracked:
cmd.append('--untracked-files=normal')
else:
cmd.append('--untracked-files=no')
results = self.git_cmd(cmd, output=True, output_type='raw')
return results
def _checkout(self, branch):
''' Do a git checkout to <branch> '''
cmd = ["checkout", branch]
results = self.git_cmd(cmd, output=True, output_type='raw')
return results
def _get_current_branch(self):
        ''' Return the current branch (git describe --contains --all HEAD) '''
cmd = ["describe", "--contains", "--all", "HEAD"]
results = self.git_cmd(cmd, output=True, output_type='raw')
results['results'] = results['results'].rstrip()
return results
def _merge(self, merge_id):
        ''' Do a git merge of <merge_id> into the current branch '''
cmd = ["merge", merge_id]
results = self.git_cmd(cmd, output=True, output_type='raw')
return results
def _push(self, remote, src_branch, dest_branch):
        ''' Do a git push of src_branch to dest_branch on the given remote '''
push_branches = src_branch + ":" + dest_branch
cmd = ["push", remote, push_branches]
results = self.git_cmd(cmd, output=True, output_type='raw')
return results
def _remote_update(self):
''' Do a git remote update '''
cmd = ["remote", "update"]
results = self.git_cmd(cmd, output=True, output_type='raw')
return results
def _diff(self, diff_branch):
''' Do a git diff diff_branch'''
cmd = ["diff", diff_branch]
results = self.git_cmd(cmd, output=True, output_type='raw')
return results
def _rebase(self, rebase_branch):
''' Do a git rebase rebase_branch'''
cmd = ["rebase", rebase_branch]
results = self.git_cmd(cmd, output=True, output_type='raw')
return results
def _config(self, get_args):
''' Do a git config --get <get_args> '''
cmd = ["config", '--get', get_args]
results = self.git_cmd(cmd, output=True, output_type='raw')
return results
def git_cmd(self, cmd, output=False, output_type='json'):
'''Base command for git '''
cmds = ['/usr/bin/git']
cmds.extend(cmd)
rval = {}
results = ''
err = None
if self.verbose:
print ' '.join(cmds)
if self.ssh_key:
with SshAgent() as agent:
self.environment_vars['SSH_AUTH_SOCK'] = os.environ['SSH_AUTH_SOCK']
agent.add_key(self.ssh_key)
proc = subprocess.Popen(cmds,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=self.environment_vars)
stdout, stderr = proc.communicate()
rval = {"returncode": proc.returncode,
"results": results,
"cmd": ' '.join(cmds),
}
else:
proc = subprocess.Popen(cmds,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=self.environment_vars)
stdout, stderr = proc.communicate()
rval = {"returncode": proc.returncode,
"results": results,
"cmd": ' '.join(cmds),
}
if proc.returncode == 0:
if output:
if output_type == 'json':
try:
rval['results'] = json.loads(stdout)
except ValueError as err:
if "No JSON object could be decoded" in err.message:
err = err.message
elif output_type == 'raw':
rval['results'] = stdout
if self.verbose:
print stdout
print stderr
if err:
rval.update({"err": err,
"cmd": cmds
})
else:
rval.update({"results": {}})
# Always include stdout/stderr:
rval.update({"stderr": stderr,
"stdout": stdout})
return rval
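# Illustrative sketch (not part of the original file): driving the wrapper
# above. The repository URL, destination path and SSH key are hypothetical.
#
# git = GitCLI('/tmp/repo', ssh_key='/home/user/.ssh/id_rsa',
#              author='Jane Doe <jane@example.com>')
# git._clone('git@example.com:org/repo.git', '/tmp/repo')
# git._add(['README.md'])
# result = git._commit('update readme', author=git.author)
# if result['returncode'] != 0:
#     print result['stderr']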
|
CodeIA/venv/Lib/site-packages/coremltools/converters/mil/backend/nn/passes/test_passes.py | Finasty-lab/IA-Python | 11,356 | 12618724 | <filename>CodeIA/venv/Lib/site-packages/coremltools/converters/mil/backend/nn/passes/test_passes.py
# Copyright (c) 2020, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
import copy
from coremltools.converters.mil.mil import Builder as mb
from coremltools.converters.mil.mil.passes.pass_registry import PASS_REGISTRY
from coremltools.converters.mil.testing_utils import assert_model_is_valid
from coremltools.converters.mil.testing_utils import assert_same_output_names
def test_commingle_loop_vars():
def body(a, b):
# b is a loop invariant
return mb.add(x=a, y=b), b
def cond(a, b):
a_mean = mb.reduce_mean(x=a, axes=[0, 1])
b_mean = mb.reduce_mean(x=b, axes=[0, 1])
return mb.less(x=a_mean, y=b_mean)
@mb.program(
input_specs=[mb.TensorSpec(shape=(1, 2)), mb.TensorSpec(shape=(1, 2)),]
)
def prog(a, b):
return mb.while_loop(_cond=cond, _body=body, loop_vars=(a, b))
while_op = prog.find_ops(op_type="while_loop", exactly_one=True)[0]
assert while_op.blocks[0].inputs[0].name == "a.x"
assert while_op.blocks[0].inputs[1].name == "b.x"
prev_prog = copy.deepcopy(prog)
PASS_REGISTRY["nn_backend::commingle_loop_vars"](prog)
assert_same_output_names(prev_prog, prog)
while_op = prog.find_ops(op_type="while_loop", exactly_one=True)[0]
assert while_op.blocks[0].inputs[0].name == while_op.outputs[0].name
assert while_op.blocks[0].inputs[1].name == while_op.outputs[1].name
prog.validate()
# The program is not ssa and thus cannot be converted
def test_handle_return_return_inputs_as_outputs():
@mb.program(
input_specs=[mb.TensorSpec(shape=(1, 2)), mb.TensorSpec(shape=(1, 2)),]
)
def prog(a, b):
return mb.mul(x=a, y=2), b
prev_main_output_names = [o.name for o in prog["main"].outputs]
assert prog["main"].outputs[1].op is None # output comes from input
prev_prog = copy.deepcopy(prog)
PASS_REGISTRY["nn_backend::handle_return_inputs_as_outputs"](prog)
assert_same_output_names(prev_prog, prog)
assert prog["main"].outputs[1].op is not None # output comes from an op
assert prog["main"].outputs[1].op.op_type == "identity"
assert_model_is_valid(prog, {"a": (1, 2), "b": (1, 2)})
def test_handle_unused_inputs():
@mb.program(
input_specs=[mb.TensorSpec(shape=(1, 2)),]
)
def prog(unused_input):
return mb.const(val=[3, 2])
prev_prog = copy.deepcopy(prog)
PASS_REGISTRY["nn_backend::handle_unused_inputs"](prog)
assert_same_output_names(prev_prog, prog)
id_op = prog.find_ops(op_type="identity", exactly_one=True)[0]
# Assert that input var is consumed by an identity op.
assert id_op in prog["main"].inputs["unused_input"].child_ops
assert_model_is_valid(prog, {"unused_input": (1, 2)})
|
064_Dense_Depth/nyu/01_float32/12_weight_quantization.py | IgiArdiyanto/PINTO_model_zoo | 1,529 | 12618751 | <reponame>IgiArdiyanto/PINTO_model_zoo
### tensorflow==2.3.1
import tensorflow as tf
# Weight Quantization - Input/Output=float32
height = 480
width = 640
converter = tf.lite.TFLiteConverter.from_saved_model('saved_model_nyu_{}x{}'.format(height, width))
converter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_SIZE]
tflite_model = converter.convert()
with open('dense_depth_nyu_{}x{}_weight_quant.tflite'.format(height, width), 'wb') as w:
w.write(tflite_model)
print('Weight Quantization complete! - dense_depth_nyu_{}x{}_weight_quant.tflite'.format(height, width))
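# Illustrative sketch (not part of the original script): loading the quantized
# model back with the TFLite interpreter to sanity-check the conversion. The
# zero-filled input array is a hypothetical dummy tensor.
#
# import numpy as np
# interpreter = tf.lite.Interpreter(
#     model_path='dense_depth_nyu_{}x{}_weight_quant.tflite'.format(height, width))
# interpreter.allocate_tensors()
# input_details = interpreter.get_input_details()
# output_details = interpreter.get_output_details()
# dummy = np.zeros((1, height, width, 3), dtype=np.float32)
# interpreter.set_tensor(input_details[0]['index'], dummy)
# interpreter.invoke()
# depth = interpreter.get_tensor(output_details[0]['index'])  # (1, 240, 320, 1)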
'''
signature_def['serving_default']:
The given SavedModel SignatureDef contains the following input(s):
inputs['input_1'] tensor_info:
dtype: DT_FLOAT
shape: (-1, 480, 640, 3)
name: serving_default_input_1:0
The given SavedModel SignatureDef contains the following output(s):
outputs['output_1'] tensor_info:
dtype: DT_FLOAT
shape: (-1, 240, 320, 1)
name: StatefulPartitionedCall:0
Method name is: tensorflow/serving/predict
''' |
Trakttv.bundle/Contents/Libraries/Shared/plugin/core/logger/filters/trakt_.py | disrupted/Trakttv.bundle | 1,346 | 12618756 | <reponame>disrupted/Trakttv.bundle
from logging import Filter
from six import string_types
from trakt.core.exceptions import ServerError, RequestError
import logging
IGNORED_MESSAGE_PREFIXES = [
'Continue retry since status is',
'OAuth - Unable to refresh expired token',
'request failed:',
'Retry #'
]
class TraktReportFilter(Filter):
def filter(self, record):
if self.is_server_error(record):
return False
if self.is_ignored_message(record):
return False
return True
@staticmethod
def is_ignored_message(record):
if record.levelno < logging.WARNING:
return False
for prefix in IGNORED_MESSAGE_PREFIXES:
if isinstance(record.msg, string_types) and record.msg.startswith(prefix):
return True
return False
@staticmethod
def is_server_error(record):
if record.levelno < logging.WARNING:
return False
if not record.exc_info or len(record.exc_info) != 3:
return False
exc_type, _, _ = record.exc_info
if not exc_type or not issubclass(exc_type, ServerError):
return False
return True
class TraktNetworkFilter(Filter):
def __init__(self, mode='exclude'):
super(TraktNetworkFilter, self).__init__()
if mode not in ['exclude', 'include']:
raise ValueError('Unknown filter mode: %r' % mode)
self.mode = mode
def filter(self, record):
if self.mode == 'exclude':
return (
not self.is_trakt_request_failed(record) and
not self.is_trakt_request_exception(record)
)
if self.mode == 'include':
return (
self.is_trakt_request_failed(record) or
self.is_trakt_request_exception(record)
)
return True
@staticmethod
def is_trakt_request_exception(record):
if record.levelno < logging.WARNING:
return False
if not record.exc_info or len(record.exc_info) != 3:
return False
exc_type, _, _ = record.exc_info
if not exc_type or not issubclass(exc_type, RequestError):
return False
return True
@staticmethod
def is_trakt_request_failed(record):
if record.levelno < logging.WARNING:
return False
if record.name != 'trakt.interfaces.base':
return False
if not record.msg:
return False
return record.msg.startswith('Request failed:')
|
object_detection/Swin/det_necks/__init__.py | no-name-xiaosheng/PaddleViT | 993 | 12618762 | <filename>object_detection/Swin/det_necks/__init__.py
from . import fpn
|
src/mem/slicc/ast/InPortDeclAST.py | mandaltj/gem5_chips | 135 | 12618765 |
# Copyright (c) 1999-2008 <NAME> and <NAME>
# Copyright (c) 2009 The Hewlett-Packard Development Company
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from slicc.ast.DeclAST import DeclAST
from slicc.ast.TypeAST import TypeAST
from slicc.symbols import Func, Type, Var
class InPortDeclAST(DeclAST):
def __init__(self, slicc, ident, msg_type, var_expr, pairs, statements):
super(InPortDeclAST, self).__init__(slicc, pairs)
self.ident = ident
self.msg_type = msg_type
self.var_expr = var_expr
self.statements = statements
self.queue_type = TypeAST(slicc, "InPort")
def __repr__(self):
return "[InPortDecl: %s]" % self.ident
def generate(self):
symtab = self.symtab
void_type = symtab.find("void", Type)
machine = symtab.state_machine
if machine is None:
self.error("InPort declaration not part of a machine.")
code = self.slicc.codeFormatter()
queue_type = self.var_expr.generate(code)
if not queue_type.isInPort:
self.error("The inport queue's type must have the 'inport' " + \
"attribute. Type '%s' does not have this attribute.",
queue_type)
type = self.queue_type.type
self.pairs["buffer_expr"] = self.var_expr
in_port = Var(self.symtab, self.ident, self.location, type, str(code),
self.pairs, machine)
symtab.newSymbol(in_port)
symtab.pushFrame()
param_types = []
# Check for Event
type = symtab.find("Event", Type)
if type is None:
self.error("in_port decls require 'Event' enumeration defined")
param_types.append(type)
# Check for Address
type = symtab.find("Addr", Type)
if type is None:
self.error("in_port decls require 'Addr' type to be defined")
param_types.append(type)
if machine.EntryType != None:
param_types.append(machine.EntryType)
if machine.TBEType != None:
param_types.append(machine.TBEType)
# Add the trigger method - FIXME, this is a bit dirty
pairs = { "external" : "yes" }
trigger_func_name = "trigger"
for param in param_types:
trigger_func_name += "_" + param.ident
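        # The mangled name encodes the trigger's parameter types in order,
        # e.g. "trigger_Event_Addr", with the machine's entry/TBE type idents
        # appended when they are defined.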
func = Func(self.symtab, trigger_func_name, "trigger", self.location,
void_type, param_types, [], "", pairs)
symtab.newSymbol(func)
        # Add the stallPort method - this hack reschedules the controller
# for stalled messages that don't trigger events
func = Func(self.symtab, "stallPort", "stallPort", self.location,
void_type, [], [], "", pairs)
symtab.newSymbol(func)
param_types = []
# Check for Event2
type = symtab.find("Event", Type)
if type is None:
self.error("in_port decls require 'Event' enumeration")
param_types.append(type)
# Check for Address2
type = symtab.find("Addr", Type)
if type is None:
self.error("in_port decls require 'Addr' type to be defined")
param_types.append(type)
if self.statements is not None:
rcode = self.slicc.codeFormatter()
rcode.indent()
rcode.indent()
self.statements.generate(rcode, None)
in_port["c_code_in_port"] = str(rcode)
symtab.popFrame()
# Add port to state machine
machine.addInPort(in_port)
|
testfixtures/tests/__init__.py | abcdenis/testfixtures | 184 | 12618777 | import warnings
warnings.simplefilter('default', ImportWarning)
|
serieswatcher/sqlobject/inheritance/tests/test_asdict.py | lightcode/SeriesWatcher | 303 | 12618790 | from sqlobject import *
from sqlobject.inheritance import *
from sqlobject.tests.dbtest import *
########################################
## sqlmeta.asDict
########################################
class InheritablePerson(InheritableSQLObject):
first = StringCol()
last = StringCol(alternateID=True, length=255)
class Boss(InheritablePerson):
department = StringCol()
class Employee(InheritablePerson):
_inheritable = False
position = StringCol()
def test_getColumns():
setupClass([InheritablePerson, Boss, Employee])
for klass, columns in (
(InheritablePerson, ['first', 'last']),
(Boss, ['department', 'first', 'last']),
(Employee, ['first', 'last', 'position'])):
        _columns = sorted(klass.sqlmeta.getColumns().keys())
assert _columns == columns
def test_asDict():
setupClass([InheritablePerson, Boss, Employee])
InheritablePerson(first='Oneof', last='Authors')
Boss(first='Boss', last='The', department='Dep')
Employee(first='Project', last='Leader', position='Project leader')
assert InheritablePerson.get(1).sqlmeta.asDict() == \
dict(first='Oneof', last='Authors', id=1)
assert InheritablePerson.get(2).sqlmeta.asDict() == \
dict(first='Boss', last='The', department='Dep', id=2)
assert InheritablePerson.get(3).sqlmeta.asDict() == \
dict(first='Project', last='Leader', position='Project leader', id=3)
|
docs/conf.py | amirmalekicom/py_vollib | 145 | 12618860 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from __future__ import print_function
import sphinx_rtd_theme
from recommonmark.parser import CommonMarkParser
project = 'py_vollib'
copyright = '2017, Gammon Capital LLC.'
author = 'Gammon Capital LLC.'
version = '1.0'
release = '1.0.2'
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.coverage',
'sphinx.ext.imgmath',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode']
source_parsers = {'.md': CommonMarkParser}
source_suffix = ['.rst', '.md']
master_doc = 'index'
language = None
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
pygments_style = 'sphinx'
todo_include_todos = False
html_theme = "sphinx_rtd_theme"
html_logo = "vollib_60.png"
html_favicon = "favicon.ico"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
|
DQM/EcalMonitorTasks/python/SelectiveReadoutTask_cfi.py | ckamtsikis/cmssw | 852 | 12618874 |
import FWCore.ParameterSet.Config as cms
dccSizeBinEdges = []
for i in range(11) :
dccSizeBinEdges.append(0.608 / 10. * i)
for i in range(11, 79) :
dccSizeBinEdges.append(0.608 * (i - 10.))
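# The loops above build variable-width bin edges for the per-DCC event-size
# axis: 0.0608 kB steps up to 0.608 kB, then 0.608 kB steps up to
# 0.608 * 68 = 41.344 kB. They are used by the DCCSize plot's y axis below.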
ecalSelectiveReadoutTask = cms.untracked.PSet(
params = cms.untracked.PSet(
DCCZS1stSample = cms.untracked.int32(2),
useCondDb = cms.untracked.bool(False),
ZSFIRWeights = cms.untracked.vdouble(-0.374, -0.374, -0.3629, 0.2721, 0.4681, 0.3707)
),
MEs = cms.untracked.PSet(
HighIntOutput = cms.untracked.PSet(
path = cms.untracked.string('%(subdet)s/%(prefix)sSelectiveReadoutTask/%(prefix)sSRT high interest ZS filter output%(suffix)s'),
kind = cms.untracked.string('TH1F'),
otype = cms.untracked.string('Ecal3P'),
xaxis = cms.untracked.PSet(
high = cms.untracked.double(60.0),
nbins = cms.untracked.int32(120),
low = cms.untracked.double(-60.0),
title = cms.untracked.string('ADC counts*4')
),
btype = cms.untracked.string('User'),
description = cms.untracked.string('Output of the ZS filter for high interest towers.')
),
ZS1Map = cms.untracked.PSet(
path = cms.untracked.string('%(subdet)s/%(prefix)sSelectiveReadoutTask/Counters/%(prefix)sSRT tower ZS1 counter%(suffix)s'),
kind = cms.untracked.string('TH2F'),
otype = cms.untracked.string('Ecal3P'),
btype = cms.untracked.string('SuperCrystal'),
description = cms.untracked.string('Tower occupancy with ZS1 flags.')
),
FullReadoutMap = cms.untracked.PSet(
path = cms.untracked.string('%(subdet)s/%(prefix)sSelectiveReadoutTask/Counters/%(prefix)sSRT tower full readout counter%(suffix)s'),
kind = cms.untracked.string('TH2F'),
otype = cms.untracked.string('Ecal3P'),
btype = cms.untracked.string('SuperCrystal'),
description = cms.untracked.string('Tower occupancy with FR flags.')
),
ZSFullReadout = cms.untracked.PSet(
path = cms.untracked.string('%(subdet)s/%(prefix)sSelectiveReadoutTask/%(prefix)sSRT ZS Flagged Fully Readout Number%(suffix)s'),
kind = cms.untracked.string('TH1F'),
otype = cms.untracked.string('Ecal3P'),
xaxis = cms.untracked.PSet(
high = cms.untracked.double(20.0),
nbins = cms.untracked.int32(20),
low = cms.untracked.double(0.0),
title = cms.untracked.string('number of towers')
),
btype = cms.untracked.string('User'),
description = cms.untracked.string('Number of ZS flagged but fully read out towers.')
),
ZSFullReadoutMap = cms.untracked.PSet(
path = cms.untracked.string('%(subdet)s/%(prefix)sSelectiveReadoutTask/Counters/%(prefix)sSRT ZS flagged full readout counter%(suffix)s'),
kind = cms.untracked.string('TH2F'),
otype = cms.untracked.string('Ecal3P'),
btype = cms.untracked.string('SuperCrystal'),
description = cms.untracked.string('Number of ZS flagged but fully read out towers.')
),
FRDroppedMap = cms.untracked.PSet(
path = cms.untracked.string('%(subdet)s/%(prefix)sSelectiveReadoutTask/Counters/%(prefix)sSRT FR flagged dropped counter%(suffix)s'),
kind = cms.untracked.string('TH2F'),
otype = cms.untracked.string('Ecal3P'),
btype = cms.untracked.string('SuperCrystal'),
description = cms.untracked.string('Number of FR flagged but dropped towers.')
),
LowIntOutput = cms.untracked.PSet(
path = cms.untracked.string('%(subdet)s/%(prefix)sSelectiveReadoutTask/%(prefix)sSRT low interest ZS filter output%(suffix)s'),
kind = cms.untracked.string('TH1F'),
otype = cms.untracked.string('Ecal3P'),
xaxis = cms.untracked.PSet(
high = cms.untracked.double(60.0),
nbins = cms.untracked.int32(120),
low = cms.untracked.double(-60.0),
title = cms.untracked.string('ADC counts*4')
),
btype = cms.untracked.string('User'),
description = cms.untracked.string('Output of the ZS filter for low interest towers.')
),
LowIntPayload = cms.untracked.PSet(
path = cms.untracked.string('%(subdet)s/%(prefix)sSelectiveReadoutTask/%(prefix)sSRT low interest payload%(suffix)s'),
kind = cms.untracked.string('TH1F'),
otype = cms.untracked.string('Ecal3P'),
xaxis = cms.untracked.PSet(
high = cms.untracked.double(3.0),
nbins = cms.untracked.int32(100),
low = cms.untracked.double(0.0),
title = cms.untracked.string('event size (kB)')
),
btype = cms.untracked.string('User'),
description = cms.untracked.string('Total data size from all low interest towers.')
),
RUForcedMap = cms.untracked.PSet(
path = cms.untracked.string('%(subdet)s/%(prefix)sSelectiveReadoutTask/Counters/%(prefix)sSRT RU with forced SR counter%(suffix)s'),
kind = cms.untracked.string('TH2F'),
otype = cms.untracked.string('Ecal3P'),
btype = cms.untracked.string('SuperCrystal'),
description = cms.untracked.string('Tower occupancy of FORCED flag.')
),
DCCSize = cms.untracked.PSet(
path = cms.untracked.string('%(subdet)s/%(prefix)sSelectiveReadoutTask/%(prefix)sSRT event size vs DCC'),
kind = cms.untracked.string('TH2F'),
yaxis = cms.untracked.PSet(
edges = cms.untracked.vdouble(dccSizeBinEdges),
title = cms.untracked.string('event size (kB)')
),
otype = cms.untracked.string('Ecal2P'),
btype = cms.untracked.string('DCC'),
description = cms.untracked.string('Distribution of the per-DCC data size.')
),
DCCSizeProf = cms.untracked.PSet(
path = cms.untracked.string('%(subdet)s/%(prefix)sSelectiveReadoutTask/%(prefix)sSRT DCC event size'),
kind = cms.untracked.string('TProfile'),
yaxis = cms.untracked.PSet(
title = cms.untracked.string('event size (kB)')
),
otype = cms.untracked.string('Ecal2P'),
btype = cms.untracked.string('DCC'),
description = cms.untracked.string('Mean and spread of the per-DCC data size.')
),
ZSMap = cms.untracked.PSet(
path = cms.untracked.string('%(subdet)s/%(prefix)sSelectiveReadoutTask/Counters/%(prefix)sSRT tower ZS1+ZS2 counter%(suffix)s'),
kind = cms.untracked.string('TH2F'),
otype = cms.untracked.string('Ecal3P'),
btype = cms.untracked.string('SuperCrystal'),
description = cms.untracked.string('Tower occupancy of ZS1 and ZS2 flags.')
),
HighIntPayload = cms.untracked.PSet(
path = cms.untracked.string('%(subdet)s/%(prefix)sSelectiveReadoutTask/%(prefix)sSRT high interest payload%(suffix)s'),
kind = cms.untracked.string('TH1F'),
otype = cms.untracked.string('Ecal3P'),
xaxis = cms.untracked.PSet(
high = cms.untracked.double(3.0),
nbins = cms.untracked.int32(100),
low = cms.untracked.double(0.0),
title = cms.untracked.string('event size (kB)')
),
btype = cms.untracked.string('User'),
description = cms.untracked.string('Total data size from all high interest towers.')
),
FlagCounterMap = cms.untracked.PSet(
path = cms.untracked.string('%(subdet)s/%(prefix)sSelectiveReadoutTask/Counters/%(prefix)sSRT tower flag counter%(suffix)s'),
kind = cms.untracked.string('TH2F'),
otype = cms.untracked.string('Ecal3P'),
btype = cms.untracked.string('SuperCrystal'),
description = cms.untracked.string('Tower occupancy of any SR flag.')
),
FRDropped = cms.untracked.PSet(
path = cms.untracked.string('%(subdet)s/%(prefix)sSelectiveReadoutTask/%(prefix)sSRT FR Flagged Dropped Readout Number%(suffix)s'),
kind = cms.untracked.string('TH1F'),
otype = cms.untracked.string('Ecal3P'),
xaxis = cms.untracked.PSet(
high = cms.untracked.double(20.0),
nbins = cms.untracked.int32(20),
low = cms.untracked.double(0.0),
title = cms.untracked.string('number of towers')
),
btype = cms.untracked.string('User'),
description = cms.untracked.string('Number of FR flagged but dropped towers.')
),
EventSize = cms.untracked.PSet(
path = cms.untracked.string('%(subdet)s/%(prefix)sSelectiveReadoutTask/%(prefix)sSRT event size%(suffix)s'),
kind = cms.untracked.string('TH1F'),
otype = cms.untracked.string('Ecal3P'),
xaxis = cms.untracked.PSet(
high = cms.untracked.double(5.0),
nbins = cms.untracked.int32(100),
low = cms.untracked.double(0.0),
title = cms.untracked.string('event size (kB)')
),
btype = cms.untracked.string('User'),
description = cms.untracked.string('Distribution of per-DCC data size.')
),
FullReadout = cms.untracked.PSet(
path = cms.untracked.string('%(subdet)s/%(prefix)sSelectiveReadoutTask/%(prefix)sSRT full readout SR Flags Number%(suffix)s'),
kind = cms.untracked.string('TH1F'),
otype = cms.untracked.string('Ecal3P'),
xaxis = cms.untracked.PSet(
high = cms.untracked.double(200.0),
nbins = cms.untracked.int32(100),
low = cms.untracked.double(0.0),
title = cms.untracked.string('number of towers')
),
btype = cms.untracked.string('User'),
description = cms.untracked.string('Number of FR flags per event.')
),
TowerSize = cms.untracked.PSet(
path = cms.untracked.string('%(subdet)s/%(prefix)sSelectiveReadoutTask/%(prefix)sSRT tower event size%(suffix)s'),
kind = cms.untracked.string('TProfile2D'),
zaxis = cms.untracked.PSet(
title = cms.untracked.string('size (bytes)')
),
otype = cms.untracked.string('Ecal3P'),
btype = cms.untracked.string('SuperCrystal'),
description = cms.untracked.string('2D distribution of the mean data size from each readout unit.')
)
)
)
|
Python/View_and_Combine_STL/combine.py | iamakkkhil/Rotten-Scripts | 1,127 | 12618900 | import math
import stl
from stl import mesh
import numpy
import glob
def combine_stl():
    '''Combine all the STL files in a directory into a single merged mesh.

    Variables:
        data_dir -- directory containing the input STL files
        stl_dir  -- glob pattern matching every STL file in data_dir
        data     -- empty vertex array used to seed the combined mesh
        combine  -- mesh that accumulates every STL file at its original position
    '''
    # Directory containing the STL files
    data_dir = "REPLACE WITH DIRECTORY OF THE STL FILES"  # example "c:\users\username\..."
    stl_dir = data_dir + '/*.stl'  # glob pattern for every STL file in data_dir
    # Create an empty mesh to concatenate all the STL files in the directory
data = numpy.zeros(0, dtype=mesh.Mesh.dtype)
combine = mesh.Mesh(data, remove_empty_areas=False)
files = glob.glob(stl_dir)
for fl in files:
stl_fl = mesh.Mesh.from_file(fl)
combine = mesh.Mesh(numpy.concatenate([stl_fl.data, combine.data]))
combine.save('combine.stl', mode=stl.Mode.ASCII)
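# Minimal usage sketch (assumes data_dir inside combine_stl has been pointed at
# a real folder of STL files): running the module merges every *.stl it finds
# and writes the result to combine.stl in the working directory.
if __name__ == '__main__':
    combine_stl()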
|
fwtool/archive/fat.py | ma1co/fwtool.py | 115 | 12618906 |
"""A parser for FAT file system images"""
import io
import posixpath
import shutil
from stat import *
import time
from . import *
from ..io import *
from ..util import *
FatHeader = Struct('FatHeader', [
('jump', Struct.STR % 3),
('oemName', Struct.STR % 8),
('bytesPerSector', Struct.INT16),
('sectorsPerCluster', Struct.INT8),
('reservedSectors', Struct.INT16),
('fatCopies', Struct.INT8),
('rootEntries', Struct.INT16),
('...', 2),
('mediaDescriptor', Struct.INT8),
('sectorsPerFat', Struct.INT16),
('...', 8),
('sectors', Struct.INT32),
('...', 2),
('extendedSignature', Struct.STR % 1),
('serialNumber', Struct.INT32),
('volumeLabel', Struct.STR % 11),
('fsType', Struct.STR % 8),
('...', 448),
('signature', Struct.STR % 2),
])
fatHeaderSignature = b'\x55\xaa'
fatHeaderExtendedSignature = b'\x29'
FatDirEntry = Struct('FatDirEntry', [
('name', Struct.STR % 8),
('ext', Struct.STR % 3),
('attr', Struct.INT8),
('...', 1),
('ctimeCs', Struct.INT8),
('...', 8),
('time', Struct.INT16),
('date', Struct.INT16),
('cluster', Struct.INT16),
('size', Struct.INT32),
])
VfatDirEntry = Struct('VfatDirEntry', [
('sequence', Struct.INT8),
('name1', Struct.STR % 10),
('attr', Struct.INT8),
('...', 1),
('checksum', Struct.INT8),
('name2', Struct.STR % 12),
('...', 2),
('name3', Struct.STR % 4),
])
def isFat(file):
header = FatHeader.unpack(file)
return header and header.signature == fatHeaderSignature and header.extendedSignature == fatHeaderExtendedSignature and header.fsType.startswith(b'FAT')
def readFat(file):
header = FatHeader.unpack(file)
if header.signature != fatHeaderSignature or header.extendedSignature != fatHeaderExtendedSignature:
raise Exception('Wrong magic')
fatOffset = header.reservedSectors * header.bytesPerSector
rootOffset = fatOffset + header.fatCopies * header.sectorsPerFat * header.bytesPerSector
dataOffset = rootOffset + ((header.rootEntries * FatDirEntry.size - 1) // header.bytesPerSector + 1) * header.bytesPerSector
file.seek(fatOffset)
if header.fsType == b'FAT12 ':
endMarker = 0xfff
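        # FAT12 packs two 12-bit cluster entries into every 3 bytes, so read the
        # table 3 bytes at a time and split each group into its low and high entry.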
packedClusters = [parse32le(file.read(3) + b'\0') for i in range(0, header.sectorsPerFat * header.bytesPerSector, 3)]
clusters = [cluster for packed in packedClusters for cluster in [packed & 0xfff, (packed >> 12) & 0xfff]]
elif header.fsType == b'FAT16 ':
endMarker = 0xffff
clusters = [parse16le(file.read(2)) for i in range(0, header.sectorsPerFat * header.bytesPerSector, 2)]
else:
raise Exception('Unknown FAT width')
def readDir(entries, path=''):
offset = 0
vfatName = b''
while entries[offset:offset+1] != b'\0':
entry = FatDirEntry.unpack(entries, offset)
if entry.name[0:1] == b'\xe5':
vfatName = b''
else:
if entry.attr == 0x0f:
# VFAT
vfatEntry = VfatDirEntry.unpack(entries, offset)
vfatName = vfatEntry.name1 + vfatEntry.name2 + vfatEntry.name3 + vfatName
else:
if vfatName != b'':
name = vfatName.decode('utf16').rstrip(u'\0\uffff')
vfatName = b''
else:
name = entry.name.decode('ascii').rstrip(' ')
if name[0] == '\x05':
name = '\xe5' + name[1:]
ext = entry.ext.decode('ascii').rstrip(' ')
if ext != '':
name += '.' + ext
if name != '.' and name != '..':
isLink = (entry.attr & 0x04) and (entry.ctimeCs & 0xe1) == 0x21
isDir = entry.attr & 0x10
def generateChunks(cluster=entry.cluster, size=entry.size, isDir=isDir):
read = 0
while cluster != 0 and cluster != endMarker and (read < size or isDir):
file.seek(dataOffset + (cluster - 2) * header.sectorsPerCluster * header.bytesPerSector)
block = file.read(header.sectorsPerCluster * header.bytesPerSector)
yield block if isDir else block[:size-read]
read += len(block)
cluster = clusters[cluster]
contents = ChunkedFile(generateChunks, entry.size if not isDir else -1)
yield UnixFile(
path = path + '/' + name,
size = entry.size,
mtime = time.mktime((1980 + (entry.date >> 9), (entry.date >> 5) & 0xf, entry.date & 0x1f, entry.time >> 11, (entry.time >> 5) & 0x3f, (entry.time & 0x1f) * 2, -1, -1, -1)),
mode = S_IFDIR if isDir else S_IFLNK if isLink else S_IFREG,
uid = 0,
gid = 0,
contents = contents if not isDir else None,
)
if isDir:
for f in readDir(contents.read(), path + '/' + name):
yield f
offset += FatDirEntry.size
file.seek(rootOffset)
for f in readDir(file.read(dataOffset - rootOffset)):
yield f
def writeFat(files, size, outFile):
files = {f.path: f for f in files}
tree = {'': set()}
for path in files:
while path != '':
parent = posixpath.dirname(path).rstrip('/')
tree.setdefault(parent, set()).add(path)
path = parent
sectorSize = 0x200
clusterSize = 0x4000
sectors = size // sectorSize
fatSize = (size // clusterSize + 1) // 2 * 3
fatSectors = (fatSize + sectorSize - 1) // sectorSize
outFile.write(FatHeader.pack(
jump = b'\xeb\0\x90',
oemName = 8*b'\0',
bytesPerSector = sectorSize,
sectorsPerCluster = clusterSize // sectorSize,
reservedSectors = 1,
fatCopies = 1,
rootEntries = clusterSize // FatDirEntry.size,
sectors = sectors,
mediaDescriptor = 0xf8,
sectorsPerFat = fatSectors,
extendedSignature = fatHeaderExtendedSignature,
serialNumber = 0,
volumeLabel = 11*b' ',
fsType = b'FAT12 ',
signature = fatHeaderSignature,
))
for i in range(sectors - 1):
outFile.write(sectorSize * b'\0')
fatOffset = sectorSize
rootOffset = fatOffset + fatSectors * sectorSize
dataOffset = rootOffset + clusterSize
clusters = [0xff8, 0xfff]
def writeData(f):
f.seek(0)
outFile.seek(dataOffset + (len(clusters) - 2) * clusterSize)
shutil.copyfileobj(f, outFile)
nc = (f.tell() + clusterSize - 1) // clusterSize
for i in range(nc):
clusters.append(len(clusters) + 1 if i < nc-1 else 0xfff)
return (len(clusters)-nc if nc else 0), f.tell()
def dirEntries(pc, c):
return FatDirEntry.pack(
name = b'. ',
ext = b' ',
attr = 0x10,
ctimeCs = 0,
time = 0,
date = 0,
cluster = c,
size = 0,
) + FatDirEntry.pack(
name = b'.. ',
ext = b' ',
attr = 0x10,
ctimeCs = 0,
time = 0,
date = 0,
cluster = pc,
size = 0,
)
dirs = {}
def writeDir(path):
data = io.BytesIO()
if path != '':
data.write(dirEntries(0, 0))
for p in tree.get(path, set()):
file = files.get(p, UnixFile(p, 0, 0, S_IFDIR | 0o775, 0, 0, None))
c, s = writeData(file.contents if not S_ISDIR(file.mode) else writeDir(file.path))
if S_ISDIR(file.mode):
dirs[file.path] = c
name, ext = (posixpath.basename(file.path).upper() + '.').split('.', 1)
name = name[:8].ljust(8, ' ').encode('ascii')
ext = ext[:3].ljust(3, ' ').encode('ascii')
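            # Compute the VFAT 8.3 short-name checksum (rotate right by one bit,
            # then add each character) that ties the long-name entries written
            # below to this directory entry.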
sum = 0
for chr in (name + ext):
sum = (((sum & 1) << 7) + (sum >> 1) + chr) & 0xff
fn = posixpath.basename(file.path) + '\0'
vfatEntries = [fn[o:o+13] for o in range(0, len(fn), 13)]
for i, n in list(enumerate(vfatEntries))[::-1]:
n = n.encode('utf-16le').ljust(26, b'\xff')
data.write(VfatDirEntry.pack(
sequence = i + 1 + (0x40 if i == len(vfatEntries)-1 else 0),
name1 = n[:10],
attr = 0x0f,
checksum = sum,
name2 = n[10:22],
name3 = n[22:],
))
t = time.localtime(file.mtime)
data.write(FatDirEntry.pack(
name = name,
ext = ext,
attr = 0x10 if S_ISDIR(file.mode) else 0x04 if S_ISLNK(file.mode) else 0,
ctimeCs = 0x21 if S_ISLNK(file.mode) else 0,
time = (t.tm_hour << 11) + (t.tm_min << 5) + t.tm_sec // 2,
date = (max(t.tm_year - 1980, 0) << 9) + (t.tm_mon << 5) + t.tm_mday,
cluster = c,
size = s if not S_ISDIR(file.mode) else 0,
))
return data
root = writeDir('')
root.seek(0)
outFile.seek(rootOffset)
shutil.copyfileobj(root, outFile)
for p, c in dirs.items():
parent = posixpath.split(p)[0]
outFile.seek(dataOffset + (c - 2) * clusterSize)
outFile.write(dirEntries(dirs[parent] if parent != '/' else 0, c))
outFile.seek(fatOffset)
for i in range(0, len(clusters), 2):
outFile.write(dump32le(clusters[i] + ((clusters[i+1] << 12) if i+1 < len(clusters) else 0))[:3])
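# Usage sketch (not part of the original module): list the contents of a FAT12/16
# filesystem image, assuming 'image.bin' is such an image on disk.
if __name__ == '__main__':
    with open('image.bin', 'rb') as f:
        if isFat(f):
            f.seek(0)
            for entry in readFat(f):
                print('%s (%d bytes)' % (entry.path, entry.size))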
|
trackeval/_timing.py | AlexanderSing/TrackEval | 325 | 12618907 | from functools import wraps
from time import perf_counter
import inspect
DO_TIMING = False
DISPLAY_LESS_PROGRESS = False
timer_dict = {}
counter = 0
def time(f):
@wraps(f)
def wrap(*args, **kw):
if DO_TIMING:
# Run function with timing
ts = perf_counter()
result = f(*args, **kw)
te = perf_counter()
tt = te-ts
# Get function name
arg_names = inspect.getfullargspec(f)[0]
if arg_names[0] == 'self' and DISPLAY_LESS_PROGRESS:
return result
elif arg_names[0] == 'self':
method_name = type(args[0]).__name__ + '.' + f.__name__
else:
method_name = f.__name__
# Record accumulative time in each function for analysis
if method_name in timer_dict.keys():
timer_dict[method_name] += tt
else:
timer_dict[method_name] = tt
# If code is finished, display timing summary
if method_name == "Evaluator.evaluate":
print("")
print("Timing analysis:")
for key, value in timer_dict.items():
print('%-70s %2.4f sec' % (key, value))
else:
# Get function argument values for printing special arguments of interest
arg_titles = ['tracker', 'seq', 'cls']
arg_vals = []
for i, a in enumerate(arg_names):
if a in arg_titles:
arg_vals.append(args[i])
arg_text = '(' + ', '.join(arg_vals) + ')'
# Display methods and functions with different indentation.
if arg_names[0] == 'self':
print('%-74s %2.4f sec' % (' '*4 + method_name + arg_text, tt))
elif arg_names[0] == 'test':
pass
else:
global counter
counter += 1
print('%i %-70s %2.4f sec' % (counter, method_name + arg_text, tt))
return result
else:
# If config["TIME_PROGRESS"] is false, or config["USE_PARALLEL"] is true, run functions normally without timing.
return f(*args, **kw)
return wrap
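# Usage sketch (not part of the original module): decorate a function with @time
# and set DO_TIMING to True to collect per-call timings; demo_workload below is
# made up for illustration.
if __name__ == '__main__':
    DO_TIMING = True

    @time
    def demo_workload(n):
        return sum(i * i for i in range(n))

    demo_workload(100000)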
|
tools/ceilslam/gui.py | ParikhKadam/cycloid | 156 | 12618919 | # -*- coding: utf-8 -*-
from __future__ import print_function
import cv2
import numpy as np
import time
import glfw
import OpenGL.GL as gl
import imgui
from imgui.integrations.glfw import GlfwRenderer
import ceiltrack
import recordreader
# starting position for localization
# negative x because we also mirror the track about X
HOME = [ceiltrack.X_GRID*-2.5, ceiltrack.Y_GRID*0.5]
def load_texture(im):
# gl.glEnable(gl.GL_TEXTURE_2D)
texid = gl.glGenTextures(1)
gl.glBindTexture(gl.GL_TEXTURE_2D, texid)
gl.glTexParameteri(gl.GL_TEXTURE_2D,
gl.GL_TEXTURE_MIN_FILTER, gl.GL_LINEAR)
gl.glTexParameteri(gl.GL_TEXTURE_2D,
gl.GL_TEXTURE_MAG_FILTER, gl.GL_LINEAR)
gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_RGBA,
im.shape[1], im.shape[0], 0,
gl.GL_BGR, gl.GL_UNSIGNED_BYTE, im)
return texid
def unload_texture(texid):
gl.glDeleteTextures([texid])
def impl_glfw_init():
width, height = 1280, 720
window_name = "cycloid replay viewer"
if not glfw.init():
print("Could not initialize OpenGL context")
exit(1)
# OS X supports only forward-compatible core profiles from 3.2
glfw.window_hint(glfw.CONTEXT_VERSION_MAJOR, 3)
glfw.window_hint(glfw.CONTEXT_VERSION_MINOR, 3)
glfw.window_hint(glfw.OPENGL_PROFILE, glfw.OPENGL_CORE_PROFILE)
glfw.window_hint(glfw.OPENGL_FORWARD_COMPAT, gl.GL_TRUE)
# Create a windowed mode window and its OpenGL context
window = glfw.create_window(
int(width), int(height), window_name, None, None
)
glfw.make_context_current(window)
if not window:
glfw.terminate()
print("Could not initialize Window")
exit(1)
return window
class SLAMGUI:
def __init__(self, fname):
self.unloadlist = []
self.f = open(fname, "rb")
print("scanning ", fname, "...")
self.scanner = recordreader.RecordScanner(self.f)
self.frametexid = None
self.playing = False
self.ts = []
self.camdata = ceiltrack.ceillut()
self.f.seek(0, 0)
self.ceilheight = ceiltrack.CEIL_HEIGHT
# do a full tracking here on load
B = np.float32([HOME[0], HOME[1], 0])
self.track = []
match_time = 0
opt_time = 0
first = True
floordata = []
floormask = None
for frdata in recordreader.RecordIterator(self.f):
if 'yuv420' not in frdata:
continue
self.ts.append(frdata['tstamp'])
yuv420 = frdata['yuv420']
gray = yuv420[:480]
bgr = cv2.cvtColor(yuv420, cv2.COLOR_YUV2BGR_I420)
t0 = time.time()
xy = ceiltrack.match(gray, *self.camdata)
tm = time.time()
if first:
first = False
for i in range(6):
cost, dB = ceiltrack.cost(xy, *B)
B += dB
#B_straight, cost_straight = B, cost
#B = np.float32([HOME[0], HOME[1], np.pi/2])
#for i in range(6):
# cost, dB = ceiltrack.cost(xy, *B)
# B += dB
#if cost_straight < cost:
# B = B_straight
# we need an example frame to initialize the floor lookup table
# to filter out the visible body posts
self.floorlut = ceiltrack.floorlut(gray)
floormask = self.floorlut[0]
else:
for i in range(2):
c, dB = ceiltrack.cost(xy, *B)
B += dB
topt = time.time()
match_time += tm - t0
opt_time += topt - tm
self.track.append(B.copy())
floordata.append(bgr[floormask])
self.ts = np.array(self.ts)
self.track = np.array(self.track)
self.origtrack = self.track.copy()
self.track[:, 0] = -self.track[:, 0]
self.track[:, 2] = -self.track[:, 2]
# mirror the floor-pixel lookup table x coordinates also
self.floorlut[1][0] = -self.floorlut[1][0]
self.floordata = np.array(floordata)
self.loadframe(0)
print("done,", match_time, "secs match_time", opt_time, "sec opt_time")
floorimg = ceiltrack.render_floor(
self.track, self.floordata, self.floorlut[1])
if True:
xgm = ceiltrack.X_GRID * ceiltrack.CEIL_HEIGHT
ygm = ceiltrack.Y_GRID * ceiltrack.CEIL_HEIGHT
Z = 50 # pixels per meter
for x in range(0, 1+int(1000 / (xgm*Z))):
for y in range(0, 1+int(500 / (ygm*Z))):
cv2.circle(floorimg, (int(x*xgm*Z), int(y*ygm*Z)), int(0.25*Z), (255, 255, 0))
cv2.imwrite("map.png", floorimg)
self.floortex = load_texture(floorimg)
print("home location:", HOME)
def loadframe(self, i):
if self.frametexid is not None:
self.unloadlist.append(self.frametexid)
self.i = i
self.frame = self.scanner.frame(i)
if 'yuv420' not in self.frame:
return
yuv420 = self.frame['yuv420']
# optional: front view and annotated ceiling view?
im = cv2.cvtColor(yuv420, cv2.COLOR_YUV2BGR_I420)
xg = ceiltrack.X_GRID * self.ceilheight / ceiltrack.CEIL_HEIGHT
yg = ceiltrack.Y_GRID * self.ceilheight / ceiltrack.CEIL_HEIGHT
gray = yuv420[:480]
xy = ceiltrack.match(gray, *self.camdata)
B = self.origtrack[self.i]
for i in range(6):
cost, dB = ceiltrack.costxyg(xg, yg, xy, *B)
B += dB
for gp in ceiltrack.mkgrid(xg, yg, 31, *-B)[0]:
cv2.circle(im, (int(gp[0]), int(gp[1])), 3, (255, 0, 0), 1)
self.frametexid = load_texture(im)
def nextframe(self):
if self.i < self.scanner.num_frames() - 1:
self.loadframe(self.i+1)
def render_timeline(self):
imgui.begin("timeline")
tstamp = self.frame['tstamp']
if imgui.button("<"):
self.playing = False
if self.i > 0:
self.loadframe(self.i - 1)
imgui.same_line()
if self.playing:
if (self.i == len(self.ts)-1) or imgui.button("stop"):
self.playing = False
elif time.time() >= self.ts[self.i+1] - self.t0:
self.nextframe()
elif imgui.button("play"):
self.playing = True
self.t0 = tstamp - time.time()
imgui.same_line()
if imgui.button(">"):
self.playing = False
self.nextframe()
tsfrac = tstamp - int(tstamp)
tstring = time.strftime("%H:%M:%S.", time.localtime(
tstamp)) + "%02d" % (tsfrac*100)
imgui.same_line()
imgui.text(tstring)
w = imgui.get_window_width()
imgui.image(self.frametexid, w, 480*w/640)
changed, i = imgui.slider_int(
"frame", self.i, 0, self.scanner.num_frames()-1)
if changed:
self.playing = False
self.loadframe(i)
imgui.end()
def render_map(self):
imgui.begin("map")
imgui.slider_float("x (m)", self.track[self.i, 0] * ceiltrack.CEIL_HEIGHT, -80, 80)
imgui.slider_float("y (m)", self.track[self.i, 1] * ceiltrack.CEIL_HEIGHT, -80, 80)
imgui.slider_float("theta", self.track[self.i, 2] % (np.pi*2), -7, 7)
imgui.slider_float("x (grid)", self.track[self.i, 0] / ceiltrack.X_GRID, -10, 10)
imgui.slider_float("y (grid)", self.track[self.i, 1] / ceiltrack.X_GRID, -10, 10)
changed, self.ceilheight = imgui.slider_float("ceiling height (m)", self.ceilheight, 2, 4)
if changed:
self.loadframe(self.i)
dl = imgui.get_window_draw_list()
pos = imgui.get_cursor_screen_pos()
siz = imgui.get_content_region_available()
if siz[1] == 0:
siz = [400, 300]
# just use a fixed size
w = siz[0]
imgui.image_button(self.floortex, w, w/2, frame_padding=0)
# imgui.image_button(self.floortex, siz[0], siz[0])
origin = [pos[0], pos[1]]
scale = 50 * ceiltrack.CEIL_HEIGHT * w/1000
trackcolor = imgui.get_color_u32_rgba(0.3, 0.5, 0.3, 1)
for i in range(1, self.i):
dl.add_line(
origin[0] + scale * self.track[i-1, 0],
origin[1] + scale * self.track[i-1, 1],
origin[0] + scale * self.track[i, 0],
origin[1] + scale * self.track[i, 1],
trackcolor, 1.5)
carcolor = imgui.get_color_u32_rgba(0, 1, 0.6, 1)
B = self.track[self.i]
dl.add_line(
origin[0] + scale * B[0],
origin[1] + scale * B[1],
origin[0] + scale * (B[0] + np.cos(B[2])),
origin[1] + scale * (B[1] - np.sin(B[2])),
carcolor, 1.5)
imgui.end()
def render(self):
for t in self.unloadlist:
unload_texture(t)
self.unloadlist = []
self.render_timeline()
self.render_map()
def main(recfile):
imgui.create_context()
window = impl_glfw_init()
impl = GlfwRenderer(window)
slamgui = SLAMGUI(recfile)
while not glfw.window_should_close(window):
glfw.poll_events()
impl.process_inputs()
imgui.new_frame()
if imgui.begin_main_menu_bar():
if imgui.begin_menu("File", True):
clicked_quit, _ = imgui.menu_item(
"Quit", 'Cmd+Q', False, True)
if clicked_quit:
exit(0)
imgui.end_menu()
imgui.end_main_menu_bar()
slamgui.render()
gl.glClearColor(0, 0, 0, 1)
gl.glClear(gl.GL_COLOR_BUFFER_BIT)
imgui.render()
impl.render(imgui.get_draw_data())
glfw.swap_buffers(window)
impl.shutdown()
glfw.terminate()
if __name__ == "__main__":
import sys
if len(sys.argv) < 2:
print("usage:", sys.argv[0], "[cycloid-x.rec]")
exit(1)
main(sys.argv[1])
|
src/asp/IceBridge/full_processing_script.py | PicoJr/StereoPipeline | 323 | 12618929 | #!/usr/bin/env python
# __BEGIN_LICENSE__
# Copyright (c) 2009-2013, United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration. All
# rights reserved.
#
# The NGT platform is licensed under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# __END_LICENSE__
# Fetch all the data for a run and then process all the data.
# See sample usage below.
import os, sys, argparse, datetime, time, subprocess, logging, multiprocessing, re, shutil, time
import os.path as P
import glob
# The path to the ASP python files and tools
basepath = os.path.dirname(os.path.realpath(__file__)) # won't change, unlike syspath
pythonpath = os.path.abspath(basepath + '/../Python') # for dev ASP
libexecpath = os.path.abspath(basepath + '/../libexec') # for packaged ASP
binpath = os.path.abspath(basepath + '/../bin') # for packaged ASP
icebridgepath = os.path.abspath(basepath + '/../IceBridge') # IceBridge tools
toolspath = os.path.abspath(basepath + '/../Tools') # ASP Tools
# Prepend to Python path
sys.path.insert(0, basepath)
sys.path.insert(0, pythonpath)
sys.path.insert(0, libexecpath)
sys.path.insert(0, icebridgepath)
import icebridge_common, fetch_icebridge_data, process_icebridge_run, extract_icebridge_ATM_points
import input_conversions
import asp_system_utils, asp_alg_utils, asp_geo_utils
asp_system_utils.verify_python_version_is_supported()
# Prepend to system PATH
os.environ["PATH"] = basepath + os.pathsep + os.environ["PATH"]
os.environ["PATH"] = pythonpath + os.pathsep + os.environ["PATH"]
os.environ["PATH"] = libexecpath + os.pathsep + os.environ["PATH"]
os.environ["PATH"] = icebridgepath + os.pathsep + os.environ["PATH"]
os.environ["PATH"] = toolspath + os.pathsep + os.environ["PATH"]
os.environ["PATH"] = binpath + os.pathsep + os.environ["PATH"]
def fetchAllRunData(options, startFrame, stopFrame,
jpegFolder, orthoFolder, fireballFolder, lidarFolder, navFolder):
'''Download all data needed to process a run'''
logger = logging.getLogger(__name__)
logger.info('Downloading data for the run...')
baseCommand = (('--yyyymmdd %s --site %s --start-frame %d --stop-frame %d')
% (options.yyyymmdd, options.site, startFrame, stopFrame))
if options.maxNumLidarToFetch is not None and options.maxNumLidarToFetch >= 0:
baseCommand += ' --max-num-lidar-to-fetch ' + str(options.maxNumLidarToFetch)
if options.refetchIndex:
baseCommand += ' --refetch-index' # this was not right in older fetched runs
if options.refetchNav:
baseCommand += ' --refetch-nav' # sometimes this was corrupted
if options.stopAfterIndexFetch:
baseCommand += ' --stop-after-index-fetch'
if options.skipValidate:
baseCommand += ' --skip-validate'
if options.ignoreMissingLidar:
baseCommand += ' --ignore-missing-lidar'
if options.dryRun:
baseCommand += ' --dry-run'
jpegCommand = baseCommand + ' ' + jpegFolder
orthoCommand = baseCommand + ' ' + orthoFolder
fireballCommand = baseCommand + ' ' + fireballFolder
lidarCommand = baseCommand + ' ' + lidarFolder
navCommand = baseCommand + ' ' + navFolder
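    # For example, with the sample invocation from main() (--yyyymmdd 20091016
    # --site AN --start-frame 350 --stop-frame 353), jpegCommand expands to
    # "--yyyymmdd 20091016 --site AN --start-frame 350 --stop-frame 353 <jpegFolder>"
    # plus whichever of the optional flags above were enabled.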
# Try to do all the downloads one after another
# - On a failure the error message should already be printed.
# - The fetching tool will not redownload existing data.
if fetch_icebridge_data.main(jpegCommand.split()) < 0:
return -1
if fetch_icebridge_data.main(orthoCommand.split()) < 0:
return -1
if fetch_icebridge_data.main(fireballCommand.split()) < 0:
logger.info('Fireball DEM data is optional, continuing run.')
if not options.noNavFetch:
if fetch_icebridge_data.main(navCommand.split()) < 0:
return -1
# Skip the lidar fetch if the user requested no lidar files
if (options.maxNumLidarToFetch is None) or (options.maxNumLidarToFetch > 0):
if fetch_icebridge_data.main(lidarCommand.split()) < 0:
return -1
# jpeg and ortho indices must be consistent
if not options.skipValidate:
logger.info("Check for consistency between raw and ortho images.")
jpegIndex = icebridge_common.csvIndexFile(jpegFolder)
orthoIndex = icebridge_common.csvIndexFile(orthoFolder)
(jpegFrameDict, jpegUrlDict) = icebridge_common.readIndexFile(jpegIndex)
(orthoFrameDict, orthoUrlDict) = icebridge_common.readIndexFile(orthoIndex)
for jpegFrame in jpegFrameDict.keys():
if jpegFrame < startFrame or jpegFrame > stopFrame:
continue
if jpegFrame not in orthoFrameDict.keys():
logger.info("Found jpeg frame missing from ortho: " + str(jpegFrame))
#raise Exception ("Found jpeg frame missing from ortho:" + str(jpegFrame))
for orthoFrame in orthoFrameDict.keys():
if orthoFrame < startFrame or orthoFrame > stopFrame:
continue
if orthoFrame not in jpegFrameDict.keys():
# This can happen, don't die because of it
logger.info("Found ortho frame missing from jpeg: " + str(orthoFrame))
#raise Exception ("Found ortho frame missing from jpeg:" + str(orthoFrame))
# TODO: Wipe any ortho and jpeg images not in the index, or at least warn about it.
return 0
def validateOrthosAndFireball(options, fileType, logger):
'''Validate ortho and fireball files within the current frame range. This
is expected to be in called in parallel for smaller chunks. Lidar files
will be validated serially. Jpegs get validated when converted to tif.
Return True if all is good.'''
badFiles = False
logger.info("Validating files of type: " + fileType)
if fileType == 'ortho':
dataFolder = icebridge_common.getOrthoFolder(options.outputFolder)
elif fileType == 'fireball':
dataFolder = icebridge_common.getFireballFolder(options.outputFolder)
else:
raise Exception("Unknown file type: " + fileType)
indexPath = icebridge_common.csvIndexFile(dataFolder)
if not os.path.exists(indexPath):
# The issue of what to do when the index does not exist should
# have been settled by now.
return (not badFiles)
# Fetch from disk the set of already validated files, if any
validFilesList = icebridge_common.validFilesList(options.outputFolder,
options.startFrame, options.stopFrame)
validFilesSet = set()
validFilesSet = icebridge_common.updateValidFilesListFromDisk(validFilesList, validFilesSet)
numInitialValidFiles = len(validFilesSet)
(frameDict, urlDict) = icebridge_common.readIndexFile(indexPath, prependFolder = True)
for frame in frameDict.keys():
if frame < options.startFrame or frame > options.stopFrame:
continue
outputPath = frameDict[frame]
xmlFile = icebridge_common.xmlFile(outputPath)
if outputPath in validFilesSet and os.path.exists(outputPath) and \
xmlFile in validFilesSet and os.path.exists(xmlFile):
#logger.info('Previously validated: ' + outputPath + ' ' + xmlFile)
continue
else:
isGood = icebridge_common.hasValidChkSum(outputPath, logger)
if not isGood:
logger.info('Found invalid data. Will wipe: ' + outputPath + ' ' + xmlFile)
os.system('rm -f ' + outputPath) # will not throw
os.system('rm -f ' + xmlFile) # will not throw
badFiles = True
else:
logger.info('Valid file: ' + outputPath)
validFilesSet.add(outputPath)
validFilesSet.add(xmlFile)
if fileType != 'fireball':
continue
# Also validate tfw
tfwFile = icebridge_common.tfwFile(outputPath)
xmlFile = icebridge_common.xmlFile(tfwFile)
if tfwFile in validFilesSet and os.path.exists(tfwFile) and \
xmlFile in validFilesSet and os.path.exists(xmlFile):
#logger.info('Previously validated: ' + tfwFile + ' ' + xmlFile)
continue
else:
isGood = icebridge_common.isValidTfw(tfwFile, logger)
if not isGood:
logger.info('Found invalid tfw. Will wipe: ' + tfwFile + ' ' + xmlFile)
os.system('rm -f ' + tfwFile) # will not throw
os.system('rm -f ' + xmlFile) # will not throw
badFiles = True
else:
logger.info('Valid tfw file: ' + tfwFile)
validFilesSet.add(tfwFile)
validFilesSet.add(xmlFile)
# Write to disk the list of validated files, but only if new
# validations happened. First re-read that list, in case a
# different process modified it in the meantime, such as if two
# managers are running at the same time.
numFinalValidFiles = len(validFilesSet)
if numInitialValidFiles != numFinalValidFiles:
validFilesSet = \
icebridge_common.updateValidFilesListFromDisk(validFilesList, validFilesSet)
icebridge_common.writeValidFilesList(validFilesList, validFilesSet)
return (not badFiles)
def runFetchConvert(options, isSouth, cameraFolder, imageFolder, jpegFolder, orthoFolder,
fireballFolder, corrFireballFolder, lidarFolder, processedFolder,
navFolder, navCameraFolder, refDemPath, logger):
'''Fetch and/or convert. Return 0 on success.'''
if options.noFetch:
logger.info('Skipping fetch.')
else:
# Call data fetch routine and check the result
fetchResult = fetchAllRunData(options, options.startFrame, options.stopFrame,
jpegFolder, orthoFolder, fireballFolder, lidarFolder,
navFolder)
if fetchResult < 0:
logger.error("Fetching failed!")
return -1
# This step is slow, so run it here as part of fetching and save its result
# We certainly don't want it to throw any exception at this stage.
try:
forceAllFramesInRange = True
availableFrames = []
(autoStereoInterval, breaks) = \
process_icebridge_run.getImageSpacing(orthoFolder, availableFrames,
options.startFrame,
options.stopFrame,
options.maxOverlapRatio,
forceAllFramesInRange)
except Exception as e:
pass
if options.stopAfterFetch or options.dryRun:
logger.info('Fetching complete, finished!')
return 0
# Keep track of how we are doing
isGood = True
if options.noConvert:
logger.info('Skipping convert.')
else:
# When files fail in these conversion functions we log the error and keep going
if not options.skipFastConvert:
if not options.skipValidate:
# Validate orthos and dems for this frame range.
ans = validateOrthosAndFireball(options, 'ortho', logger)
isGood = (isGood and ans)
ans = validateOrthosAndFireball(options, 'fireball', logger)
isGood = (isGood and ans)
# Run non-ortho conversions without any multiprocessing (they are pretty fast)
# TODO: May be worth doing the faster functions with multiprocessing in the future
if not options.noLidarConvert:
ans = input_conversions.convertLidarDataToCsv(lidarFolder,
options.startFrame, options.stopFrame,
options.skipValidate,
logger)
isGood = (isGood and ans)
ans = input_conversions.pairLidarFiles(lidarFolder, options.skipValidate, logger)
isGood = (isGood and ans)
ans = input_conversions.correctFireballDems(fireballFolder, corrFireballFolder,
options.startFrame, options.stopFrame,
(not isSouth), options.skipValidate,
logger)
isGood = (isGood and ans)
ans = input_conversions.convertJpegs(jpegFolder, imageFolder,
options.startFrame, options.stopFrame,
options.skipValidate, options.cameraMounting,
logger)
isGood = (isGood and ans)
if not options.noNavFetch:
# Single process call to parse the nav files.
input_conversions.getCameraModelsFromNav(imageFolder, orthoFolder,
options.inputCalFolder,
options.inputCalCamera,
options.cameraLookupFile,
navFolder, navCameraFolder,
options.yyyymmdd, options.site,
options.startFrame, options.stopFrame,
options.cameraMounting,
logger)
else:
navCameraFolder = ""
options.simpleCameras = False
if not options.noOrthoConvert:
# Multi-process call to convert ortho images
input_conversions.getCameraModelsFromOrtho(imageFolder, orthoFolder,
options.inputCalFolder,
options.inputCalCamera,
options.cameraLookupFile,
options.noNavFetch,
navCameraFolder,
options.yyyymmdd, options.site,
refDemPath, cameraFolder,
options.simpleCameras,
options.startFrame, options.stopFrame,
options.framesFile,
options.numOrthoProcesses, options.numThreads,
logger)
os.system("rm -f core.*") # these keep on popping up
if isGood:
return 0
return -1
def processTheRun(options, imageFolder, cameraFolder, lidarFolder, orthoFolder,
fireballFolder, processedFolder, isSouth, refDemPath):
'''Do all the run processing'''
# Some care is taken with the --stereo-arguments argument to make sure it is passed correctly.
processCommand = (('%s %s %s %s --bundle-length %d --fireball-folder %s ' +
'--ortho-folder %s --num-processes %d --num-threads %d ' +
'--reference-dem %s --max-overlap-ratio %g')
% (imageFolder, cameraFolder, lidarFolder, processedFolder,
options.bundleLength, fireballFolder, orthoFolder, options.numProcesses,
options.numThreads, refDemPath, options.maxOverlapRatio))
if isSouth:
processCommand += ' --south'
if options.startFrame:
processCommand += ' --start-frame ' + str(options.startFrame)
if options.stopFrame:
processCommand += ' --stop-frame ' + str(options.stopFrame)
if options.logBatches:
processCommand += ' --log-batches'
if options.cleanup:
processCommand += ' --cleanup'
if options.manyip:
processCommand += ' --many-ip'
processCommand += ' --stereo-arguments '
logger = logging.getLogger(__name__)
logger.info('Process command: process_icebridge_run ' +
processCommand + options.stereoArgs.strip())
args = processCommand.split()
args += (options.stereoArgs.strip(),) # Make sure this is properly passed
process_icebridge_run.main(args)
def solveIntrinsics_Part1(options, jpegFolder, cameraFolder, navCameraFolder, processedFolder,
logger):
'''Some preliminary work before solving for intrinsics. Here we
look up the default calibration file, and generate an RPC
approximation of its distortion model with polynomials of degree
4. We will then create cameras and stereo DEMs using this initial
camera file with RPC distortion.'''
# Sanity checks
if options.startFrame == icebridge_common.getSmallestFrame() or \
options.stopFrame == icebridge_common.getLargestFrame():
raise Exception("When solving for intrinsics, must specify a frame range.")
if options.bundleLength != 2:
raise Exception("When solving for intrinsics, we assume bundle length of 2.")
if (options.stopFrame - options.startFrame) % 2 == 0:
raise Exception("When solving for intrinsics, must have an even number of frames, " +
" so stopFrame - startFrame must be odd.")
if options.processingSubfolder:
raise Exception("Processing subfolder not supported when solving for intrinsics.")
# Generate extra data we will use later to float intrinsics
options.stereoArgs += " --num-matches-from-disp-triplets 10000 --unalign-disparity " # --enable-fill-holes "
# Create separate directories for cameras and processed data,
# as these will be distinct than what we will finally be
# using to do the full run.
suff = "_camgen"
cameraFolder += suff
navCameraFolder += suff
processedFolder += suff
# Get the input calibration file
defaultCalibFile = ""
for frame in range(options.startFrame, options.stopFrame+1):
currCalibFile = input_conversions.getCalibrationFileForFrame(options.cameraLookupFile,
options.inputCalFolder,
frame, options.yyyymmdd,
options.site, logger)
if defaultCalibFile == "":
defaultCalibFile = currCalibFile
if defaultCalibFile != currCalibFile:
# This is important, the calibration file must be unique
raise Exception("Found two distinct calibration files: " + defaultCalibFile + \
" and " + currCalibFile)
logger.info("Default calibration file: " + defaultCalibFile)
if options.inputCalCamera != "":
defaultCalibFile = options.inputCalCamera
logger.info("Using instead the user-provided: " + defaultCalibFile)
# Find the first image in the range
jpegIndex = icebridge_common.csvIndexFile(jpegFolder)
(jpegFrameDict, jpegUrlDict) = icebridge_common.readIndexFile(jpegIndex,
prependFolder = True)
if options.startFrame not in jpegFrameDict.keys():
raise Exception("Could not find jpeg image for frame: " + options.startFrame)
firstImage = jpegFrameDict[options.startFrame]
# Create the RPC file before optimization
rpcCalibFile = os.path.join(processedFolder, os.path.basename(defaultCalibFile))
rpcCalibFile = rpcCalibFile.replace(".tsai", "_INIT_RPC.tsai")
logger.info("Will approximate camera model " + defaultCalibFile + " with " + \
options.outputModelType + " model " + rpcCalibFile)
if not os.path.exists(defaultCalibFile):
raise Exception('Cannot find file: ' + defaultCalibFile)
os.system("mkdir -p " + os.path.dirname(rpcCalibFile))
cmd = "convert_pinhole_model --input-file " + firstImage + ' --camera-file ' + \
defaultCalibFile + ' --output-type ' + options.outputModelType + \
' --sample-spacing 50 -o ' + rpcCalibFile
logger.info(cmd)
os.system(cmd)
# Use this one from now on
options.inputCalCamera = rpcCalibFile
# Return the modified values
return (options, cameraFolder, navCameraFolder, processedFolder)
def solveIntrinsics_Part2(options, imageFolder, cameraFolder, lidarFolder, orthoFolder,
processedFolder, isSouth, logger):
'''Create a camera model with optimized intrinsics. By now we
processed a bunch of images and created bundle-adjusted and
pc_aligned cameras and DEMs while using a camera model with
distortion implemented using RPC coefficients which was obtained
from the photometrics model. We now use the obtained cameras as
inputs to a bundle adjust problem where we will optimize the
intrinsics, including the distortion RPC coefficients, using the
lidar as an external constraint, and many dense IP pairs and
triplets (no quadruplets yet, even if 4 images overlap).'''
# Get a list of all the input files
imageCameraPairs = icebridge_common.getImageCameraPairs(imageFolder, cameraFolder,
options.startFrame, options.stopFrame,
logger)
# The paired lidar file for the first image should be huge enough to contain
# all images.
lidarFile = icebridge_common.findMatchingLidarFile(imageCameraPairs[0][0], lidarFolder)
logger.info('Found matching lidar file ' + lidarFile)
lidarCsvFormatString = icebridge_common.getLidarCsvFormat(lidarFile)
numFiles = len(imageCameraPairs)
if numFiles < 2:
raise Exception('Failed to find any image camera pairs!')
if numFiles % 2 != 0:
raise Exception("When solving for intrinsics, must have an even number of frames to use.")
# Collect pc_align-ed cameras, unaligned disparities, and dense match files
images = []
cameras = []
for it in range(numFiles/2):
begFrame = options.startFrame + 2*it
endFrame = begFrame + 1
batchFolderName = icebridge_common.batchFolderName(begFrame, endFrame, options.bundleLength)
thisOutputFolder = os.path.join(processedFolder, batchFolderName)
# Find all the cameras after bundle adjustment and pc_align.
pattern = icebridge_common.getAlignedBundlePrefix(thisOutputFolder) + '*.tsai'
alignedCameras = glob.glob(pattern)
if len(alignedCameras) != options.bundleLength:
raise Exception("Expected " + str(options.bundleLength) + " cameras, here's what " +
" was obtained instead: " + " ".join(alignedCameras))
img0 = ""; cam0 = ""; img1 = ""; cam1 = ""
for cam in alignedCameras:
frame = icebridge_common.getFrameNumberFromFilename(cam)
if begFrame == frame:
img0 = imageCameraPairs[2*it][0]
cam0 = cam
if endFrame == frame:
img1 = imageCameraPairs[2*it+1][0]
cam1 = cam
images.append(img0); images.append(img1)
cameras.append(cam0); cameras.append(cam1)
# Match files and disp files
dispFiles = []
matchFiles = []
for it in range(numFiles-1):
begFrame = options.startFrame + it
endFrame = begFrame + 1
batchFolderName = icebridge_common.batchFolderName(begFrame, endFrame, options.bundleLength)
thisOutputFolder = os.path.join(processedFolder, batchFolderName)
stereoFolder = os.path.join(thisOutputFolder, 'stereo_pair_'+str(0))
DISP_PREFIX = "disp-"
currMatchFiles = glob.glob(os.path.join(stereoFolder, '*' + DISP_PREFIX + '*.match'))
if len(currMatchFiles) != 1:
raise Exception("Expecting a single dense match file in " + stereoFolder)
matchFiles.append(currMatchFiles[0])
currDispFiles = glob.glob(os.path.join(stereoFolder, '*unaligned-D.tif'))
if len(currDispFiles) != 1:
raise Exception("Expecting a single unaligned disparity file in " + stereoFolder)
dispFiles.append(currDispFiles[0])
# Create output directory for bundle adjustment and copy there the match files
baDir = os.path.join(processedFolder, "bundle_intrinsics")
baPrefix = os.path.join(baDir, "out")
os.system("mkdir -p " + baDir)
for matchFile in matchFiles:
dstFile = os.path.basename(matchFile)
dstFile = dstFile.replace(DISP_PREFIX, '')
dstFile = os.path.join(baDir, dstFile)
cmd = "cp -f " + matchFile + " " + dstFile
logger.info(cmd)
os.system(cmd)
# The bundle adjustment
solveIntr = ""
if not options.skipSolvingIntrinsics:
solveIntr = " --solve-intrinsics "
cmd = "bundle_adjust " + " ".join(images) + " " + " ".join(cameras) + \
' --reference-terrain ' + lidarFile + \
' --disparity-list "' + " ".join(dispFiles) + '"' + \
' --datum wgs84 -t nadirpinhole --inline-adjustments --robust-threshold 2' + \
' --camera-weight 0 --csv-format ' + lidarCsvFormatString + \
' --overlap-limit 1 --max-disp-error 50 --max-iterations 100 ' + \
solveIntr + ' --parameter-tolerance 1e-12 -o ' + baPrefix
logger.info(cmd)
os.system(cmd)
# Generate DEMs of residuals before and after optimization
projString = icebridge_common.getEpsgCode(isSouth, asString=True)
for val in ['initial', 'final']:
cmd = 'point2dem --t_srs ' + projString + ' --tr 2' + \
' --csv-format 1:lon,2:lat,4:height_above_datum' + \
' ' + baPrefix + '-' + val + '_residuals_.csv'
logger.info(cmd)
os.system(cmd)
cmd = 'point2dem --t_srs ' + projString + ' --tr 2' + \
' --csv-format 1:lon,2:lat,4:height_above_datum' + \
' ' + baPrefix + '-' + val +'_residuals_reference_terrain.txt'
logger.info(cmd)
os.system(cmd)
# Look at the latest written tsai file, that will be the optimized distortion file.
# Force the initial rotation and translation to be the identity, this is
# expected by ortho2pinhole.
outFiles = filter(os.path.isfile, glob.glob(baPrefix + '*.tsai'))
outFiles.sort(key=lambda x: os.path.getmtime(x))
optFile = outFiles[-1]
logger.info("Reading optimized file: " + optFile)
with open(optFile, 'r') as f:
lines = f.readlines()
for it in range(len(lines)):
lines[it] = lines[it].strip()
if re.match("^C\s*=\s*", lines[it]):
lines[it] = "C = 0 0 0"
if re.match("^R\s*=\s*", lines[it]):
lines[it] = "R = 1 0 0 0 1 0 0 0 1"
# Write the final desired optimized RPC file
logger.info("Writing final optimized file: " + options.outputCalCamera)
# Below is a bugfix, must take full path to find the dir, otherwise it may fail.
os.system("mkdir -p " + os.path.dirname(os.path.abspath(options.outputCalCamera)))
with open(options.outputCalCamera, 'w') as f:
for line in lines:
f.write(line + "\n")
def main(argsIn):
try:
# Sample usage:
# python full_processing_script.py \
# --yyyymmdd 20091016 --site AN --num-processes 1 --num-threads 12 --bundle-length 12 \
# --start-frame 350 --stop-frame 353 --skip-validate \
# --camera-calibration-folder camera_calib \
# --reference-dem-folder ref_dem_folder
        # An output folder will be created automatically (with a name like
# AN_20091016), or its name can be specified via the --output-folder
# option.
usage = '''full_processing_script.py <options>'''
parser = argparse.ArgumentParser(usage=usage)
# Run selection
parser.add_argument("--yyyymmdd", dest="yyyymmdd", required=True,
help="Specify the year, month, and day in one YYYYMMDD string.")
parser.add_argument("--site", dest="site", required=True,
help="Name of the location of the images (AN, GR, or AL)")
parser.add_argument("--output-folder", dest="outputFolder", default=None,
help="Name of the output folder. If not specified, " + \
"use something like AN_YYYYMMDD.")
parser.add_argument("--camera-lookup-file", dest="cameraLookupFile", default=None,
help="The file to use to find which camera was used for which " + \
"flight. By default it is in the same directory as this script " + \
"and named camera_lookup.txt.")
# Processing options
parser.add_argument('--bundle-length', dest='bundleLength', default=2,
type=int, help="The number of images to bundle adjust and process " + \
"in a single batch.")
# TODO: Compute this automatically??
parser.add_argument('--overlap-limit', dest='overlapLimit', default=2,
type=int, help="The number of images to treat as overlapping for " + \
"bundle adjustment.")
parser.add_argument('--max-overlap-ratio', dest='maxOverlapRatio', default=0.85,
type=float, help='The maximum ratio of overlap between images to be accepted as part of a stereo pair. When floating intrinsics, this will be set to 1, to not upset some bookkeeping.')
parser.add_argument('--stereo-arguments', dest='stereoArgs',
# set --min-xcorr-level 0 to do the left-to-right
# and right-to-left consistency check at the lowest level.
default='--stereo-algorithm 2 --min-xcorr-level 0',
help='Extra arguments to pass to stereo.')
parser.add_argument('--start-frame', dest='startFrame', type=int,
default=icebridge_common.getSmallestFrame(),
help="Frame to start with. Leave this and stop-frame blank to " + \
"process all frames.")
parser.add_argument('--stop-frame', dest='stopFrame', type=int,
default=icebridge_common.getLargestFrame(),
help='Frame to stop on.')
parser.add_argument('--frames-file', dest='framesFile', default="",
help='Specific frames to run ortho2pinhole on within this frame range.')
parser.add_argument('--max-num-lidar-to-fetch', dest='maxNumLidarToFetch', default=None,
type=int, help="The maximum number of lidar files to fetch. " + \
"This is used in debugging.")
parser.add_argument("--camera-calibration-folder", dest="inputCalFolder", default=None,
help="The folder containing camera calibration.")
parser.add_argument("--input-calibration-camera", dest="inputCalCamera", default="",
help="Instead of looking up the calibrated camera in the calibration folder, use this one.")
parser.add_argument("--output-calibration-camera", dest="outputCalCamera", default="",
help="If specified, float the intrinsics and write the optimized model here.")
parser.add_argument("--output-model-type", dest="outputModelType", default="RPC",
help="Generate a distortion model of type RPC, RPC5, or RPC6.")
parser.add_argument("--skip-solving-intrinsics", action="store_true",
dest="skipSolvingIntrinsics", default=False,
help="When jointly solving for all extrinsics and intrinsics, " + \
"keep the intrinsics fixed.")
parser.add_argument("--reference-dem-folder", dest="refDemFolder", default=None,
help="The folder containing DEMs that created orthoimages.")
parser.add_argument("--processing-subfolder", dest="processingSubfolder", default=None,
                            help="Specify a subfolder name where the processing outputs will go. " + \
                            "Default is no additional folder.")
parser.add_argument("--simple-cameras", action="store_true", dest="simpleCameras", default=False,
help="Don't use orthoimages to refine the camera models.")
# This option is only needed when generating camera models from the nav files.
parser.add_argument('--camera-mounting', default=0, dest='cameraMounting', type=int,
help='0=right-forwards, 1=left-forwards, 2=top-forwards, 3=bottom-forwards.')
# Performance options
parser.add_argument('--num-processes', dest='numProcesses', default=1,
type=int, help='The number of simultaneous processes to run.')
parser.add_argument('--num-ortho-processes', dest='numOrthoProcesses', default=-1,
type=int, help='The number of simultaneous ortho processes to run.')
parser.add_argument('--num-threads', dest='numThreads', default=8,
type=int, help='The number of threads per process.')
# Action control
parser.add_argument("--skip-fetch", action="store_true", dest="noFetch", default=False,
help="Skip data fetching.")
parser.add_argument("--skip-convert", action="store_true", dest="noConvert", default=False,
help="Skip data conversion.")
parser.add_argument("--stop-after-fetch", action="store_true", dest="stopAfterFetch",
default=False,
help="Stop program after data fetching.")
parser.add_argument("--stop-after-convert", action="store_true", dest="stopAfterConvert",
default=False,
help="Stop program after data conversion.")
parser.add_argument("--skip-validate", action="store_true", dest="skipValidate",
default=False,
help="Skip input data validation.")
parser.add_argument("--ignore-missing-lidar", action="store_true", dest="ignoreMissingLidar",
default=False,
help="Keep going if the lidar is missing.")
parser.add_argument("--log-batches", action="store_true", dest="logBatches", default=False,
help="Log the required batch commands without running them.")
parser.add_argument('--cleanup', action='store_true', default=False, dest='cleanup',
help='If the final result is produced delete intermediate files.')
parser.add_argument('--many-ip', action='store_true', default=False, dest='manyip',
                            help='Whether to use a lot of IP (interest points) in bundle adjustment from the beginning.')
parser.add_argument("--dry-run", action="store_true", dest="dryRun", default=False,
help="Set up the input directories but do not fetch/process any imagery.")
parser.add_argument("--refetch", action="store_true", dest="reFetch", default=False,
help="Try fetching again if some files turned out invalid " + \
"during conversions.")
parser.add_argument("--refetch-index", action="store_true", dest="refetchIndex",
default=False,
help="Force refetch of the index file.")
parser.add_argument("--refetch-nav", action="store_true", dest="refetchNav",
default=False,
help="Force refetch of the nav file.")
parser.add_argument("--stop-after-index-fetch", action="store_true",
dest="stopAfterIndexFetch", default=False,
help="Stop after fetching the indices.")
parser.add_argument("--no-nav", action="store_true", dest="noNavFetch",
default=False, help="Don't fetch or convert the nav data.")
parser.add_argument("--no-lidar-convert", action="store_true", dest="noLidarConvert",
default=False,
help="Skip lidar files in the conversion step.")
parser.add_argument("--no-ortho-convert", action="store_true", dest="noOrthoConvert",
default=False,
help="Skip generating camera models in the conversion step.")
parser.add_argument("--skip-fast-conversions", action="store_true", dest="skipFastConvert",
default=False,
help="Skips all non-ortho conversions.")
options = parser.parse_args(argsIn)
except argparse.ArgumentError as msg:
parser.error(msg)
icebridge_common.switchWorkDir()
if options.numOrthoProcesses < 0:
options.numOrthoProcesses = options.numProcesses
isSouth = icebridge_common.checkSite(options.site)
# Turned off elevation limits here since they are being set from LIDAR data.
## Add the site based elevation limits to the stereoArgs option
#altLimits = icebridge_common.getElevationLimits(options.site)
#options.stereoArgs = (' %s --elevation-limit %f %f '
# % (options.stereoArgs, altLimits[0], altLimits[1]))
options.stereoArgs = (' %s ' % (options.stereoArgs))
if options.cameraLookupFile is None:
options.cameraLookupFile = P.join(basepath, 'camera_lookup.txt')
if not os.path.isfile(options.cameraLookupFile):
raise Exception("Can't find camera file: " + options.cameraLookupFile)
if len(options.yyyymmdd) != 8 and len(options.yyyymmdd) != 9:
# Make an exception for 20100422a
raise Exception("The --yyyymmdd field must have length 8 or 9.")
if options.outputFolder is None:
options.outputFolder = icebridge_common.outputFolder(options.site, options.yyyymmdd)
if options.stopAfterIndexFetch:
options.stopAfterFetch = True
os.system('mkdir -p ' + options.outputFolder)
logLevel = logging.INFO # Record everything
logger = icebridge_common.setUpLogger(options.outputFolder, logLevel,
'icebridge_processing_log_frames_' + \
str(options.startFrame) + "_" + str(options.stopFrame))
# Make sure we later know what we were doing
logger.info("full_processing_script.py " + " ".join(argsIn))
(out, err, status) = asp_system_utils.executeCommand(['uname', '-a'],
suppressOutput = True)
logger.info("Running on machine: " + out)
logger.info("Work dir is " + os.getcwd())
os.system("ulimit -c 0") # disable core dumps
os.system("umask 022") # enforce files be readable by others
# Perform some input checks and initializations
# These are not needed unless cameras are initialized
if options.inputCalFolder is None or not os.path.exists(options.inputCalFolder):
raise Exception("Missing camera calibration folder.")
if options.refDemFolder is None or not os.path.exists(options.refDemFolder):
raise Exception("Missing reference DEM folder.")
refDemName = icebridge_common.getReferenceDemName(options.site)
refDemPath = os.path.join(options.refDemFolder, refDemName)
if not os.path.exists(refDemPath):
raise Exception("Missing reference DEM: " + refDemPath)
# TODO: CLEAN UP!!!
# Set up the output folders
cameraFolder = icebridge_common.getCameraFolder(options.outputFolder)
imageFolder = icebridge_common.getImageFolder(options.outputFolder)
jpegFolder = icebridge_common.getJpegFolder(options.outputFolder)
orthoFolder = icebridge_common.getOrthoFolder(options.outputFolder)
fireballFolder = icebridge_common.getFireballFolder(options.outputFolder)
corrFireballFolder = icebridge_common.getCorrFireballFolder(options.outputFolder)
lidarFolder = icebridge_common.getLidarFolder(options.outputFolder)
navFolder = icebridge_common.getNavFolder(options.outputFolder)
navCameraFolder = icebridge_common.getNavCameraFolder(options.outputFolder)
processedFolder = icebridge_common.getProcessedFolder(options.outputFolder)
if options.outputCalCamera != "":
if options.maxOverlapRatio < 1:
raise Exception ("For optimizing intrinsics, must set --max-overlap-ratio to 1, " + \
"to always use consecutive frames.")
# Prepare to solve for intrinsics. Note that this modifies some things along the way.
(options, cameraFolder, navCameraFolder, processedFolder) = \
solveIntrinsics_Part1(options, jpegFolder, cameraFolder, navCameraFolder,
processedFolder, logger)
# Handle subfolder option. This is useful for comparing results with different parameters!
if options.processingSubfolder:
processedFolder = os.path.join(processedFolder, options.processingSubfolder)
logger.info('Will write to processing subfolder: ' + options.processingSubfolder)
# If something failed in the first attempt either in fetch or in
# convert, we will wipe bad files, and try to refetch/re-convert.
numAttempts = 1
if options.reFetch and (not options.noFetch):
numAttempts = 2
for attempt in range(numAttempts):
if numAttempts > 1:
logger.info("Fetch/convert attempt: " + str(attempt+1))
ans = runFetchConvert(options, isSouth, cameraFolder, imageFolder, jpegFolder, orthoFolder,
fireballFolder, corrFireballFolder, lidarFolder, processedFolder,
navFolder, navCameraFolder, refDemPath, logger)
if ans == 0:
break
if options.stopAfterFetch or options.dryRun or options.stopAfterConvert:
logger.info('Fetch/convert finished!')
return 0
# Call the processing routine
processTheRun(options, imageFolder, cameraFolder, lidarFolder, orthoFolder,
corrFireballFolder, processedFolder,
isSouth, refDemPath)
if options.outputCalCamera != "":
        # Finish solving for intrinsics.
solveIntrinsics_Part2(options, imageFolder, cameraFolder, lidarFolder, orthoFolder,
processedFolder, isSouth, logger)
# Run main function if file used from shell
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
|
my_package/FilterInterpolation/__init__.py | JamesPerlman/Dain-App | 7,517 | 12618963 | from .FilterInterpolationModule import *
|
softgym/registered_env.py | ipab-rad/softgym | 147 | 12618966 | <reponame>ipab-rad/softgym
from softgym.envs.pour_water import PourWaterPosControlEnv
from softgym.envs.pour_water_amount import PourWaterAmountPosControlEnv
from softgym.envs.pass_water import PassWater1DEnv
from softgym.envs.rope_flatten import RopeFlattenEnv
from softgym.envs.rope_configuration import RopeConfigurationEnv
from softgym.envs.cloth_flatten import ClothFlattenEnv
from softgym.envs.cloth_fold import ClothFoldEnv
from softgym.envs.cloth_drop import ClothDropEnv
from softgym.envs.cloth_fold_crumpled import ClothFoldCrumpledEnv
from softgym.envs.cloth_fold_drop import ClothFoldDropEnv
from collections import OrderedDict
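# Default construction arguments for each registered SoftGym environment, keyed by environment name.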
env_arg_dict = {
'PourWater': {'observation_mode': 'cam_rgb',
'action_mode': 'rotation_bottom',
'render_mode': 'fluid',
'deterministic': False,
'render': True,
'action_repeat': 8,
'headless': True,
'num_variations': 1000,
'horizon': 100,
'use_cached_states': True,
'camera_name': 'default_camera'},
'PourWaterAmount': {'observation_mode': 'cam_rgb',
'action_mode': 'rotation_bottom',
'render_mode': 'fluid',
'action_repeat': 8,
'deterministic': False,
'render': True,
'headless': True,
'num_variations': 1000,
'use_cached_states': True,
'horizon': 100,
'camera_name': 'default_camera'},
'RopeFlatten': {
'observation_mode': 'cam_rgb',
'action_mode': 'picker',
'num_picker': 2,
'render': True,
'headless': True,
'horizon': 75,
'action_repeat': 8,
'render_mode': 'cloth',
'num_variations': 1000,
'use_cached_states': True,
'deterministic': False
},
'RopeConfiguration': {'observation_mode': 'cam_rgb',
'action_mode': 'picker',
'num_picker': 2,
'render': True,
'headless': True,
                          'horizon': 100,  # this task is harder than just straightening the rope, therefore it has a larger horizon.
'action_repeat': 8,
'render_mode': 'cloth',
'num_variations': 1000,
'use_cached_states': True,
'deterministic': False},
'ClothFlatten': {'observation_mode': 'cam_rgb',
'action_mode': 'picker',
'num_picker': 2,
'render': True,
'headless': True,
'horizon': 100,
'action_repeat': 8,
'render_mode': 'cloth',
'num_variations': 1000,
'use_cached_states': True,
'deterministic': False},
'ClothFlattenPPP': {'observation_mode': 'cam_rgb',
'action_mode': 'pickerpickplace',
'num_picker': 2,
'render': True,
'headless': True,
'horizon': 20,
'action_repeat': 1,
'render_mode': 'cloth',
'num_variations': 1000,
'use_cached_states': True,
'deterministic': False},
'ClothFoldPPP': {'observation_mode': 'cam_rgb',
'action_mode': 'pickerpickplace',
'num_picker': 2,
'render': True,
'headless': True,
'horizon': 20,
'action_repeat': 1,
'render_mode': 'cloth',
'num_variations': 1000,
'use_cached_states': True,
'deterministic': False},
'ClothFold': {'observation_mode': 'cam_rgb',
'action_mode': 'picker',
'num_picker': 2,
'render': True,
'headless': True,
'horizon': 100,
'action_repeat': 8,
'render_mode': 'cloth',
'num_variations': 1000,
'use_cached_states': True,
'deterministic': False},
'ClothFoldCrumpled': {'observation_mode': 'cam_rgb',
'action_mode': 'picker',
'num_picker': 2,
'render': True,
'headless': True,
'horizon': 100,
'action_repeat': 8,
'render_mode': 'cloth',
'num_variations': 1000,
'use_cached_states': True,
'deterministic': False},
'ClothFoldDrop': {'observation_mode': 'cam_rgb',
'action_mode': 'picker',
'num_picker': 2,
'render': True,
'headless': True,
'horizon': 100,
'action_repeat': 8,
'render_mode': 'cloth',
'num_variations': 1000,
'use_cached_states': True,
'deterministic': False},
'ClothDrop': dict(observation_mode='cam_rgb',
action_mode='picker',
num_picker=2,
render=True,
headless=True,
horizon=30,
action_repeat=16,
render_mode='cloth',
num_variations=1000,
use_cached_states=True,
deterministic=False),
'PassWater': dict(observation_mode='cam_rgb',
action_mode='direct',
render=True,
headless=True,
horizon=75,
action_repeat=8,
render_mode='fluid',
deterministic=False,
num_variations=1000),
'PassWaterGoal': {
"observation_mode": 'point_cloud', # will be later wrapped by ImageEnv
"horizon": 75,
"action_mode": 'direct',
"deterministic": False,
"render_mode": 'fluid',
"render": True,
"headless": True,
"action_repeat": 8,
"num_variations": 1000,
},
"PourWaterGoal": {
'observation_mode': 'point_cloud',
'action_mode': 'direct',
'render_mode': 'fluid',
'deterministic': False,
'render': True,
'headless': True,
'num_variations': 1000,
'horizon': 100,
'camera_name': 'default_camera'
},
"ClothManipulate": dict(
observation_mode='point_cloud',
action_mode='picker',
num_picker=2,
render=True,
headless=True,
horizon=100,
action_repeat=8,
render_mode='cloth',
num_variations=1000,
deterministic=False
),
}
SOFTGYM_ENVS = OrderedDict({
'PourWater': PourWaterPosControlEnv,
'PourWaterAmount': PourWaterAmountPosControlEnv,
'PassWater': PassWater1DEnv,
'ClothFlatten': ClothFlattenEnv,
'ClothFold': ClothFoldEnv,
'ClothDrop': ClothDropEnv,
'ClothFoldDrop': ClothFoldDropEnv,
'ClothFlattenPPP': ClothFlattenEnv,
'ClothFoldPPP': ClothFoldEnv,
'ClothFoldCrumpled': ClothFoldCrumpledEnv,
'RopeFlatten': RopeFlattenEnv,
'RopeConfiguration': RopeConfigurationEnv,
})
|
python_modules/dagit/dagit_tests/starlette/test_app.py | makotonium/dagster | 4,606 | 12618977 | <filename>python_modules/dagit/dagit_tests/starlette/test_app.py<gh_stars>1000+
import gc
import objgraph
from dagit.graphql import GraphQLWS
from dagit.starlette import ROOT_ADDRESS_STATIC_RESOURCES
from dagit.version import __version__ as dagit_version
from dagster import __version__ as dagster_version
from dagster import execute_pipeline, graph, op, pipeline, reconstructable
from dagster_graphql.version import __version__ as dagster_graphql_version
from starlette.testclient import TestClient
EVENT_LOG_SUBSCRIPTION = """
subscription PipelineRunLogsSubscription($runId: ID!) {
pipelineRunLogs(runId: $runId) {
__typename
}
}
"""
def test_dagit_info(empty_app):
client = TestClient(empty_app)
response = client.get("/dagit_info")
assert response.status_code == 200
assert response.json() == {
"dagit_version": dagit_version,
"dagster_version": dagster_version,
"dagster_graphql_version": dagster_graphql_version,
}
def test_static_resources(empty_app):
client = TestClient(empty_app)
    # make sure we did not fall back to the index html
# for static resources at /
for address in ROOT_ADDRESS_STATIC_RESOURCES:
response = client.get(address)
assert response.status_code == 200, response.text
assert response.headers["content-type"] != "text/html"
response = client.get("/vendor/graphql-playground/middleware.js")
assert response.status_code == 200, response.text
assert response.headers["content-type"] != "application/js"
def test_graphql_get(empty_app):
client = TestClient(empty_app)
response = client.get(
"/graphql?query={__typename}",
)
assert response.status_code == 200, response.text
assert response.json() == {"data": {"__typename": "Query"}}
def test_graphql_post(empty_app):
client = TestClient(empty_app)
response = client.post(
"/graphql?query={__typename}",
)
assert response.status_code == 200, response.text
assert response.json() == {"data": {"__typename": "Query"}}
response = client.post(
"/graphql",
json={"query": "{__typename}"},
)
assert response.status_code == 200, response.text
assert response.json() == {"data": {"__typename": "Query"}}
def test_graphql_ws_error(empty_app):
# wtf pylint
# pylint: disable=not-context-manager
with TestClient(empty_app).websocket_connect("/graphql", str(GraphQLWS.PROTOCOL)) as ws:
ws.send_json({"type": GraphQLWS.CONNECTION_INIT})
ws.send_json(
{
"type": GraphQLWS.START,
"id": "1",
"payload": {"query": "subscription { oops }"},
}
)
response = ws.receive_json()
assert response["type"] == GraphQLWS.CONNECTION_ACK
response = ws.receive_json()
assert response["id"] == "1"
assert response["type"] == GraphQLWS.ERROR
def test_graphql_ws_success(instance, empty_app):
@pipeline
def _test():
pass
result = execute_pipeline(_test, instance=instance)
run_id = result.run_id
# wtf pylint
# pylint: disable=not-context-manager
with TestClient(empty_app).websocket_connect("/graphql", GraphQLWS.PROTOCOL) as ws:
ws.send_json({"type": GraphQLWS.CONNECTION_INIT})
ws.send_json(
{
"type": GraphQLWS.START,
"id": "1",
"payload": {"query": EVENT_LOG_SUBSCRIPTION, "variables": {"runId": run_id}},
}
)
response = ws.receive_json()
assert response["type"] == GraphQLWS.CONNECTION_ACK
response = ws.receive_json()
assert response["id"] == "1"
assert response["type"] == GraphQLWS.DATA
gc.collect()
assert len(objgraph.by_type("PipelineRunObservableSubscribe")) == 1
# after exiting the context manager and closing the connection
gc.collect()
assert len(objgraph.by_type("PipelineRunObservableSubscribe")) == 0
def test_download_debug_file(instance, empty_app):
@pipeline
def _test():
pass
result = execute_pipeline(_test, instance=instance)
run_id = result.run_id
response = TestClient(empty_app).get(f"/download_debug/{run_id}")
assert response.status_code == 200
assert response.headers["content-type"] == "application/gzip"
def _simple_job():
@op
def my_op():
print("STDOUT RULEZ") # pylint: disable=print-call
@graph
def my_graph():
my_op()
return my_graph.to_job()
def test_download_compute(instance, empty_app):
result = execute_pipeline(reconstructable(_simple_job), instance=instance)
run_id = result.run_id
response = TestClient(empty_app).get(f"/download/{run_id}/my_op/stdout")
assert response.status_code == 200
assert "STDOUT RULEZ" in str(response.content)
response = TestClient(empty_app).get(f"/download/{run_id}/jonx/stdout")
assert response.status_code == 404
|
AdminServer/appscale/admin/instance_manager/utils.py | loftwah/appscale | 790 | 12618992 | """ Common functions for managing AppServer instances. """
import fnmatch
import glob
import logging
import os
import shutil
import subprocess
from appscale.admin.constants import InvalidSource
from appscale.admin.instance_manager.constants import (
CONFLICTING_JARS, LOGROTATE_CONFIG_DIR, MODIFIED_JARS)
from appscale.common.constants import CONFIG_DIR
logger = logging.getLogger(__name__)
def fetch_file(host, location):
""" Copies a file from another machine.
Args:
host: A string specifying the IP address or hostname of the remote machine.
location: A string specifying the path to the file.
"""
key_file = os.path.join(CONFIG_DIR, 'ssh.key')
remote_location = '{}:{}'.format(host, location)
scp_cmd = ['scp', '-i', key_file,
'-o', 'StrictHostKeyChecking no',
remote_location, location]
subprocess.check_call(scp_cmd)
def find_web_inf(source_path):
""" Returns the location of a Java revision's WEB-INF directory.
Args:
source_path: A string specifying the location of the revision's source.
Returns:
A string specifying the location of the WEB-INF directory.
Raises:
BadConfigurationException if the directory is not found.
"""
# Check for WEB-INF directories that contain the required appengine-web.xml.
matches = []
for root, dirs, files in os.walk(source_path):
if 'appengine-web.xml' in files and root.endswith('/WEB-INF'):
matches.append(root)
if not matches:
raise InvalidSource('Unable to find WEB-INF directory')
# Use the shortest path.
shortest_match = matches[0]
for match in matches:
if len(match.split(os.sep)) < len(shortest_match.split(os.sep)):
shortest_match = match
return shortest_match
def copy_files_matching_pattern(file_path_pattern, dest):
""" Copies files matching the pattern to the destination directory.
Args:
file_path_pattern: The pattern of the files to be copied over.
dest: The destination directory.
"""
for file in glob.glob(file_path_pattern):
shutil.copy(file, dest)
def copy_modified_jars(source_path):
""" Copies AppScale SDK modifications to the lib folder.
Args:
source_path: A string specifying the location of the source code.
"""
web_inf_dir = find_web_inf(source_path)
lib_dir = os.path.join(web_inf_dir, 'lib')
if not os.path.isdir(lib_dir):
logger.info('Creating lib directory: {}'.format(lib_dir))
os.mkdir(lib_dir)
for pattern in MODIFIED_JARS:
copy_files_matching_pattern(pattern, lib_dir)
def remove_conflicting_jars(source_path):
""" Removes jars uploaded which may conflict with AppScale jars.
Args:
source_path: A string specifying the location of the source code.
"""
lib_dir = os.path.join(find_web_inf(source_path), 'lib')
if not os.path.isdir(lib_dir):
    logger.warning('Java source does not contain lib directory')
return
logger.info('Removing jars from {}'.format(lib_dir))
for file in os.listdir(lib_dir):
for pattern in CONFLICTING_JARS:
if fnmatch.fnmatch(file, pattern):
os.remove(os.path.join(lib_dir, file))
def remove_logrotate(project_id):
""" Removes logrotate script for the given project.
Args:
project_id: A string, the name of the project to remove logrotate for.
"""
app_logrotate_script = "{0}/appscale-{1}".\
format(LOGROTATE_CONFIG_DIR, project_id)
logger.debug("Removing script: {}".format(app_logrotate_script))
try:
os.remove(app_logrotate_script)
except OSError:
    logger.error("Error while removing log rotation for application: {}".
                 format(project_id))
def setup_logrotate(app_name, log_size):
""" Creates a logrotate script for the logs that the given application
will create.
Args:
app_name: A string, the application ID.
log_size: An integer, the size of logs that are kept per application server.
The size should be in bytes.
Returns:
True on success, False otherwise.
"""
# Write application specific logrotation script.
app_logrotate_script = "{0}/appscale-{1}".\
format(LOGROTATE_CONFIG_DIR, app_name)
log_prefix = 'app___{}'.format(app_name)
# Application logrotate script content.
contents = """/var/log/appscale/{log_prefix}*.log {{
size {size}
missingok
rotate 7
compress
delaycompress
notifempty
copytruncate
}}
/opt/appscale/logserver/requests-{app_name}*.log {{
size {size}
missingok
rotate 3
compress
delaycompress
notifempty
copytruncate
}}
""".format(log_prefix=log_prefix, app_name=app_name, size=log_size)
logger.debug("Logrotate file: {} - Contents:\n{}".
format(app_logrotate_script, contents))
with open(app_logrotate_script, 'w') as app_logrotate_fd:
app_logrotate_fd.write(contents)
return True
|
tests/transforms/label/test_remap_labels.py | siahuat0727/torchio | 1,340 | 12619013 | from torchio.transforms import RemapLabels
from ...utils import TorchioTestCase
class TestRemapLabels(TorchioTestCase):
"""Tests for `RemapLabels`."""
def test_remap(self):
remapping = {1: 2, 2: 1, 5: 10, 6: 11}
remap_labels = RemapLabels(remapping=remapping)
subject = self.get_subject_with_labels(labels=remapping.keys())
transformed = remap_labels(subject)
inverse_transformed = transformed.apply_inverse_transform()
self.assertEqual(
self.get_unique_labels(subject.label),
set(remapping.keys()),
)
self.assertEqual(
self.get_unique_labels(transformed.label),
set(remapping.values()),
)
self.assertEqual(
self.get_unique_labels(inverse_transformed.label),
set(remapping.keys()),
)
|
examples/tensorflow/object_detection/preprocessing/yolo_v4_preprocessing.py | MaximProshin/nncf | 136 | 12619046 | <filename>examples/tensorflow/object_detection/preprocessing/yolo_v4_preprocessing.py
"""
Copyright (c) 2022 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
from PIL import Image
import tensorflow as tf
from examples.tensorflow.common.object_detection.utils import box_utils
from examples.tensorflow.common.object_detection.utils.yolo_v4_utils import letterbox_resize
from examples.tensorflow.common.object_detection.utils.yolo_v4_utils import reshape_boxes
from examples.tensorflow.common.object_detection.utils.yolo_v4_utils import normalize_image
from examples.tensorflow.common.object_detection.utils.yolo_v4_utils import random_resize_crop_pad
from examples.tensorflow.common.object_detection.utils.yolo_v4_utils import random_horizontal_flip
from examples.tensorflow.common.object_detection.utils.yolo_v4_utils import random_mosaic_augment
from examples.tensorflow.common.object_detection.utils import dataloader_utils
from examples.tensorflow.common.object_detection.utils import input_utils
class YOLOv4Preprocessor:
"""Parser to parse an image and its annotations into a dictionary of tensors."""
def __init__(self, config, is_train):
"""
Initializes parameters for parsing annotations in the dataset.
"""
self._max_num_instances = config.preprocessing.get('max_num_instances', 100)
self._is_training = is_train
self._global_batch_size = config.batch_size
self._num_preprocess_workers = config.get('workers', tf.data.experimental.AUTOTUNE)
self._parse_fn = self._parse_train_data
self._parse_fn2 = self._parse_train_data2
self._input_shape = config.input_shape
self._enhance_mosaic_augment = config.preprocessing.enhance_mosaic_augment
self._anchors = config.anchors
self._num_classes = config.model_params.num_classes
self._multi_anchor_assign = config.preprocessing.multi_anchor_assign
def create_preprocess_input_fn(self):
"""Parses data to an image and associated training labels."""
return self._tfds_decoder, self._pipeline_fn
def _get_ground_truth_data(self, image, boxes, input_shape, max_boxes=100):
"""Random preprocessing for real-time data augmentation"""
image_size = image.size
model_input_size = tuple(reversed(input_shape))
image, padding_size, padding_offset = random_resize_crop_pad(image, target_size=model_input_size)
image, horizontal_flip = random_horizontal_flip(image)
image_data = np.array(image).astype(np.float32)
# Reshape boxes based on augment
boxes = reshape_boxes(boxes, src_shape=image_size, target_shape=model_input_size,
padding_shape=padding_size, offset=padding_offset,
horizontal_flip=horizontal_flip)
if len(boxes) > max_boxes:
boxes = boxes[:max_boxes]
box_data = np.zeros((max_boxes, 5))
if len(boxes) > 0:
box_data[:len(boxes)] = boxes
return image_data, box_data
def _preprocess(self, image, groundtruth_classes, groundtruth_boxes, input_shape):
image_np = image.numpy()
image_pil = Image.fromarray(image_np)
image_shape = tf.shape(input=image)[0:2]
denormalized_boxes = box_utils.denormalize_boxes(groundtruth_boxes, image_shape)
boxes = []
for denormalized_box, category_id in zip(denormalized_boxes.numpy(), groundtruth_classes.numpy()):
x_min = int(denormalized_box[1])
y_min = int(denormalized_box[0])
x_max = int(denormalized_box[3])
y_max = int(denormalized_box[2])
boxes.append([x_min, y_min, x_max, y_max, int(category_id)])
boxes = np.array(boxes)
input_shape = input_shape.numpy()
image, box = self._get_ground_truth_data(image_pil, boxes, input_shape)
return image, box
def _parse_train_data(self, data):
"""Parses data for training"""
image = data['image']
groundtruth_classes = data['groundtruth_classes']
groundtruth_boxes = data['groundtruth_boxes']
image, box = tf.py_function(self._preprocess,
[image, groundtruth_classes, groundtruth_boxes, self._input_shape],
[tf.float32, tf.float32])
image.set_shape([None, None, 3])
box.set_shape([None, 5])
image = input_utils.random_adjust_brightness(image)
image = input_utils.random_adjust_contrast(image)
image = input_utils.random_adjust_hue(image)
image = input_utils.random_adjust_saturation(image)
image = tf.math.divide(image, 255.0)
out = {}
out['image'] = image
out['box'] = box
out['source_id'] = data['source_id']
return out
def _preprocess_true_boxes(self, true_boxes,
input_shape, anchors,
num_classes, multi_anchor_assign,
iou_thresh=0.2):
"""
Preprocess true boxes to training input format
:param true_boxes: array, shape=(m, T, 5)
Absolute x_min, y_min, x_max, y_max, class_id relative to input_shape.
:param input_shape: array-like, hw, multiples of 32
:param anchors: array, shape=(N, 2), wh
:param num_classes: integer
:param multi_anchor_assign: boolean, whether to use iou_thresh to assign multiple
anchors for a single ground truth
:return y_true: list of array, shape like yolo_outputs, xywh are reletive value
"""
assert (true_boxes[..., 4] < num_classes).all(), 'class id must be less than num_classes'
num_layers = len(anchors)//3
anchor_mask = [[6,7,8], [3,4,5], [0,1,2]] if num_layers==3 else [[3,4,5], [0,1,2]]
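        # Each output layer is assigned 3 of the anchors; the last (typically largest) anchors go to the coarsest (stride-32) grid.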
#Transform box info to (x_center, y_center, box_width, box_height, cls_id)
#and image relative coordinate.
true_boxes = np.array(true_boxes, dtype='float32')
input_shape = np.array(input_shape, dtype='int32')
boxes_xy = (true_boxes[..., 0:2] + true_boxes[..., 2:4]) // 2
boxes_wh = true_boxes[..., 2:4] - true_boxes[..., 0:2]
true_boxes[..., 0:2] = boxes_xy / input_shape[::-1]
true_boxes[..., 2:4] = boxes_wh / input_shape[::-1]
batch_size = true_boxes.shape[0]
grid_shapes = [input_shape//{0:32, 1:16, 2:8}[l] for l in range(num_layers)]
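        # One target tensor per output layer: for each grid cell and each anchor of that layer,
        # store (x, y, w, h, objectness, one-hot class scores).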
y_true = [np.zeros((batch_size, grid_shapes[l][0], grid_shapes[l][1], len(anchor_mask[l]), 5 + num_classes),
dtype='float32') for l in range(num_layers)]
# Expand dim to apply broadcasting.
anchors = np.expand_dims(anchors, 0)
anchor_maxes = anchors / 2.
anchor_mins = -anchor_maxes
valid_mask = boxes_wh[..., 0] > 0
for b in range(batch_size):
# Discard zero rows.
wh = boxes_wh[b, valid_mask[b]]
if len(wh) == 0:
continue
# Expand dim to apply broadcasting.
wh = np.expand_dims(wh, -2)
box_maxes = wh / 2.
box_mins = -box_maxes
intersect_mins = np.maximum(box_mins, anchor_mins)
intersect_maxes = np.minimum(box_maxes, anchor_maxes)
intersect_wh = np.maximum(intersect_maxes - intersect_mins, 0.)
intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]
box_area = wh[..., 0] * wh[..., 1]
anchor_area = anchors[..., 0] * anchors[..., 1]
iou = intersect_area / (box_area + anchor_area - intersect_area + 0.0000001)
# Sort anchors according to IoU score
# to find out best assignment
best_anchors = np.argsort(iou, axis=-1)[..., ::-1]
if not multi_anchor_assign:
best_anchors = best_anchors[..., 0]
# keep index dim for the loop in following
best_anchors = np.expand_dims(best_anchors, -1)
for t, row in enumerate(best_anchors):
for l in range(num_layers):
for n in row:
# use different matching policy for single & multi anchor assign
if multi_anchor_assign:
matching_rule = (iou[t, n] > iou_thresh and n in anchor_mask[l])
else:
matching_rule = (n in anchor_mask[l])
if matching_rule:
i = np.floor(true_boxes[b, t, 0] * grid_shapes[l][1]).astype('int32')
j = np.floor(true_boxes[b, t, 1] * grid_shapes[l][0]).astype('int32')
k = anchor_mask[l].index(n)
c = true_boxes[b, t, 4].astype('int32')
y_true[l][b, j, i, k, 0:4] = true_boxes[b, t, 0:4]
y_true[l][b, j, i, k, 4] = 1
y_true[l][b, j, i, k, 5+c] = 1
return y_true
def _preprocess2(self, image_data, box_data):
image_data = image_data.numpy()
box_data = box_data.numpy()
if self._enhance_mosaic_augment:
# add random mosaic augment on batch ground truth data
image_data, box_data = random_mosaic_augment(image_data, box_data, prob=0.2)
anchors = np.array(self._anchors).astype(float).reshape(-1, 2)
y_true1, y_true2, y_true3 = self._preprocess_true_boxes(box_data,
self._input_shape,
anchors,
self._num_classes,
self._multi_anchor_assign)
return image_data, y_true1, y_true2, y_true3
def _parse_train_data2(self, data):
image_data = data['image']
box_data = data['box']
im_shape = image_data.shape
image_data, out0, out1, out2 = tf.py_function(self._preprocess2,
[image_data, box_data],
[tf.float32, tf.float32, tf.float32, tf.float32])
image_data.set_shape(im_shape)
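        # NOTE: the fixed 19/38/76 grids and last dimension 85 (= 4 box coords + 1 objectness + 80 classes)
        # assume a 608x608 input and an 80-class dataset.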
out0.set_shape([im_shape[0], 19, 19, 3, 85])
out1.set_shape([im_shape[0], 38, 38, 3, 85])
out2.set_shape([im_shape[0], 76, 76, 3, 85])
labels = {
'y_true_0': out0,
'y_true_1': out1,
'y_true_2': out2,
}
return image_data, labels
def _get_image_info(self, image):
desired_size = tf.convert_to_tensor(self._input_shape, dtype=tf.float32)
image_size = tf.cast(tf.shape(input=image)[0:2], tf.float32)
scaled_size = desired_size
scale = tf.minimum(scaled_size[0] / image_size[0],
scaled_size[1] / image_size[1])
scaled_size = tf.round(image_size * scale)
# Computes 2D image_scale.
image_scale = scaled_size / image_size
offset = tf.zeros((2,), tf.int32)
image_info = tf.stack([
image_size,
tf.cast(desired_size, tf.float32), image_scale,
tf.cast(offset, tf.float32)
])
return image_info
def _preprocess_predict_image(self, image):
image = image.numpy()
model_image_size = self._input_shape
image_pil = Image.fromarray(image)
if image_pil.mode != 'RGB':
image_pil = image_pil.convert('RGB')
resized_image = letterbox_resize(image_pil, tuple(reversed(model_image_size)))
image_data = np.asarray(resized_image).astype('float32')
image_data = normalize_image(image_data)
image_data = tf.convert_to_tensor(image_data, dtype=tf.float32)
return image_data
def _parse_predict_data(self, data):
"""Parses data for prediction"""
image_data = data['image']
image_shape = tf.shape(input=image_data)[0:2]
# needed only for eval
image_info = self._get_image_info(image_data)
# image preprocessing
image_data = tf.py_function(self._preprocess_predict_image, [image_data], Tout=tf.float32)
image_data.set_shape([None, None, 3])
labels = {
'image_info': image_info,
}
# Converts boxes from normalized coordinates to pixel coordinates.
boxes = box_utils.denormalize_boxes(data['groundtruth_boxes'], image_shape)
groundtruths = {
'source_id': data['source_id'],
'num_detections': tf.squeeze(tf.shape(data['groundtruth_classes'])),
'boxes': boxes,
'classes': data['groundtruth_classes'],
'areas': data['groundtruth_area'],
'is_crowds': tf.cast(data['groundtruth_is_crowd'], tf.int32),
}
groundtruths['source_id'] = dataloader_utils.process_source_id(groundtruths['source_id'])
groundtruths = dataloader_utils.pad_groundtruths_to_fixed_size(groundtruths, self._max_num_instances)
labels.update(groundtruths)
return image_data, labels
def _tfds_decoder(self, features_dict):
def _decode_image(features):
image = tf.image.decode_jpeg(features['image'], channels=3, dct_method='INTEGER_ACCURATE')
image.set_shape([None, None, 3])
return image
def _convert_labels_to_91_classes(features):
# 0..79 --> 0..90
match = tf.constant([1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 13, 14, 15, 16, 17, 18, 19, 20, 21,
22, 23, 24, 25, 27, 28, 31, 32, 33, 34,
35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
46, 47, 48, 49, 50, 51, 52, 53, 54, 55,
56, 57, 58, 59, 60, 61, 62, 63, 64, 65,
67, 70, 72, 73, 74, 75, 76, 77, 78, 79,
80, 81, 82, 84, 85, 86, 87, 88, 89, 90], dtype=tf.int64)
labels = features['objects']['label']
labels = tf.gather(match, labels, axis=None)
return labels
image = _decode_image(features_dict)
if self._is_training:
labels = features_dict['objects']['label']
else:
labels = _convert_labels_to_91_classes(features_dict)
decoded_tensors = {
'image': image,
'source_id': tf.cast(features_dict['image/id'], tf.int32),
'groundtruth_classes': labels,
'groundtruth_is_crowd': features_dict['objects']['is_crowd'],
'groundtruth_area': features_dict['objects']['area'],
'groundtruth_boxes': features_dict['objects']['bbox'],
}
return decoded_tensors
def _pipeline_fn(self, dataset, decoder_fn):
if self._is_training:
preprocess_input_fn = self._parse_fn
preprocess_pipeline = lambda record: preprocess_input_fn(decoder_fn(record))
dataset = dataset.map(preprocess_pipeline, num_parallel_calls=self._num_preprocess_workers)
dataset = dataset.batch(self._global_batch_size, drop_remainder=True)
# part of preprocessing which requires batches
preprocess_input_fn2 = self._parse_fn2
dataset = dataset.map(preprocess_input_fn2, num_parallel_calls=self._num_preprocess_workers)
else:
preprocess_input_fn = self._parse_predict_data
preprocess_pipeline = lambda record: preprocess_input_fn(decoder_fn(record))
dataset = dataset.map(preprocess_pipeline, num_parallel_calls=self._num_preprocess_workers)
dataset = dataset.batch(self._global_batch_size, drop_remainder=True)
return dataset
|
smtbx/refinement/constraints/geometrical/all.py | dperl-sol/cctbx_project | 155 | 12619052 | <gh_stars>100-1000
from __future__ import absolute_import, division, print_function
from smtbx.refinement.constraints.geometrical.hydrogens import *
|
bcs-ui/backend/container_service/clusters/flow_views/configs/k8s.py | laodiu/bk-bcs | 599 | 12619082 | <filename>bcs-ui/backend/container_service/clusters/flow_views/configs/k8s.py
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
Fetch K8S related configuration.
NOTE: there is no kube agent related configuration at this stage; bowei needs to handle the follow-up flow.
"""
import json
import logging
import socket
from urllib.parse import urlparse
from django.conf import settings
from backend.container_service.clusters import constants
from backend.container_service.clusters.utils import gen_hostname
from backend.container_service.misc.bke_client import BCSClusterClient
from backend.utils.error_codes import error_codes
logger = logging.getLogger(__name__)
BCS_SERVER_HOST = settings.BCS_SERVER_HOST['prod']
class ClusterConfig(object):
def __init__(self, base_cluster_config, area_info, cluster_name=""):
self.k8s_config = base_cluster_config
self.area_config = json.loads(area_info.get('configuration', '{}'))
def _split_ip_by_role(self, ip_list):
"""get master/etcd by role
NOTE: master and etcd are same in the current stage
"""
return ip_list, ip_list
def _get_clusters_vars(self, cluster_id, kube_master_list, etcd_list):
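        # Build hostname -> IP maps for the masters, the etcd peers, and the whole cluster.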
masters, etcdpeers, clusters = {}, {}, {}
for etcd_ip in etcd_list:
host_name = gen_hostname(etcd_ip, cluster_id, is_master=True)
etcdpeers[host_name] = etcd_ip
clusters[host_name] = etcd_ip
for master_ip in kube_master_list:
host_name = gen_hostname(master_ip, cluster_id, is_master=True)
masters[host_name] = master_ip
clusters[host_name] = master_ip
return masters, etcdpeers, clusters
def _get_common_vars(self, cluster_id, masters, etcdpeers, clusters, cluster_state):
self.k8s_config['common'].update(
{
'cluster_id': cluster_id,
'etcd_peers': etcdpeers,
'cluster_masters': masters,
'clusters': clusters,
'bk_registry': self.area_config['jfrog_registry'],
'dns_host': self.area_config['dns_host'],
'zk_urls': ','.join(self.area_config["zk_hosts"]),
}
)
if cluster_state == constants.ClusterState.BCSNew.value:
return
        # NOTE: for clusters not created through the bcs platform, the "common" config supports websvr; here websvr is a string
web_svr = self.k8s_config.get("websvr")
if web_svr:
self.k8s_config["common"]["websvr"] = web_svr[0]
def _get_node_vars(self, master_legal_host):
zk_urls = ','.join(self.area_config["zk_hosts"])
        # Update each key separately to guard against changes in the content behind a key
self.k8s_config['kubernetes.node'].update(
{'legal_hosts': master_legal_host, 'is_kube_master': True, 'standalone_kubelet': True}
)
self.k8s_config['kubernetes.master'].update({'legal_hosts': master_legal_host})
self.k8s_config['docker'].update({'legal_hosts': master_legal_host})
self.k8s_config['bcs.driver'].update({'legal_hosts': master_legal_host, 'zk_urls': zk_urls})
self.k8s_config['bcs.datawatch'].update({'legal_hosts': master_legal_host, 'zk_urls': zk_urls})
def _get_etcd_vars(self, etcd_legal_host):
self.k8s_config['etcd'].update({'legal_hosts': etcd_legal_host})
    def _add_kube_agent_config(self, cluster_id, params):
        """For imported (managed) clusters, the configuration required by the kube client component must be passed in when the cluster is created."""
if params.get("cluster_state") == constants.ClusterState.BCSNew.value:
return
# get bcs agent info
bcs_client = BCSClusterClient(
host=BCS_SERVER_HOST,
access_token=params["access_token"],
project_id=params["project_id"],
cluster_id=cluster_id,
)
bcs_cluster_info = bcs_client.get_or_register_bcs_cluster()
if not bcs_cluster_info.get("result"):
err_msg = bcs_cluster_info.get("message", "request bcs agent api error")
raise error_codes.APIError(err_msg)
bcs_cluster_data = bcs_cluster_info.get("data", {})
if not bcs_cluster_data:
raise error_codes.APIError("bcs agent api response is null")
self.k8s_config["bcs.kube_agent"].update(
{
"register_token": bcs_cluster_data["token"],
"bcs_api_server": BCS_SERVER_HOST,
"register_cluster_id": bcs_cluster_data["bcs_cluster_id"],
}
)
def get_request_config(self, cluster_id, master_ips, need_nat=True, **kwargs):
        # Get the master and etcd IP lists
kube_master_list, etcd_list = self._split_ip_by_role(master_ips)
        # Assemble the name -> IP maps
masters, etcdpeers, clusters = self._get_clusters_vars(cluster_id, kube_master_list, etcd_list)
        # Update the assembled parameters
self._get_common_vars(cluster_id, masters, etcdpeers, clusters, kwargs.get("cluster_state"))
master_legal_host, etcd_legal_host = list(masters.keys()), list(etcdpeers.keys())
self._get_node_vars(master_legal_host)
self._get_etcd_vars(etcd_legal_host)
self._add_kube_agent_config(cluster_id, kwargs)
return self.k8s_config
class NodeConfig(object):
def __init__(self, snapshot_config, op_type):
self.k8s_config = snapshot_config
self.op_type = op_type
def _get_clusters_vars(self, cluster_id, node_ip_list, master_ip_list):
clusters, masters, node_legals = {}, {}, {}
for node_ip in node_ip_list:
host_name = gen_hostname(node_ip, cluster_id, is_master=False)
node_legals[host_name] = node_ip
clusters[host_name] = node_ip
for master_ip in master_ip_list:
host_name = gen_hostname(master_ip, cluster_id, is_master=True)
clusters[host_name] = master_ip
masters[host_name] = master_ip
return clusters, masters, node_legals
def _get_common_vars(self, cluster_id, masters, clusters):
self.k8s_config['common'].update({'cluster_id': cluster_id, 'cluster_masters': masters, 'clusters': clusters})
def _get_network_vars(self, node_legals, kubeapps_master_legal_host):
self.k8s_config['network_plugin'].update({'legal_hosts': list(node_legals.keys()), 'plugin_type': 'flannel'})
if self.op_type == constants.OpType.ADD_NODE.value:
self.k8s_config['network_plugin']['legal_hosts'] = kubeapps_master_legal_host
self.k8s_config['kubeapps.network_plugin'] = self.k8s_config['network_plugin']
self.k8s_config['dns'].update({'legal_hosts': kubeapps_master_legal_host, 'dns_type': 'kubedns'})
def _get_node_vars(self, node_legals, kubeapps_master_legal_host, access_token, project_id, cluster_id):
legal_hosts = list(node_legals.keys())
self.k8s_config['kubernetes.node'].update(
{'legal_hosts': legal_hosts, 'is_kube_master': False, 'standalone_kubelet': False}
)
self.k8s_config['docker'].update({'legal_hosts': legal_hosts})
self.k8s_config['agent.cadvisorbeat'].update({'legal_hosts': legal_hosts})
self.k8s_config['agent.logbeat'].update({'legal_hosts': legal_hosts})
if self.op_type == constants.OpType.ADD_NODE.value:
# get bcs agent info
bcs_client = BCSClusterClient(
host=BCS_SERVER_HOST, access_token=access_token, project_id=project_id, cluster_id=cluster_id
)
bcs_cluster_info = bcs_client.get_or_register_bcs_cluster()
if not bcs_cluster_info.get('result'):
err_msg = bcs_cluster_info.get('message', 'request bcs agent api error')
raise error_codes.APIError(err_msg)
bcs_cluster_data = bcs_cluster_info.get('data', {})
if not bcs_cluster_data:
raise error_codes.APIError('bcs agent api response is null')
self.k8s_config['bcs.kube_agent'].update(
{
'legal_hosts': kubeapps_master_legal_host,
'register_token': bcs_cluster_data['token'],
'bcs_api_server': BCS_SERVER_HOST,
'register_cluster_id': bcs_cluster_data['bcs_cluster_id'],
}
)
self.k8s_config['kubeapps.kube_agent'].update({'legal_hosts': kubeapps_master_legal_host})
        # Depending on the operation type
if self.op_type == constants.OpType.DELETE_NODE.value:
self.k8s_config['kubeapps.node'].update({'legal_hosts': kubeapps_master_legal_host, 'nodes': node_legals})
def _get_secrets_vars(self):
self.k8s_config['secrets.kubernetes'].update({'legal_hosts': []})
    def _get_prometheus(self, kubeapps_master_legal_host):
        """Get the prometheus configuration"""
self.k8s_config.update(
{
'kubeapps.prometheus': {'legal_hosts': kubeapps_master_legal_host},
'agent.prometheus': {'legal_hosts': kubeapps_master_legal_host},
}
)
def get_request_config(self, access_token, project_id, cluster_id, master_ip_list, ip_list):
        # Get the master and node host name maps
clusters, masters, node_legals = self._get_clusters_vars(cluster_id, ip_list, master_ip_list)
kubeapps_master_legal_host = [
gen_hostname(master_ip_list[0], cluster_id, True),
]
self._get_common_vars(cluster_id, masters, clusters)
self._get_node_vars(node_legals, kubeapps_master_legal_host, access_token, project_id, cluster_id)
self._get_network_vars(node_legals, kubeapps_master_legal_host)
self._get_secrets_vars()
self._get_prometheus(kubeapps_master_legal_host)
return self.k8s_config
|
library/oci_tenancy_facts.py | slmjy/oci-ansible-modules | 106 | 12619095 | <gh_stars>100-1000
#!/usr/bin/python
# Copyright (c) 2017, 2018, Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_tenancy_facts
short_description: Retrieve details about a tenancy in Oracle Cloud Infrastructure
description:
- This module retrieves details about a tenancy in Oracle Cloud Infrastructure.
version_added: "2.5"
options:
tenancy_id:
        description: The OCID of the tenancy for which details need to be retrieved
required: true
aliases: [ 'id' ]
author: "<NAME> (@sivakumart)"
extends_documentation_fragment: oracle
"""
EXAMPLES = """
- name: Get details of the specified tenancy
oci_tenancy_facts:
id: "ocid1.tenancy.oc1..xxxxxEXAMPLExxxxx...o244pucq"
"""
RETURN = """
tenancy:
description: Information about the specified tenancy
returned: on success
type: complex
contains:
id:
description: The OCID of the tenancy.
returned: always
type: string
sample: "ocid1.tenancy.oc1..xxxxxEXAMPLExxxxx...o244pucq"
name:
description: The name of the tenancy.
returned: always
type: string
sample: "Acme corp"
description:
description: The description of the tenancy.
returned: always
type: string
sample: "Acme corp's tenancy"
home_region_key:
description: The region key for the tenancy's home region.
returned: always
type: string
sample: "IAD"
sample: {
"home_region_key": null,
"description": "Acme Corp's tenancy",
"name": "acme-corp",
"id": "ocid1.tenancy.oc1..xxxxxEXAMPLExxxxx...o244pucq"
}
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.oracle import oci_utils
try:
from oci.identity.identity_client import IdentityClient
from oci.util import to_dict
from oci.exceptions import ServiceError
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
def get_tenancy_details(identity_client, module):
try:
tenancy_ocid = module.params["tenancy_id"]
tenancy = oci_utils.call_with_backoff(
identity_client.get_tenancy, tenancy_id=tenancy_ocid
).data
except ServiceError as ex:
module.fail_json(msg=ex.message)
return to_dict(tenancy)
def main():
module_args = oci_utils.get_common_arg_spec()
module_args.update(dict(tenancy_id=dict(type="str", required=True, aliases=["id"])))
module = AnsibleModule(argument_spec=module_args, supports_check_mode=False)
if not HAS_OCI_PY_SDK:
module.fail_json(msg="oci python sdk required for this module.")
identity_client = oci_utils.create_service_client(module, IdentityClient)
result = get_tenancy_details(identity_client, module)
module.exit_json(tenancy=result)
if __name__ == "__main__":
main()
|
tests/model_tests/app/signals.py | if1live/importd | 183 | 12619099 | from django.conf import settings
settings.SIGNALS_IMPORTED=True |
picoCTF-web/tests/integration/test_teams.py | minhnq1618/picoCTF | 280 | 12619212 | """Tests for the /api/v1/teams routes."""
from pytest_mongo import factories
from pytest_redis import factories
from .common import ( # noqa (fixture)
ADMIN_DEMOGRAPHICS,
clear_db,
client,
decode_response,
get_csrf_token,
register_test_accounts,
TEACHER_DEMOGRAPHICS,
STUDENT_DEMOGRAPHICS,
get_conn,
)
def test_create_team(mongo_proc, redis_proc, client): # noqa (fixture)
"""Tests the POST /teams endpoint."""
clear_db()
register_test_accounts()
# Attempt to create a new team as a teacher
client.post(
"/api/v1/user/login",
        json={
            "username": TEACHER_DEMOGRAPHICS["username"],
            "password": TEACHER_DEMOGRAPHICS["password"],
},
)
res = client.post(
"/api/v1/teams", json={"team_name": "newteam", "team_password": "<PASSWORD>"}
)
assert res.status_code == 403
assert res.json["message"] == "Teachers may not create teams"
client.get("/api/v1/user/logout")
# Attempt to create a team with a name previously used by a user
client.post(
"/api/v1/user/login",
        json={
            "username": STUDENT_DEMOGRAPHICS["username"],
            "password": STUDENT_DEMOGRAPHICS["password"],
},
)
res = client.post(
"/api/v1/teams",
json={"team_name": ADMIN_DEMOGRAPHICS["username"], "team_password": "<PASSWORD>"},
)
assert res.status_code == 409
assert res.json["message"] == "There is already a user with this name."
# Add a mock team and attempt to create a team with the same name
db = get_conn()
db.teams.insert({"team_name": "test teamname"})
res = client.post(
"/api/v1/teams", json={"team_name": "test teamname", "team_password": "<PASSWORD>"}
)
assert res.status_code == 409
assert res.json["message"] == "There is already a team with this name."
# Create and join a team
res = client.post(
"/api/v1/teams", json={"team_name": "newteam", "team_password": "<PASSWORD>"}
)
assert res.status_code == 201
assert res.json["success"] is True
new_tid = res.json["tid"]
# Check that membership has been transferred
user = db.users.find_one({"username": STUDENT_DEMOGRAPHICS["username"]})
old_team = db.teams.find_one({"team_name": STUDENT_DEMOGRAPHICS["username"]})
new_team = db.teams.find_one({"tid": new_tid})
assert user["tid"] == new_tid
assert old_team["size"] == 0
assert new_team["size"] == 1
# Attempt to create another team as the same user
res = client.post(
"/api/v1/teams", json={"team_name": "newteam2", "team_password": "<PASSWORD>"}
)
assert res.status_code == 422
assert (
res.json["message"] == "You can only create one new team per " + "user account!"
)
|
torchpq/legacy/IVFPQTopk.py | mhamilton723/TorchPQ | 103 | 12619214 | <filename>torchpq/legacy/IVFPQTopk.py
import torch
import numpy as np
import math
from ..kernels import ComputeProductCuda
class IVFPQTopk:
def __init__(self,
n_subvectors,
n_clusters,
n_cs=4,
):
assert torch.cuda.is_available()
self.n_subvectors = n_subvectors
self.n_clusters = n_clusters
self.n_cs = n_cs
self.sm_size = n_subvectors * 256 * 4
self.compute_product = ComputeProductCuda(
m=n_subvectors,
k=n_clusters,
n_cs=n_cs,
sm_size=self.sm_size
)
@staticmethod
def remaining_memory():
if torch.cuda.is_available():
torch.cuda.empty_cache()
total_memory = torch.cuda.get_device_properties(0).total_memory
remaining = total_memory - torch.cuda.memory_reserved()
else:
remaining = 0
return remaining
def get_similarity(self, data, precomputed, is_empty, div_start, div_size):
max_out_size = div_size.sum(dim=1).max().item()
n_subvectors, n_query, n_clusters = precomputed.shape
n_probe = div_start.shape[1]
values, indices = self.compute_product(
data = data,
precomputed = precomputed,
is_empty = is_empty,
div_start = div_start,
div_size = div_size,
max_out_size = max_out_size,
)
return values, indices
def __call__(self, k, data, precomputed, is_empty, div_start, div_size):
"""
k: dtype : int
data: shape : [n_subvectors // n_cs, n_data, n_cs], dtype : uint8
precomputed: shape : [n_subvectors, n_query, n_clusters], dtype : float32
is_empty: shape : [n_data], dtype : uint8
div_start: shape : [n_query, n_probe], dtype : int32
div_size: shape : [n_query, n_probe], dtype : int32
"""
max_out_size = div_size.sum(dim=1).max().item()
n_subvectors, n_query, n_clusters = precomputed.shape
n_probe = div_start.shape[1]
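    # final_v / final_i accumulate the top-k similarity values and indices per query, each shaped [n_query, k].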
final_v = torch.zeros(n_query, k, device="cuda:0", dtype=torch.float32)
final_i = torch.zeros(n_query, k, device="cuda:0", dtype=torch.int32)
remaining = self.remaining_memory()
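    # Split the queries into partitions until one partition's temporary buffers
    # (top-k values/indices plus, when partitioned, per-partition copies of the
    # precomputed table and division arrays) fit into the remaining GPU memory.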
n_partitions = 1
while True:
if n_partitions > n_query:
raise RuntimeError("No enough GPU memory")
sub_n_query = math.ceil(n_query / n_partitions)
required = sub_n_query * max_out_size * 2 * 4
if n_partitions > 1:
required += sub_n_query * n_subvectors * n_clusters * 4
required += sub_n_query * n_probe * 2 * 4
if required <= remaining:
break
n_partitions *= 2
for i in range(n_partitions):
start = i * sub_n_query
end = (i+1) * sub_n_query
if end > n_query:
end = n_query
if n_partitions > 1:
sub_precomputed = precomputed[:, start:end].contiguous()
sub_div_start = div_start[start:end].contiguous()
sub_div_size = div_size[start:end].contiguous()
sub_mos = sub_div_size.sum(dim=1).max().item()
else:
sub_precomputed = precomputed
sub_div_start = div_start
sub_div_size = div_size
sub_mos = max_out_size
sub_v, sub_i = self.compute_product(
data = data,
precomputed = sub_precomputed,
is_empty = is_empty,
div_start = sub_div_start,
div_size = sub_div_size,
max_out_size = sub_mos,
)
del sub_precomputed
sub_k = min(k, sub_mos)
sorted_v, sorted_i = torch.topk(sub_v, dim=-1, k=sub_k)
del sub_v
final_v[start:end, :sub_k] = sorted_v
del sorted_v
final_i[start:end, :sub_k] = torch.gather(input=sub_i, index=sorted_i, dim=1)
del sub_i, sorted_i
### TEST
# def naive_pqd(data, distances, is_empty):
# o, n, q = data.shape
# m = o * q
# arange = torch.arange(m, device="cuda:0")
# data = data.transpose(0, 1).reshape(n,m)
# data = data[~is_empty ]
# result = distances[arange, :, data[:].long() ].sum(dim=1).t()
# return result
return (final_v, final_i) |
applications/FluidTransportApplication/python_scripts/apply_vector_constraint_function_process.py | lkusch/Kratos | 778 | 12619219 | import KratosMultiphysics
import KratosMultiphysics.FluidTransportApplication as KratosFluidTransport
import math
def Factory(settings, Model):
if(type(settings) != KratosMultiphysics.Parameters):
raise Exception("expected input shall be a Parameters object, encapsulating a json string")
return ApplyVectorConstraintFunctionProcess(Model, settings["Parameters"])
## All the python processes should be derived from "python_process"
class ApplyVectorConstraintFunctionProcess(KratosMultiphysics.Process):
def __init__(self, Model, settings ):
KratosMultiphysics.Process.__init__(self)
model_part = Model[settings["model_part_name"].GetString()]
self.components_process_list = []
if settings["active"][0].GetBool() == True:
for node in model_part.Nodes:
velocity = 10000*node.Y*(1-node.X*node.X)
#velocity = 0.8 * (node.Y - 0.5)
node.SetSolutionStepValue(KratosMultiphysics.VELOCITY_X,velocity)
if settings["active"][1].GetBool() == True:
for node in model_part.Nodes:
velocity = -10000*node.X*(1-node.Y*node.Y)
#velocity = - 0.8 * (node.X - 0.5)
node.SetSolutionStepValue(KratosMultiphysics.VELOCITY_Y,velocity)
if settings["active"][2].GetBool() == True:
for node in model_part.Nodes:
velocity = 0.0
node.SetSolutionStepValue(KratosMultiphysics.VELOCITY_Z,velocity)
# def ExecuteInitialize(self):
# for component in self.components_process_list:
# component.ExecuteInitialize()
# def ExecuteInitializeSolutionStep(self):
# for component in self.components_process_list:
# component.ExecuteInitializeSolutionStep() |
utils/lib/pb_type_xml.py | bl0x/symbiflow-arch-defs | 183 | 12619245 | import os
import lxml.etree as ET
XI_URL = "http://www.w3.org/2001/XInclude"
XI_INCLUDE = "{%s}include" % XI_URL
VPR_TILE_PREFIX = 'BLK-TL-'
def add_vpr_tile_prefix(tile):
""" Add tile prefix.
This avoids namespace collision when embedding a site (e.g. SLICEL) as a
tile.
"""
return VPR_TILE_PREFIX + tile
def object_ref(pb_name, pin_name, pb_idx=None, pin_idx=None):
pb_addr = ''
if pb_idx is not None:
pb_addr = '[{}]'.format(pb_idx)
pin_addr = ''
if pin_idx is not None:
pin_addr = '[{}]'.format(pin_idx)
return '{}{}.{}{}'.format(pb_name, pb_addr, pin_name, pin_addr)
def add_pinlocations(
tile_name, xml, fc_xml, pin_assignments, wires, sub_tile_name=None
):
""" Adds the pin locations.
It requires the ports of the physical tile which are retrieved
by the pb_type.xml definition.
Optionally, a sub_tile_name can be assigned. This is necessary to have
    unique sub tile names in case a tile contains more than one sub_tile type.
"""
pinlocations_xml = ET.SubElement(
xml, 'pinlocations', {
'pattern': 'custom',
}
)
sides = {}
for pin in wires:
for side in pin_assignments['pin_directions'][tile_name][pin]:
if side not in sides:
sides[side] = []
if sub_tile_name is not None:
name = sub_tile_name
else:
name = tile_name
sides[side].append(object_ref(add_vpr_tile_prefix(name), pin))
for side, pins in sides.items():
ET.SubElement(pinlocations_xml, 'loc', {
'side': side.lower(),
}).text = ' '.join(pins)
direct_pins = set()
for direct in pin_assignments['direct_connections']:
if direct['from_pin'].split('.')[0] == tile_name:
direct_pins.add(direct['from_pin'].split('.')[1])
if direct['to_pin'].split('.')[0] == tile_name:
direct_pins.add(direct['to_pin'].split('.')[1])
for fc_override in direct_pins:
ET.SubElement(
fc_xml, 'fc_override', {
'fc_type': 'frac',
'fc_val': '0.0',
'port_name': fc_override,
}
)
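# Illustrative sketch (added for clarity; not part of the original module): the
# minimal shape of the pin_assignments argument consumed above, filled with
# hypothetical tile, pin and side names. Each pin maps to the tile sides it is
# exposed on, and direct connections are given as "TILE.PIN" strings.
_EXAMPLE_PIN_ASSIGNMENTS = {
    'pin_directions': {
        'DEMO_TILE': {
            'DATA_IN': ['NORTH'],
            'DATA_OUT': ['SOUTH'],
        },
    },
    'direct_connections': [
        {
            'from_pin': 'DEMO_TILE.DATA_OUT',
            'to_pin': 'OTHER_TILE.DATA_IN',
        },
    ],
}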
def add_fc(xml):
fc_xml = ET.SubElement(
xml, 'fc', {
'in_type': 'abs',
'in_val': '2',
'out_type': 'abs',
'out_val': '2',
}
)
return fc_xml
def add_switchblock_locations(xml):
ET.SubElement(xml, 'switchblock_locations', {
'pattern': 'all',
})
def get_site_pin_wire(equivalent_site_pin, site_pins):
"""Returns the site pin wire name of the original site location, given the site pin
from the equivalent site.
This function requires:
- equivalent_site_pin: pin belonging to the equivalent site used to find
the corresponding pin of the original site
- site_pins: list of pins belonging to the original site
We make the assumption that the name of the pin (prior to stripping the tile name prefix)
is the same, both for the original and the equivalent sites:
"""
for site_pin in site_pins:
if site_pin.name == equivalent_site_pin.name:
return site_pin.wire
assert False, "No equivalent pin found!"
def start_sub_tile(sub_tile_name, input_wires, output_wires):
"""This function returns the sub tile definition for a given tile.
This function requires:
- sub_tile_name: this must be a unique name among the sibling sub_tiles
of a given tile.
- input/output wires: all the input/output wires of the tile that need
to be assigned to the new sub tile.
                          In case there are two or more sub tiles that are the
                          same but differ only in pin locations, the I/O wires
                          must be the same for each sub tile.
The returned sub tile contains only input, output and clock ports.
The rest of the tags are going to be added by the caller.
As default, each sub tile has capacity of 1. If there are two or more sites for
each site type in a tile, there will be the same number of sub tiles, each with
capacity of 1.
"""
sub_tile_xml = ET.Element(
'sub_tile', {
'name': add_vpr_tile_prefix(sub_tile_name),
'capacity': "1",
}
)
# Input definitions for the TILE
sub_tile_xml.append(ET.Comment(" Sub Tile Inputs "))
for name in sorted(input_wires):
input_type = 'input'
if name.startswith('CLK_BUFG_'):
if name.endswith('I0') or name.endswith('I1'):
input_type = 'clock'
elif 'CLK' in name:
input_type = 'clock'
ET.SubElement(
sub_tile_xml,
input_type,
{
'name': name,
'num_pins': '1'
},
)
# Output definitions for the TILE
sub_tile_xml.append(ET.Comment(" Sub Tile Outputs "))
for name in sorted(output_wires):
ET.SubElement(
sub_tile_xml,
'output',
{
'name': name,
'num_pins': '1'
},
)
return sub_tile_xml
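# Illustrative sketch (added for clarity; not part of the original module): a
# minimal call to start_sub_tile() with made-up wire names. 'CLK_IN' matches
# the 'CLK' heuristic above and becomes a clock port, 'DATA_IN' stays a plain
# input and 'DATA_OUT' an output; the sub tile name receives the VPR prefix.
def _example_start_sub_tile():
    demo = start_sub_tile('DEMO_TILE', ['CLK_IN', 'DATA_IN'], ['DATA_OUT'])
    return ET.tostring(demo, pretty_print=True).decode('utf-8')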
def start_heterogeneous_tile(
tile_name,
pin_assignments,
sites,
equivalent_sites,
):
""" Returns a new tile xml definition.
Input parameters are:
- tile_name: name to assign to the tile
- pin_assignments: location of the pins to correctly build pin locations
- sites: set of pb_types that are going to be related to this tile
Sites contains a set of different kinds of sub tiles to be added to the tile definition,
each corresponding to one of the site types present in the parameter.
As an example:
- HCLK_IOI has in total 9 sites:
- 4 BUFR
- 4 BUFIO
- 1 IDELAYCTRL
The resulting sites parameter is a dictionary that looks like this:
{
"BUFR": [bufr_instance_1, bufr_instance_2, ...],
"BUFIO": [bufio_instance_1, bufio_instance_2, ...],
"IDELAYCTRL": [idleayctrl_instance_1]
}
"""
# Check whether sites and Input/Output wires are mutually exclusive
assert sites
tile_xml = ET.Element(
'tile',
{
'name': add_vpr_tile_prefix(tile_name),
},
nsmap={'xi': XI_URL},
)
for num_sub_tile, site_tuple in enumerate(sites):
_, site, input_wires, output_wires = site_tuple
site_type = site.type
sub_tile_name = "{}_{}_{}".format(tile_name, site_type, num_sub_tile)
sub_tile_xml = start_sub_tile(sub_tile_name, input_wires, output_wires)
fc_xml = add_fc(sub_tile_xml)
add_pinlocations(
tile_name,
sub_tile_xml,
fc_xml,
pin_assignments,
set(input_wires) | set(output_wires),
sub_tile_name=sub_tile_name
)
equivalent_sites_xml = ET.Element('equivalent_sites')
site_xml = ET.Element(
'site', {
'pb_type': add_vpr_tile_prefix(site.type),
'pin_mapping': 'custom'
}
)
site_pins = site.site_pins
for site_pin in site_pins:
add_tile_direct(
site_xml,
tile=object_ref(
add_vpr_tile_prefix(sub_tile_name),
site_pin.wire,
),
pb_type=object_ref(
pb_name=add_vpr_tile_prefix(site.type),
pin_name=site_pin.name,
),
)
equivalent_sites_xml.append(site_xml)
for equivalent_site_type in equivalent_sites[site_type]:
eq_site = [
s[1] for s in sites if s[1].type == equivalent_site_type
][0]
site_xml = ET.Element(
'site', {
'pb_type': add_vpr_tile_prefix(eq_site.type),
'pin_mapping': 'custom'
}
)
for equivalent_site_pin in eq_site.site_pins:
site_pin_wire = get_site_pin_wire(
equivalent_site_pin, site_pins
)
add_tile_direct(
site_xml,
tile=object_ref(
add_vpr_tile_prefix(sub_tile_name),
site_pin_wire,
),
pb_type=object_ref(
pb_name=add_vpr_tile_prefix(eq_site.type),
pin_name=equivalent_site_pin.name,
),
)
equivalent_sites_xml.append(site_xml)
sub_tile_xml.append(equivalent_sites_xml)
tile_xml.append(sub_tile_xml)
return tile_xml
def start_tile(
tile_name,
pin_assignments,
input_wires,
output_wires,
):
""" Returns a new tile xml definition.
Input parameters are:
- tile_name: name to assign to the tile
- pin_assignments: location of the pins to correctly build pin locations
- input and output wires: needed to correctly assign input, output and clock ports
"""
assert bool(input_wires) and bool(output_wires)
tile_xml = ET.Element(
'tile',
{
'name': add_vpr_tile_prefix(tile_name),
},
nsmap={'xi': XI_URL},
)
sub_tile_xml = start_sub_tile(tile_name, input_wires, output_wires)
fc_xml = add_fc(sub_tile_xml)
add_pinlocations(
tile_name, sub_tile_xml, fc_xml, pin_assignments,
set(input_wires) | set(output_wires)
)
tile_xml.append(sub_tile_xml)
return tile_xml
def start_pb_type(
pb_type_name,
pin_assignments,
input_wires,
output_wires,
):
""" Starts a pb_type by adding input, clock and output tags. """
pb_type_xml = ET.Element(
'pb_type',
{
'name': add_vpr_tile_prefix(pb_type_name),
},
nsmap={'xi': XI_URL},
)
pb_type_xml.append(ET.Comment(" Tile Inputs "))
# Input definitions for the TILE
for name in sorted(input_wires):
input_type = 'input'
if name.startswith('CLK_BUFG_'):
if name.endswith('I0') or name.endswith('I1'):
input_type = 'clock'
elif 'CLK' in name:
input_type = 'clock'
ET.SubElement(
pb_type_xml,
input_type,
{
'name': name,
'num_pins': '1'
},
)
pb_type_xml.append(ET.Comment(" Tile Outputs "))
for name in sorted(output_wires):
# Output definitions for the TILE
ET.SubElement(
pb_type_xml,
'output',
{
'name': name,
'num_pins': '1'
},
)
pb_type_xml.append(ET.Comment(" Internal Sites "))
return pb_type_xml
def add_tile_direct(xml, tile, pb_type):
""" Add a direct tag to the interconnect_xml. """
ET.SubElement(xml, 'direct', {'from': tile, 'to': pb_type})
def remove_vpr_tile_prefix(name):
""" Removes tile prefix.
Raises
------
Assert error if name does not start with VPR_TILE_PREFIX
"""
assert name.startswith(VPR_TILE_PREFIX)
return name[len(VPR_TILE_PREFIX):]
def write_xml(fname, xml):
""" Writes XML to disk. """
pb_type_str = ET.tostring(xml, pretty_print=True).decode('utf-8')
dirname, basefname = os.path.split(fname)
os.makedirs(dirname, exist_ok=True)
with open(fname, 'w') as f:
f.write(pb_type_str)
f.close()
class ModelXml(object):
""" Simple model.xml writter. """
def __init__(self, f, site_directory):
self.f = f
self.model_xml = ET.Element(
'models',
nsmap={'xi': XI_URL},
)
self.site_model = site_directory + "/{0}/{1}.model.xml"
def add_model_include(self, site_type, instance_name):
ET.SubElement(
self.model_xml, XI_INCLUDE, {
'href':
self.site_model.format(
site_type.lower(), instance_name.lower()
),
'xpointer':
"xpointer(models/child::node())"
}
)
def write_model(self):
write_xml(self.f, self.model_xml)
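# Illustrative sketch (added for clarity; not part of the original module): the
# file path and site names below are hypothetical. ModelXml accumulates one
# xi:include per site model and writes the merged models file on request.
def _example_model_xml():
    models = ModelXml(f='build/model.xml', site_directory='sites')
    models.add_model_include('SLICEL', 'slicel_instance')
    models.write_model()  # serializes the <models> tree to build/model.xml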
def add_direct(xml, input, output):
""" Add a direct tag to the interconnect_xml. """
ET.SubElement(
xml, 'direct', {
'name': '{}_to_{}'.format(input, output),
'input': input,
'output': output
}
)
|
homeassistant/components/twentemilieu/calendar.py | mtarjoianu/core | 30,023 | 12619283 | """Support for Twente Milieu Calendar."""
from __future__ import annotations
from datetime import datetime
from homeassistant.components.calendar import CalendarEntity, CalendarEvent
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_ID
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
import homeassistant.util.dt as dt_util
from .const import DOMAIN, WASTE_TYPE_TO_DESCRIPTION
from .entity import TwenteMilieuEntity
async def async_setup_entry(
hass: HomeAssistant,
entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up Twente Milieu calendar based on a config entry."""
coordinator = hass.data[DOMAIN][entry.data[CONF_ID]]
async_add_entities([TwenteMilieuCalendar(coordinator, entry)])
class TwenteMilieuCalendar(TwenteMilieuEntity, CalendarEntity):
"""Defines a Twente Milieu calendar."""
_attr_name = "<NAME>"
_attr_icon = "mdi:delete-empty"
def __init__(
self,
coordinator: DataUpdateCoordinator,
entry: ConfigEntry,
) -> None:
"""Initialize the Twente Milieu entity."""
super().__init__(coordinator, entry)
self._attr_unique_id = str(entry.data[CONF_ID])
self._event: CalendarEvent | None = None
@property
def event(self) -> CalendarEvent | None:
"""Return the next upcoming event."""
return self._event
async def async_get_events(
self, hass: HomeAssistant, start_date: datetime, end_date: datetime
) -> list[CalendarEvent]:
"""Return calendar events within a datetime range."""
events: list[CalendarEvent] = []
for waste_type, waste_dates in self.coordinator.data.items():
events.extend(
CalendarEvent(
summary=WASTE_TYPE_TO_DESCRIPTION[waste_type],
start=waste_date,
end=waste_date,
)
for waste_date in waste_dates
if start_date.date() <= waste_date <= end_date.date()
)
return events
@callback
def _handle_coordinator_update(self) -> None:
"""Handle updated data from the coordinator."""
next_waste_pickup_type = None
next_waste_pickup_date = None
for waste_type, waste_dates in self.coordinator.data.items():
if (
waste_dates
and (
next_waste_pickup_date is None
or waste_dates[0] # type: ignore[unreachable]
< next_waste_pickup_date
)
and waste_dates[0] >= dt_util.now().date()
):
next_waste_pickup_date = waste_dates[0]
next_waste_pickup_type = waste_type
self._event = None
if next_waste_pickup_date is not None and next_waste_pickup_type is not None:
self._event = CalendarEvent(
summary=WASTE_TYPE_TO_DESCRIPTION[next_waste_pickup_type],
start=next_waste_pickup_date,
end=next_waste_pickup_date,
)
super()._handle_coordinator_update()
async def async_added_to_hass(self) -> None:
"""When entity is added to hass."""
await super().async_added_to_hass()
self._handle_coordinator_update()
|
components/mpas-seaice/testing_and_setup/testing/DATA/get_data.py | Fa-Li/E3SM | 235 | 12619289 | from __future__ import print_function
import subprocess
import argparse
from six.moves.urllib.parse import urlparse
import string
import os.path
#-------------------------------------------------------------------------------
def download_file(url, destination, proxy):
import subprocess
print(url, proxy)
if (proxy == "none"):
process = subprocess.Popen(["wget", "-O", "%s" %(destination), "%s" %(url)], stdout=subprocess.PIPE)
else:
process = subprocess.Popen(["wget", "-O", "%s" %(destination), "-e", "use_proxy=yes", "-e", proxy, "%s" %(url)], stdout=subprocess.PIPE)
while process.poll() is None:
line = process.stdout.readline() # This blocks until it receives a newline.
print(line)
print(process.stdout.read())
#-------------------------------------------------------------------------------
parser = argparse.ArgumentParser(description='Download data needed for DEMSI test cases.')
parser.add_argument('-p', '--proxytype', dest='proxytype', help='Proxy type', choices=['none','lanl'], default="none")
args = parser.parse_args()
# proxies
proxies = {"none": {"http": "none",
"https": "none"},
"lanl": {"http": "http_proxy=http://proxyout.lanl.gov:8080",
"https": "https_proxy=http://proxyout.lanl.gov:8080"}}
url = "https://web.lcrc.anl.gov/public/e3sm/mpas_standalonedata/mpas-seaice/testdata/MPAS-Seaice_test_dataset_V1/"
proxy = proxies[args.proxytype][urlparse(url).scheme]
manifest = url + "manifest"
download_file(manifest, "manifest", proxy)
manifestFile = open("manifest","r")
filenames = manifestFile.readlines()
for filename in filenames:
    if (not os.path.exists(os.path.dirname(filename.strip()))):
        os.makedirs(os.path.dirname(filename.strip()))
    download_file(url + filename.strip(), filename.strip(), proxy)
manifestFile.close()
# environment variable to set
print("Set the MPAS-Seaice data environment variable:")
print("export MPAS_SEAICE_DOMAINS_DIR=%s" %(os.getcwd()))
|
libs/configs/DOTA/retinanet/cfgs_res50_dota_atan_v2.py | Karlinik/RotationDetection | 850 | 12619290 | # -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
import numpy as np
from libs.configs._base_.models.retinanet_r50_fpn import *
from libs.configs._base_.datasets.dota_detection import *
from libs.configs._base_.schedules.schedule_1x import *
from dataloader.pretrained_weights.pretrain_zoo import PretrainModelZoo
# schedule
BATCH_SIZE = 1
GPU_GROUP = "0"
NUM_GPU = len(GPU_GROUP.strip().split(','))
SAVE_WEIGHTS_INTE = 27000
DECAY_STEP = np.array(DECAY_EPOCH, np.int32) * SAVE_WEIGHTS_INTE
MAX_ITERATION = SAVE_WEIGHTS_INTE * MAX_EPOCH
WARM_SETP = int(WARM_EPOCH * SAVE_WEIGHTS_INTE)
# dataset
# model
# backbone
pretrain_zoo = PretrainModelZoo()
PRETRAINED_CKPT = pretrain_zoo.pretrain_weight_path(NET_NAME, ROOT_PATH)
TRAINED_CKPT = os.path.join(ROOT_PATH, 'output/trained_weights')
# bbox head
ANGLE_RANGE = 180
# loss
CLS_WEIGHT = 1.0
REG_WEIGHT = 1.0 / 5.0
REG_LOSS_MODE = None
VERSION = 'RetinaNet_DOTA_1x_20210725'
"""
RetinaNet-H + theta=atan(sin(theta)/cos(theta)) + 180, sin^2(theta) + cos^2(theta) = 1
[-90, 90] sin in [-1, 1] cos in [0, 1]
FLOPs: 485784881; Trainable params: 33051321
This is your result for task 1:
mAP: 0.6482820239385153
ap of each class: plane:0.8863486082518542, baseball-diamond:0.7510916490271552, bridge:0.4136498976633022, ground-track-field:0.6934357734426206, small-vehicle:0.5915433817529869, large-vehicle:0.4156886089040786, ship:0.6512479280213479, tennis-court:0.8965927064782218, basketball-court:0.778541563411186, storage-tank:0.7716242837257139, soccer-ball-field:0.5261143148330104, roundabout:0.6328490142731126, harbor:0.5072934651888339, swimming-pool:0.6566747539350666, helicopter:0.55153441016924
The submitted information is :
Description: RetinaNet_DOTA_1x_20210725_35.1w_v1
Username: SJTU-Det
Institute: SJTU
Emailadress: <EMAIL>
TeamMembers: yangxue
"""
|
tests/functional/regressions/test_issue163.py | matt-koevort/tartiflette | 530 | 12619307 | import pytest
async def _resolver(*args, **kwargs):
return {"name": "a", "nickname": "b"}
@pytest.mark.asyncio
@pytest.mark.ttftt_engine(resolvers={"Query.dog": _resolver})
async def test_issue163(engine):
assert await engine.execute("query { dog { ... { name nickname }} }") == {
"data": {"dog": {"name": "a", "nickname": "b"}}
}
|
recipes/Python/174627_Treat_Win32_Registry_like/recipe-174627.py | tdiprima/code | 2,023 | 12619326 | """Slightly magical Win32api Registry -> Dictionary-like-object wrapper"""
from __future__ import generators
import win32api, win32con, cPickle
class RegistryDict(object):
def __init__(self, keyhandle = win32con.HKEY_LOCAL_MACHINE, keypath = [], flags = None):
"""If flags=None, then it will create the key.. otherwise pass a win32con.KEY_* sam"""
        self.keyhandle = None
self.open(keyhandle, keypath, flags)
def massageIncomingRegistryValue((obj, objtype)):
if objtype == win32con.REG_BINARY and obj[:8]=='PyPickle':
obj = obj[8:]
return cPickle.loads(obj)
elif objtype == win32con.REG_NONE:
return None
elif objtype in (win32con.REG_SZ, win32con.REG_EXPAND_SZ, win32con.REG_RESOURCE_LIST, win32con.REG_LINK, win32con.REG_BINARY, win32con.REG_DWORD, win32con.REG_DWORD_LITTLE_ENDIAN, win32con.REG_DWORD_BIG_ENDIAN, win32con.REG_MULTI_SZ):
return obj
raise NotImplementedError, "Registry type 0x%08X not supported" % (objtype,)
massageIncomingRegistryValue = staticmethod(massageIncomingRegistryValue)
def __getitem__(self, item):
item = str(item)
# is it data?
try:
return self.massageIncomingRegistryValue(win32api.RegQueryValueEx(self.keyhandle, item))
except:
pass
# it's probably a key then
try:
return RegistryDict(self.keyhandle, item, win32con.KEY_ALL_ACCESS)
except:
pass
# must not be there
raise KeyError, item
def has_key(self, key):
return self.__contains__(key)
def __contains__(self, key):
try:
self.__getitem__(key)
return 1
except KeyError:
return 0
def copy(self):
return dict(self.iteritems())
def __repr__(self):
return repr(self.copy())
def __str__(self):
return self.__repr__()
def __cmp__(self, other):
return cmp(self.copy(), other)
def __hash__(self):
raise TypeError, "RegistryDict objects are unhashable"
def clear(self):
for k in self.iterkeys():
del self[k]
def iteritems_data(self):
i = 0
# yield data
try:
while 1:
s, obj, objtype = win32api.RegEnumValue(self.keyhandle, i)
                yield s, self.massageIncomingRegistryValue((obj, objtype))
i += 1
except:
pass
def iteritems_children(self, access=win32con.KEY_ALL_ACCESS):
i = 0
try:
while 1:
                s = win32api.RegEnumKey(self.keyhandle, i)
yield s, RegistryDict(self.keyhandle, [s], access)
i += 1
except:
pass
def iteritems(self, access=win32con.KEY_ALL_ACCESS):
# yield children
for item in self.iteritems_data():
yield item
for item in self.iteritems_children(access):
yield item
def iterkeys_data(self):
for key, value in self.iteritems_data():
yield key
def iterkeys_children(self, access=win32con.KEY_ALL_ACCESS):
for key, value in self.iteritems_children(access):
yield key
def iterkeys(self):
for key, value in self.iteritems():
yield key
def itervalues_data(self):
for key, value in self.iteritems_data():
yield value
def itervalues_children(self, access=win32con.KEY_ALL_ACCESS):
for key, value in self.iteritems_children(access):
yield value
def itervalues(self, access=win32con.KEY_ALL_ACCESS):
for key, value in self.iteritems(access):
yield value
def items(self, access=win32con.KEY_ALL_ACCESS):
return list(self.iteritems())
def keys(self):
return list(self.iterkeys())
def values(self, access=win32con.KEY_ALL_ACCESS):
return list(self.itervalues(access))
def __delitem__(self, item):
win32api.RegDeleteValue(self.keyhandle, str(item))
def __len__(self):
return len(self.items())
def __iter__(self):
return self.iterkeys()
def popitem(self):
try:
k, v = self.iteritems().next()
del self[k]
return k, v
except StopIteration:
raise KeyError, "RegistryDict is empty"
def get(self,key,default=None):
try:
return self.__getitem__(key)
except:
return default
def setdefault(self,key,default=None):
try:
return self.__getitem__(key)
except:
            self.__setitem__(key, default)
return default
def update(self,d):
for k,v in d.items():
self.__setitem__(k, v)
def __setitem__(self, item, value):
item = str(item)
pyvalue = type(value)
if pyvalue is dict or isinstance(value, RegistryDict):
d = RegistryDict(self.keyhandle, item)
d.clear()
d.update(value)
return
if pyvalue is str:
valuetype = win32con.REG_SZ
elif pyvalue is int:
valuetype = win32con.REG_DWORD
else:
valuetype = win32con.REG_BINARY
value = 'PyPickle' + cPickle.dumps(value)
win32api.RegSetValueEx(self.keyhandle, item, 0, valuetype, value)
def open(self, keyhandle, keypath, flags = None):
if self.keyhandle:
self.close()
if type(keypath) is str:
keypath = keypath.split('\\')
if flags is None:
for subkey in keypath:
keyhandle = win32api.RegCreateKey(keyhandle, subkey)
else:
for subkey in keypath:
keyhandle = win32api.RegOpenKeyEx(keyhandle, subkey, 0, flags)
self.keyhandle = keyhandle
def close(self):
try:
win32api.RegCloseKey(self.keyhandle)
except:
pass
def __del__(self):
self.close()
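# Illustrative usage sketch (added; not part of the original recipe). The key
# path and value names are hypothetical, and running it requires Windows with
# the pywin32 package, just like the rest of the recipe.
if __name__ == '__main__':
    demo = RegistryDict(win32con.HKEY_CURRENT_USER, r'Software\RegistryDictDemo')
    demo['counter'] = 12        # stored as REG_DWORD
    demo['greeting'] = 'hello'  # stored as REG_SZ
    print demo['counter'], demo.keys()
    del demo['greeting']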
|
modules/dbnd-airflow/src/dbnd_airflow/tracking/wrap_operators.py | busunkim96/dbnd | 224 | 12619352 | import logging
import typing
import uuid
from collections import OrderedDict
from contextlib import contextmanager
from typing import Any, ContextManager, Dict, Optional
import six
from dbnd import dbnd_context
from dbnd._core.configuration.environ_config import (
DBND_ROOT_RUN_UID,
ENV_DBND_SCRIPT_NAME,
)
from dbnd._core.constants import TaskRunState, UpdateSource
from dbnd._core.tracking.airflow_dag_inplace_tracking import (
get_task_family_for_inline_script,
get_task_run_uid_for_inline_script,
)
from dbnd._core.tracking.backends import TrackingStore
from dbnd._core.utils.type_check_utils import is_instance_by_class_name
from dbnd._core.utils.uid_utils import get_task_run_attempt_uid, get_task_run_uid
from dbnd_airflow.tracking.conf_operations import flat_conf
from dbnd_airflow.tracking.dbnd_airflow_conf import get_databand_url_conf
from dbnd_airflow.tracking.dbnd_spark_conf import (
add_spark_env_fields,
dbnd_wrap_spark_environment,
get_databricks_java_agent_conf,
get_databricks_python_script_name,
get_spark_submit_java_agent_conf,
spark_submit_with_dbnd_tracking,
)
from dbnd_airflow.tracking.fakes import FakeRun, FakeTask, FakeTaskRun
if typing.TYPE_CHECKING:
from airflow.models import BaseOperator
from airflow.contrib.operators.ecs_operator import ECSOperator
from airflow.contrib.operators.spark_submit_operator import SparkSubmitOperator
from airflow.contrib.operators.databricks_operator import (
DatabricksSubmitRunOperator,
)
from airflow.contrib.operators.databricks_operator import DatabricksHook
from airflow.contrib.hooks.databricks_hook import RunState as DatabricksRunState
logger = logging.getLogger(__name__)
@contextmanager
def track_emr_add_steps_operator(operator, tracking_info):
flat_spark_envs = flat_conf(add_spark_env_fields(tracking_info))
for step in operator.steps:
args = step["HadoopJarStep"]["Args"]
if args and "spark-submit" in args[0]:
step["HadoopJarStep"]["Args"] = spark_submit_with_dbnd_tracking(
args, dbnd_context=flat_spark_envs
)
yield
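# Illustrative sketch (added; not part of the original module): the minimal
# shape of EmrAddStepsOperator.steps rewritten by the wrapper above. The jar
# and spark-submit arguments are hypothetical; because the first argument
# contains "spark-submit", the tracking configuration gets injected into it.
_EXAMPLE_EMR_STEPS = [
    {
        "Name": "my-spark-step",
        "HadoopJarStep": {
            "Jar": "command-runner.jar",
            "Args": ["spark-submit", "--deploy-mode", "cluster", "s3://my-bucket/job.py"],
        },
    }
]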
@contextmanager
def track_databricks_submit_run_operator(operator, tracking_info):
# type: (DatabricksSubmitRunOperator, Dict[str, str])-> None
config = operator.json
script_name = None # type: str
# passing env variables is only supported in new clusters
if "new_cluster" in config:
cluster = config["new_cluster"]
cluster.setdefault("spark_env_vars", {})
cluster["spark_env_vars"].update(tracking_info)
cluster["spark_env_vars"].update(get_databand_url_conf())
if "spark_python_task" in config:
cluster["spark_env_vars"].update(
get_databricks_python_script_name(
config["spark_python_task"]["python_file"]
)
)
script_name = cluster["spark_env_vars"][ENV_DBND_SCRIPT_NAME]
# calculate deterministic task uids so we can use it for manual completion
(
task_id,
task_run_uid,
task_run_attempt_uid,
) = get_task_run_uid_for_inline_script(tracking_info, script_name)
if "spark_jar_task" in config:
cluster.setdefault("spark_conf", {})
agent_conf = get_databricks_java_agent_conf()
if agent_conf is not None:
cluster["spark_conf"].update(agent_conf)
yield
if script_name:
        # When dbnd is running inside Databricks, the script can be SIGTERM'd before dbnd sends its state to the tracker.
        # So we need to check whether the run succeeded afterwards and set the run state manually.
tracking_store = dbnd_context().tracking_store # type: TrackingStore
run = FakeRun(source=UpdateSource.airflow_tracking)
task_run = FakeTaskRun(
task_run_uid=task_run_uid,
task_run_attempt_uid=task_run_attempt_uid,
run=run,
task=FakeTask(task_name=task_id, task_id=task_id),
task_af_id=task_id,
)
try:
hook = operator.get_hook() # type: DatabricksHook
state = hook.get_run_state(operator.run_id) # type: DatabricksRunState
run_page_url = hook.get_run_page_url(operator.run_id)
except Exception as exc:
logger.error(
"Unable to get inline script run state from Databricks. Setting task run state to Failed: %s",
exc,
)
set_task_run_state_safe(tracking_store, task_run, TaskRunState.FAILED)
return
        save_external_links_safe(tracking_store, task_run, {"databricks": run_page_url})
if state.is_successful:
set_task_run_state_safe(tracking_store, task_run, TaskRunState.SUCCESS)
else:
# TODO: error should be extracted from plain Databricks logs
set_task_run_state_safe(tracking_store, task_run, TaskRunState.FAILED)
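# Illustrative sketch (added; not part of the original module): the minimal
# shape of DatabricksSubmitRunOperator.json handled by the wrapper above.
# Cluster settings and the script path are hypothetical; "spark_env_vars" is
# added to the new_cluster section by track_databricks_submit_run_operator().
_EXAMPLE_DATABRICKS_JSON = {
    "new_cluster": {
        "spark_version": "7.3.x-scala2.12",
        "num_workers": 2,
    },
    "spark_python_task": {"python_file": "dbfs:/scripts/my_job.py"},
}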
def set_task_run_state_safe(tracking_store, task_run, state):
# (TrackingStore, Any, TaskRunState) -> None
try:
tracking_store.set_task_run_state(task_run=task_run, state=state)
except Exception as exc:
logger.error(
"Unable to set task run state: %s", exc,
)
def save_external_links_safe(tracking_store, task_run, links_dict):
# (TrackingStore, Any, Dict[str, str]) -> None
try:
tracking_store.save_external_links(
task_run=task_run, external_links_dict=links_dict,
)
except Exception as exc:
logger.error(
"Unable to set external links: %s", exc,
)
@contextmanager
def track_data_proc_pyspark_operator(operator, tracking_info):
if operator.dataproc_properties is None:
operator.dataproc_properties = dict()
spark_envs = add_spark_env_fields(tracking_info)
operator.dataproc_properties.update(spark_envs)
yield
@contextmanager
def track_spark_submit_operator(operator, tracking_info):
# type: (SparkSubmitOperator, Dict[str,str])-> None
if operator._conf is None:
operator._conf = dict()
spark_envs = add_spark_env_fields(tracking_info)
operator._conf.update(spark_envs)
if operator._env_vars is None:
operator._env_vars = dict()
dbnd_env_vars = dbnd_wrap_spark_environment()
operator._env_vars.update(dbnd_env_vars)
if _has_java_application(operator):
agent_conf = get_spark_submit_java_agent_conf()
if agent_conf is not None:
operator._conf.update(agent_conf)
yield
def _has_java_application(operator):
return (
operator._application.endswith(".jar")
or operator._jars
        and operator._jars.endswith(".jar")
)
@contextmanager
def track_ecs_operator(operator, tracking_info):
# type: (ECSOperator, Dict[str,str])-> None
"""
    Adds the tracking info to the ECS environment through `overrides` -> `containerOverrides`.
    Notice that `overrides` and `containerOverrides` with container names must already be present for this to work.
    Airflow passes the overrides to boto, so here are the boto3 docs:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs.html#ECS.Client.run_task
"""
info_as_env_var = [
{"name": key, "value": value} for key, value in six.iteritems(tracking_info)
]
new = []
if "containerOverrides" in operator.overrides:
for override in operator.overrides["containerOverrides"]:
override.setdefault("environment", [])
override["environment"].extend(info_as_env_var)
new.append(override)
operator.overrides["containerOverrides"] = new
yield
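# Illustrative sketch (added; not part of the original module): the minimal
# shape of ECSOperator.overrides expected by track_ecs_operator(). The
# container name is hypothetical; the wrapper appends the tracking variables
# to each container's "environment" list as {"name": ..., "value": ...} pairs.
_EXAMPLE_ECS_OVERRIDES = {
    "containerOverrides": [
        {
            "name": "my-task-container",
            "environment": [],
        },
    ],
}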
# registering operators names to the relevant tracking method
_EXECUTE_TRACKING = OrderedDict(
[
("EmrAddStepsOperator", track_emr_add_steps_operator),
("DatabricksSubmitRunOperator", track_databricks_submit_run_operator),
("DataProcPySparkOperator", track_data_proc_pyspark_operator),
("SparkSubmitOperator", track_spark_submit_operator),
("ECSOperator", track_ecs_operator),
]
)
def wrap_operator_with_tracking_info(tracking_info, operator):
# type: (Dict[str, str], Any) -> Optional[ContextManager]
"""
Wrap the operator with relevant tracking method, if found such method.
"""
for class_name, tracking_wrapper in _EXECUTE_TRACKING.items():
if is_instance_by_class_name(operator, class_name):
return tracking_wrapper(operator, tracking_info)
|
cfgov/housing_counselor/management/commands/hud_generate_html.py | Colin-Seifer/consumerfinance.gov | 156 | 12619357 | from django.core.management.base import BaseCommand
from housing_counselor.generator import generate_counselor_html
class Command(BaseCommand):
help = "Generate bulk housing counselor HTML data"
def add_arguments(self, parser):
parser.add_argument("source")
parser.add_argument("target")
def handle(self, *args, **options):
generate_counselor_html(options["source"], options["target"])
|
mimic/rest/maas_api.py | mfens98/mimic | 141 | 12619376 | """
MAAS Mock API
"""
from __future__ import division, unicode_literals
import json
import collections
from six.moves.urllib.parse import parse_qs
import random
import re
from uuid import uuid4
import attr
from six import text_type
from zope.interface import implementer
from twisted.plugin import IPlugin
from mimic.catalog import Entry
from mimic.catalog import Endpoint
from mimic.rest.identity_api import base_uri_from_request
from mimic.rest.mimicapp import MimicApp
from mimic.imimic import IAPIMock
from mimic.canned_responses.maas_json_home import json_home
from mimic.canned_responses.maas_monitoring_zones import monitoring_zones
from mimic.canned_responses.maas_alarm_examples import alarm_examples
from mimic.model.maas_errors import ObjectDoesNotExist, ParentDoesNotExist
from mimic.model.maas_objects import (Agent,
Alarm,
AlarmState,
Check,
Entity,
MaasStore,
Notification,
NotificationPlan,
Suppression)
from mimic.util.helper import json_from_request
from mimic.util.helper import Matcher, random_hex_generator, random_hipsum
MISSING_REQUIRED_ARGUMENT_REGEX = re.compile(
r'__init__\(\) missing \d+ required positional argument: \'(\w+)\'')
REMOTE_CHECK_TYPE_REGEX = re.compile(r'^remote\.')
@implementer(IAPIMock, IPlugin)
class MaasApi(object):
"""
Rest endpoints for mocked MAAS Api.
"""
def __init__(self, regions=["ORD"]):
"""
Set regions
"""
self._regions = regions
def catalog_entries(self, tenant_id):
"""
List catalog entries for the MaaS API.
"""
return [
Entry(
tenant_id, "rax:monitor", "cloudMonitoring",
[
Endpoint(tenant_id, region, text_type(uuid4()),
"v1.0")
for region in self._regions
]
)
]
def resource_for_region(self, region, uri_prefix, session_store):
"""
Get an : obj: `twisted.web.iweb.IResource` for the given URI prefix;
implement : obj: `IAPIMock`.
"""
return MaasMock(self, uri_prefix, session_store, region).app.resource()
class MCache(object):
"""
M(onitoring) Cache Object to hold dictionaries of all entities, checks and alarms.
"""
def __init__(self, clock):
"""
Create the initial structs for cache
"""
current_time_milliseconds = int(1000 * clock.seconds())
self.entities = collections.OrderedDict()
self.notifications = collections.OrderedDict(
[(u'ntTechnicalContactsEmail',
Notification(id=u'ntTechnicalContactsEmail',
label=u'Email All Technical Contacts',
created_at=current_time_milliseconds,
updated_at=current_time_milliseconds,
type=u'technicalContactsEmail'))])
self.notification_plans = collections.OrderedDict(
[(u'npTechnicalContactsEmail',
NotificationPlan(id=u'npTechnicalContactsEmail',
label=u'Technical Contacts - Email',
created_at=current_time_milliseconds,
updated_at=current_time_milliseconds))])
self.notificationtypes_list = [{'id': 'webhook', 'fields': [{'name': 'url',
'optional': False,
'description': 'An HTTP or \
HTTPS URL to POST to'}]},
{'id': 'email', 'fields': [{'name': 'address',
'optional': False,
'description': 'Email \
address to send notifications to'}]},
{'id': 'pagerduty', 'fields': [{'name': 'service_key',
'optional': False,
'description': 'The PagerDuty \
service key to use.'}]},
{'id': 'sms', 'fields': [{'name': 'phone_number',
'optional': False,
'description': 'Phone number to send \
the notification to, \
with leading + and country \
code (E.164 format)'}]}]
self.suppressions = collections.OrderedDict()
self.audits_list = []
self.maas_store = MaasStore(clock)
self.test_alarm_responses = {}
self.test_alarm_errors = {}
def _only_keys(dict_ins, keys):
"""
Filters out unwanted keys of a dict.
"""
return {k: dict_ins[k] for k in dict_ins if k in keys}
def create_entity(clock, params):
"""
Returns a dictionary representing an entity
:return: an Entity model, which is described in `the Rackspace Cloud
Monitoring Developer Guide, section 5.4
<http://docs.rackspace.com/cm/api/v1.0/cm-devguide/content/service-entities.html>`_
:rtype: ``dict`` mapping ``unicode`` to ``unicode``, ``float``,
``bool``, ``dict`` or ``NoneType``.
"""
current_time_milliseconds = int(1000 * clock.seconds())
params_copy = _only_keys(params, Entity.USER_SPECIFIABLE_KEYS)
params_copy['created_at'] = params_copy[
'updated_at'] = current_time_milliseconds
return Entity(**params_copy)
def create_check(clock, params):
"""
Returns a dictionary representing a check
:return: a Check model, which is described in `the Rackspace Cloud
Monitoring Developer Guide, section 5.7
<http://docs.rackspace.com/cm/api/v1.0/cm-devguide/content/service-checks.html>`_
:rtype: ``dict`` mapping ``unicode`` to ``unicode``, ``float``,
``int``, ``bool``, ``dict`` or ``NoneType``.
"""
current_time_milliseconds = int(1000 * clock.seconds())
params_copy = _only_keys(params, Check.USER_SPECIFIABLE_KEYS)
params_copy['created_at'] = params_copy[
'updated_at'] = current_time_milliseconds
return Check(**params_copy)
def create_alarm(clock, entity_id, params):
"""
Returns a dictionary representing an alarm
:return: an Alarm model, which is described in `the Rackspace Cloud
Monitoring Developer Guide, section 5.12
<http://docs.rackspace.com/cm/api/v1.0/cm-devguide/content/service-alarms.html>`_
:rtype: ``dict`` mapping ``unicode`` to ``unicode``, ``float``,
``bool``, ``dict``, or ``NoneType``.
"""
current_time_milliseconds = int(1000 * clock.seconds())
params_copy = _only_keys(params, Alarm.USER_SPECIFIABLE_KEYS)
params_copy['entity_id'] = entity_id
params_copy['created_at'] = params_copy[
'updated_at'] = current_time_milliseconds
return Alarm(**params_copy)
def create_notification_plan(clock, params):
"""
Creates a notification plan
:return: a Notification Plan model, which is described in `the
Rackspace Cloud Monitoring Developer Guide, section 5.11
<http://docs.rackspace.com/cm/api/v1.0/cm-devguide/content/service-notification-plans.html>`_
:rtype: ``dict`` mapping ``unicode`` to ``unicode``, ``float``,
``dict`` or ``NoneType``.
"""
current_time_milliseconds = int(1000 * clock.seconds())
params_copy = _only_keys(params, NotificationPlan.USER_SPECIFIABLE_KEYS)
params_copy['created_at'] = params_copy[
'updated_at'] = current_time_milliseconds
return NotificationPlan(**params_copy)
def create_notification(clock, params):
"""
Creates a notification target
:return: a Notification model, which is described in `the Rackspace
Cloud Monitoring Developer Guide, section 5.10
<http://docs.rackspace.com/cm/api/v1.0/cm-devguide/content/service-notifications.html>`_
:rtype: ``dict`` mapping ``unicode`` to ``unicode``, ``float``,
``dict`` or ``NoneType``.
"""
current_time_milliseconds = int(1000 * clock.seconds())
params_copy = _only_keys(params, Notification.USER_SPECIFIABLE_KEYS)
params_copy['created_at'] = params_copy[
'updated_at'] = current_time_milliseconds
return Notification(**params_copy)
def create_suppression(clock, params):
"""
Creates a suppression
:return: a Suppression model, which is described in `the Rackspace
Cloud Monitoring Developer Guide, section 5.16
<http://docs.rackspace.com/cm/api/v1.0/cm-devguide/content/service-suppressions.html>`_
:rtype: ``dict`` mapping ``unicode`` to ``unicode`` or ``list``.
"""
params_copy = _only_keys(params, Suppression.USER_SPECIFIABLE_KEYS)
params_copy['created_at'] = params_copy[
'updated_at'] = int(1000 * clock.seconds())
return Suppression(**params_copy)
def _get_object(collection, object_type, object_key, alt_key=None):
"""
Gets the specified object from the collection or throws ObjectDoesNotExist.
The collection should behave like a dict where object_key retrieves
an object from the collection.
"""
try:
return collection[object_key]
except KeyError:
raise ObjectDoesNotExist(object_type=object_type,
key=(alt_key or object_key))
def _delete_object(collection, object_type, object_key, alt_key=None):
"""
Deletes the specified object from the collection or throws ObjectDoesNotExist.
"""
try:
del collection[object_key]
except KeyError:
raise ObjectDoesNotExist(object_type=object_type,
key=(alt_key or object_key))
def _get_entity(entities, entity_id):
"""
Gets the entity from the collection or throws ObjectDoesNotExist.
"""
return _get_object(entities, 'Entity', entity_id)
def _delete_entity(entities, entity_id):
"""
Deletes the entity from the collection or throws ObjectDoesNotExist.
"""
_delete_object(entities, 'Entity', entity_id)
def _get_parent_entity(entities, entity_id):
"""
Gets the parent entity from the collection, or throws ParentDoesNotExist.
"""
try:
return entities[entity_id]
except KeyError:
raise ParentDoesNotExist(object_type='Entity', key=entity_id)
def _get_check(entities, entity_id, check_id):
"""
Gets the check from the collection, or throws an error if it does not exist.
"""
entity = _get_parent_entity(entities, entity_id)
return _get_object(entity.checks,
'Check',
check_id,
'{0}:{1}'.format(entity_id, check_id))
def _delete_check(entities, entity_id, check_id):
"""
Deletes the check from the collection, or throws an error if it does not exist.
"""
entity = _get_parent_entity(entities, entity_id)
_delete_object(entity.checks,
'Check',
check_id,
'{0}:{1}'.format(entity_id, check_id))
alarms_to_delete = [alarm_id for alarm_id in entity.alarms
if entity.alarms[alarm_id].check_id == check_id]
for alarm_id in alarms_to_delete:
del entity.alarms[alarm_id]
def _delete_alarm(entities, entity_id, alarm_id):
"""
Deletes the alarm from the collection, or throws an error if it does not exist.
"""
entity = _get_parent_entity(entities, entity_id)
_delete_object(entity.alarms,
'Alarm',
alarm_id,
'{0}:{1}'.format(entity_id, alarm_id))
def _get_alarm(entities, entity_id, alarm_id):
"""
Gets the alarm from the collection, or throws an error if it does not exist.
"""
entity = _get_parent_entity(entities, entity_id)
return _get_object(entity.alarms,
'Alarm',
alarm_id,
'{0}:{1}'.format(entity_id, alarm_id))
def _get_notification(notifications, nt_id):
"""
Gets the notification from the collection or throws ObjectDoesNotExist.
"""
return _get_object(notifications, 'Notification', nt_id)
def _delete_notification(notifications, nt_id):
"""
Deletes the notification from the collection, or throws ObjectDoesNotExist.
"""
_delete_object(notifications, 'Notification', nt_id)
def _get_notification_plan(notification_plans, np_id):
"""
Gets the notification plan from the collection.
This function gets the notification plan or throws ObjectDoesNotExist
if it does not exist.
"""
return _get_object(notification_plans, 'NotificationPlan', np_id)
def _delete_notification_plan(notification_plans, np_id):
"""
Deletes the notification plan from the collection, or throws ObjectDoesNotExist.
"""
_delete_object(notification_plans, 'NotificationPlan', np_id)
def _get_suppression(suppressions, sp_id):
"""
Gets the suppression from the collection or throws ObjectDoesNotExist.
"""
return _get_object(suppressions, 'Suppression', sp_id)
def _delete_suppression(suppressions, sp_id):
"""
Deletes the suppression from the collection, or throws ObjectDoesNotExist.
"""
_delete_object(suppressions, 'Suppression', sp_id)
def _map_getter(collection, request, object_type, object_key):
"""
Getter handler for objects in a Mapping type collection.
"""
try:
obj = _get_object(collection, object_type, object_key)
return json.dumps(obj.to_json())
except ObjectDoesNotExist as e:
request.setResponseCode(e.code)
return json.dumps(e.to_json())
def _find_missing_required_key(cls, post_data, additional_keys):
"""
Finds a missing required key in the case that trying to create an instance
failed with a TypeError.
"""
fields_by_name = {field.name: field for field in attr.fields(cls)}
specified_keys = {key for sublist in [post_data.keys(), additional_keys]
for key in sublist}
missing_keys = [key for key in fields_by_name
if fields_by_name[key].default is attr.NOTHING and key not in specified_keys]
return missing_keys[0]
def _metric_list_for_check(maas_store, entity, check):
"""
Computes the metrics list for a given check.
Remote checks return a metric for each monitoring zone and
each type of metric for the check type. Agent checks return
a metric for each metric type on the check type. Check types
that Mimic doesn't know about generate an empty list.
"""
if check.type not in maas_store.check_types:
return []
if REMOTE_CHECK_TYPE_REGEX.match(check.type):
return [{'name': '{0}.{1}'.format(mz, metric.name),
'type': metric.type,
'unit': metric.unit}
for metric in maas_store.check_types[check.type].metrics
for mz in check.monitoring_zones_poll]
return [{'name': metric.name,
'type': metric.type,
'unit': metric.unit}
for metric in maas_store.check_types[check.type].metrics]
def _metric_list_for_entity(maas_store, entity):
"""
Creates the metrics list for one entity.
"""
return {'entity_id': entity.id,
'entity_label': entity.label,
'checks': [{'id': check.id,
'label': check.label,
'type': check.type,
'metrics': _metric_list_for_check(maas_store, entity, check)}
for check in entity.checks.values()]}
def _multiplot_interval(from_date, to_date, points):
"""
Computes the size of the interval between points in a multiplot.
:return: the multiplot interval size.
:rtype: ``float``
"""
if points < 2:
return 0.0
return (to_date - from_date) / (points - 1)
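# Worked example (added for illustration, not part of the original module):
# five points across a 60 000 ms window are spaced (60000 - 0) / (5 - 1) =
# 15 000 ms apart, while fewer than two points collapse to a zero interval.
assert _multiplot_interval(0, 60000, 5) == 15000.0
assert _multiplot_interval(0, 60000, 1) == 0.0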
def _compute_multiplot(maas_store, entity_id, check, metric_name, from_date, to_date, points):
"""
Computes multiplot data for a single (entity, check, metric) group.
"""
fallback = {'entity_id': entity_id,
'check_id': check.id,
'metric': metric_name,
'unit': 'unknown',
'type': 'unknown',
'data': []}
if check.type not in maas_store.check_types:
return fallback
interval = _multiplot_interval(from_date, to_date, points)
metric = None
base_metric_name = metric_name
metric_value_kwargs = {'entity_id': entity_id,
'check_id': check.id}
if re.match(r'^remote\.', check.type):
match = re.match(r'^(mz\w+)\.(\w+)$', metric_name)
if not match:
return fallback
metric_value_kwargs['monitoring_zone'] = match.group(1)
base_metric_name = match.group(2)
try:
metric = maas_store.check_types[
check.type].get_metric_by_name(base_metric_name)
except NameError:
return fallback
return {'entity_id': entity_id,
'check_id': check.id,
'metric': metric_name,
'unit': metric.unit,
'type': metric.type,
'data': [{'numPoints': 4,
'timestamp': int(from_date + (i * interval)),
'average': metric.get_value(
timestamp=int(from_date + (i * interval)),
**metric_value_kwargs)}
for i in range(points)]}
def parse_and_flatten_qs(url):
"""
Parses a querystring and flattens 1-arg arrays.
"""
qs = parse_qs(url)
flat_qs = {}
for key in qs:
flat_qs[key] = qs[key][0] if len(qs[key]) == 1 else qs[key]
return flat_qs
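# Worked example (added for illustration, not part of the original module):
# single-valued query parameters are flattened to scalars while repeated
# parameters remain lists.
assert parse_and_flatten_qs('limit=10&id=a&id=b') == {'limit': '10', 'id': ['a', 'b']}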
def _mcache_factory(clock):
"""
Returns a function that makes a defaultdict that makes MCache objects
for each tenant.
"""
return lambda: collections.defaultdict(lambda: MCache(clock))
class MaasMock(object):
"""
Klein routes for the Monitoring API.
"""
def __init__(self, api_mock, uri_prefix, session_store, name):
"""
Create a maas region with a given URI prefix (used for generating URIs
to servers).
"""
self._api_mock = api_mock
self._session_store = session_store
self._name = name
def _entity_cache_for_tenant(self, tenant_id):
"""
Retrieve the M_cache object containing all objects created so far
"""
clock = self._session_store.clock
return (self._session_store.session_for_tenant_id(tenant_id)
.data_for_api(self._api_mock, _mcache_factory(clock))[self._name]
)
def _audit(self, app, request, tenant_id, status, content=b''):
headers = {k.decode("utf-8"): [vv.decode("utf-8") if isinstance(vv, bytes) else vv for vv in v]
for k, v in request.getAllHeaders().items()
if k != b'x-auth-token'}
record = {
'id': text_type(uuid4()),
'timestamp': int(1000 * self._session_store.clock.seconds()),
'headers': headers,
'url': request.path.decode("utf-8"),
'app': app,
'query': parse_and_flatten_qs(request.uri.decode("utf-8")),
'txnId': text_type(uuid4()),
'payload': content.decode("utf-8"),
'method': request.method.decode("utf-8"),
'account_id': tenant_id,
'who': '',
'why': '',
'statusCode': status
}
self._entity_cache_for_tenant(tenant_id).audits_list.append(record)
app = MimicApp()
@app.route('/v1.0/<string:tenant_id>/mimic/reset', methods=['GET'])
def doreset(self, request, tenant_id):
"""
Reset the session
"""
self._session_store.session_for_tenant_id(tenant_id)._api_objects = {}
return "Session has been reset for tenant_id " + tenant_id
@app.route('/v1.0/<string:tenant_id>/entities', methods=['GET'])
def list_entities(self, request, tenant_id):
"""
        Replies to the entities list call
"""
entities = list(self._entity_cache_for_tenant(
tenant_id).entities.values())
limit = 100
marker = None
next_marker = None
next_href = None
if b'limit' in request.args:
limit = int(request.args[b'limit'][0].strip())
if b'marker' in request.args:
marker = request.args[b'marker'][0].strip().decode("utf-8")
for q in range(len(entities)):
if entities[q].id == marker:
entities = entities[q:]
break
try:
next_marker = entities[limit].id
except Exception:
pass
entities = entities[:limit]
metadata = {'count': len(entities),
'limit': limit,
'marker': marker,
'next_marker': next_marker,
'next_href': next_href}
request.setResponseCode(200)
return json.dumps({'metadata': metadata,
'values': [entity.to_json() for entity in entities]})
@app.route('/v1.0/<string:tenant_id>/entities', methods=['POST'])
def create_entity(self, request, tenant_id):
"""
Creates a new entity
"""
content = request.content.read()
postdata = json.loads(content.decode("utf-8"))
newentity = create_entity(self._session_store.clock, postdata)
self._entity_cache_for_tenant(tenant_id).entities[
newentity.id] = newentity
status = 201
request.setResponseCode(status)
request.setHeader(b'location', base_uri_from_request(request).rstrip('/').encode('utf-8') +
request.path + b'/' + newentity.id.encode('utf-8'))
request.setHeader(b'x-object-id', newentity.id.encode('utf-8'))
request.setHeader(b'content-type', b'text/plain')
self._audit('entities', request, tenant_id, status, content)
return b''
@app.route('/v1.0/<string:tenant_id>/entities/<string:entity_id>', methods=['GET'])
def get_entity(self, request, tenant_id, entity_id):
"""
Fetches a specific entity
"""
return _map_getter(self._entity_cache_for_tenant(tenant_id).entities,
request,
"Entity",
entity_id)
@app.route('/v1.0/<string:tenant_id>/entities/<string:entity_id>/checks', methods=['GET'])
def get_checks_for_entity(self, request, tenant_id, entity_id):
"""
        Returns all the checks for a particular entity
"""
entities = self._entity_cache_for_tenant(tenant_id).entities
try:
entity = _get_parent_entity(entities, entity_id)
checks = entity.list_checks()
metadata = {'count': len(checks),
'limit': 1000,
'marker': None,
'next_marker': None,
'next_href': None}
request.setResponseCode(200)
return json.dumps({'metadata': metadata, 'values': checks})
except ParentDoesNotExist as e:
request.setResponseCode(e.code)
return json.dumps(e.to_json())
@app.route('/v1.0/<string:tenant_id>/entities/<string:entity_id>', methods=['PUT'])
def update_entity(self, request, tenant_id, entity_id):
"""
Update entity in place.
"""
content = request.content.read()
update = json.loads(content.decode("utf-8"))
update_kwargs = dict(update)
entities = self._entity_cache_for_tenant(tenant_id).entities
try:
entity = _get_entity(entities, entity_id)
entity.update(clock=self._session_store.clock, **update_kwargs)
except ObjectDoesNotExist as e:
request.setResponseCode(e.code)
self._audit('entities', request, tenant_id, e.code, content)
return json.dumps(e.to_json())
status = 204
request.setResponseCode(status)
request.setHeader(b'location', base_uri_from_request(request).rstrip('/').encode('utf-8') +
request.path)
request.setHeader(b'x-object-id', entity_id.encode('utf-8'))
request.setHeader(b'content-type', b'text/plain')
self._audit('entities', request, tenant_id, status, content)
return b''
@app.route('/v1.0/<string:tenant_id>/entities/<string:entity_id>', methods=['DELETE'])
def delete_entity(self, request, tenant_id, entity_id):
"""
Delete an entity, all checks that belong to entity, all alarms that belong to those checks
"""
entities = self._entity_cache_for_tenant(tenant_id).entities
try:
_delete_entity(entities, entity_id)
except ObjectDoesNotExist as e:
request.setResponseCode(e.code)
self._audit('entities', request, tenant_id, e.code)
return json.dumps(e.to_json())
status = 204
request.setResponseCode(status)
request.setHeader(b'content-type', b'text/plain')
self._audit('entities', request, tenant_id, status)
return b''
@app.route('/v1.0/<string:tenant_id>/entities/<string:entity_id>/checks', methods=['POST'])
def create_check(self, request, tenant_id, entity_id):
"""
Create a check
"""
content = request.content.read()
postdata = json.loads(content.decode("utf-8"))
entities = self._entity_cache_for_tenant(tenant_id).entities
newcheck = None
try:
newcheck = create_check(self._session_store.clock, postdata)
except TypeError:
missing_key = _find_missing_required_key(
Check, postdata, ['created_at', 'updated_at'])
status = 400
request.setResponseCode(status)
self._audit('checks', request, tenant_id, status, content)
return json.dumps({'type': 'badRequest',
'code': status,
'message': 'Validation error for key \'{0}\''.format(missing_key),
'details': 'Missing required key ({0})'.format(missing_key),
'txnId': '.fake.mimic.transaction.id.c-1111111.ts-123444444.v-12344frf'})
try:
entity = _get_entity(entities, entity_id)
entity.checks[newcheck.id] = newcheck
except ObjectDoesNotExist as e:
request.setResponseCode(e.code)
self._audit('checks', request, tenant_id, e.code, content)
return json.dumps(e.to_json())
status = 201
request.setResponseCode(status)
request.setHeader(b'location', base_uri_from_request(request).rstrip('/').encode('utf-8') +
request.path + b'/' + newcheck.id.encode('utf-8'))
request.setHeader(b'x-object-id', newcheck.id.encode('utf-8'))
request.setHeader(b'content-type', b'text/plain')
self._audit('checks', request, tenant_id, status, content)
return b''
@app.route('/v1.0/<string:tenant_id>/entities/<string:entity_id>/checks/<string:check_id>',
methods=['GET'])
def get_check(self, request, tenant_id, entity_id, check_id):
"""
Get a specific check that was created before
"""
entities = self._entity_cache_for_tenant(tenant_id).entities
try:
check = _get_check(entities, entity_id, check_id)
return json.dumps(check.to_json())
except (ObjectDoesNotExist, ParentDoesNotExist) as e:
request.setResponseCode(e.code)
return json.dumps(e.to_json())
@app.route('/v1.0/<string:tenant_id>/entities/<string:entity_id>/checks/<string:check_id>',
methods=['PUT'])
def update_check(self, request, tenant_id, entity_id, check_id):
"""
Updates a check in place.
"""
content = request.content.read()
update = json.loads(content.decode("utf-8"))
update_kwargs = dict(update)
entities = self._entity_cache_for_tenant(tenant_id).entities
try:
check = _get_check(entities, entity_id, check_id)
check.update(clock=self._session_store.clock, **update_kwargs)
except (ObjectDoesNotExist, ParentDoesNotExist) as e:
request.setResponseCode(e.code)
self._audit('checks', request, tenant_id, e.code, content)
return json.dumps(e.to_json())
status = 204
request.setResponseCode(status)
request.setHeader(b'location', base_uri_from_request(request).rstrip('/').encode('utf-8') +
request.path)
request.setHeader(b'x-object-id', check_id.encode('utf-8'))
request.setHeader(b'content-type', b'text/plain')
self._audit('checks', request, tenant_id, status, content)
return b''
@app.route('/v1.0/<string:tenant_id>/entities/<string:entity_id>/checks/<string:check_id>',
methods=['DELETE'])
def delete_check(self, request, tenant_id, entity_id, check_id):
"""
Deletes check and all alarms associated to it
"""
entities = self._entity_cache_for_tenant(tenant_id).entities
try:
_delete_check(entities, entity_id, check_id)
except (ObjectDoesNotExist, ParentDoesNotExist) as e:
request.setResponseCode(e.code)
self._audit('checks', request, tenant_id, e.code)
return json.dumps(e.to_json())
status = 204
request.setResponseCode(status)
request.setHeader(b'content-type', b'text/plain')
self._audit('checks', request, tenant_id, status)
return b''
@app.route('/v1.0/<string:tenant_id>/entities/<string:entity_id>/test-check', methods=['POST'])
def test_check(self, request, tenant_id, entity_id):
"""
Tests a check.
If the user has configured overrides using the control API for
test-check using this entity and check type, those will be used.
Otherwise, random values within each metric type will be
generated. For instance, integer metrics generate integers, and
string metrics generate strings. No other guarantees are made.
"""
content = request.content.read()
test_config = json.loads(content.decode("utf-8"))
check_type = test_config['type']
maas_store = self._entity_cache_for_tenant(tenant_id).maas_store
response_code, response_body = maas_store.check_types[check_type].get_test_check_response(
entity_id=entity_id,
monitoring_zones=test_config.get('monitoring_zones_poll'),
timestamp=int(1000 * self._session_store.clock.seconds()))
request.setResponseCode(response_code)
self._audit('checks', request, tenant_id, response_code, content)
return json.dumps(response_body)
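# Illustrative sketch (not part of the original handler): the minimal JSON body
# this endpoint reads. Only 'type' is required; 'monitoring_zones_poll' is
# optional and is forwarded to the check type's test-check generator. The
# values shown are invented for demonstration.
#
#   POST /v1.0/<tenant_id>/entities/<entity_id>/test-check
#   {"type": "remote.ping", "monitoring_zones_poll": ["mzord"]}
#
# Without overrides set through the control API, each metric in the response
# carries a random value of its declared type.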
@app.route('/v1.0/<string:tenant_id>/entities/<string:entity_id>/alarms', methods=['POST'])
def create_alarm(self, request, tenant_id, entity_id):
"""
Creates alarm
"""
content = request.content.read()
postdata = json.loads(content.decode("utf-8"))
entities = self._entity_cache_for_tenant(tenant_id).entities
try:
newalarm = create_alarm(
self._session_store.clock, entity_id, postdata)
except TypeError:
missing_key = _find_missing_required_key(Alarm, postdata, ['created_at',
'updated_at',
'entity_id'])
status = 400
request.setResponseCode(status)
self._audit('alarms', request, tenant_id, status, content)
return json.dumps({'type': 'badRequest',
'code': status,
'message': 'Validation error for key \'{0}\''.format(missing_key),
'details': 'Missing required key ({0})'.format(missing_key),
'txnId': '.fake.mimic.transaction.id.c-1111111.ts-123444444.v-12344frf'})
try:
entity = _get_parent_entity(entities, entity_id)
entity.alarms[newalarm.id] = newalarm
except ParentDoesNotExist as e:
request.setResponseCode(e.code)
self._audit('alarms', request, tenant_id, e.code, content)
return json.dumps(e.to_json())
status = 201
request.setResponseCode(status)
request.setHeader(b'location', base_uri_from_request(request).rstrip('/').encode('utf-8') +
request.path + b'/' + newalarm.id.encode('utf-8'))
request.setHeader(b'x-object-id', newalarm.id.encode('utf-8'))
request.setHeader(b'content-type', b'text/plain')
self._audit('alarms', request, tenant_id, status, content)
return b''
@app.route('/v1.0/<string:tenant_id>/entities/<string:entity_id>/alarms/<string:alarm_id>',
methods=['GET'])
def get_alarm(self, request, tenant_id, entity_id, alarm_id):
"""
Gets an alarm by ID.
"""
entities = self._entity_cache_for_tenant(tenant_id).entities
try:
alarm = _get_alarm(entities, entity_id, alarm_id)
return json.dumps(alarm.to_json())
except (ObjectDoesNotExist, ParentDoesNotExist) as e:
request.setResponseCode(e.code)
return json.dumps(e.to_json())
@app.route('/v1.0/<string:tenant_id>/entities/<string:entity_id>/alarms/<string:alarm_id>',
methods=['PUT'])
def update_alarm(self, request, tenant_id, entity_id, alarm_id):
"""
Updates an alarm in place.
Documentation for this API can be found in the Rackspace Cloud
Monitoring Developer Guide, section 5.12.5, "Update alarm by ID".
The full link is quite long, but you can reach it by browsing
to the following goo.gl URL:
http://goo.gl/NhxgTZ
"""
content = request.content.read()
update = json.loads(content.decode("utf-8"))
update_kwargs = dict(update)
entities = self._entity_cache_for_tenant(tenant_id).entities
try:
alarm = _get_alarm(entities, entity_id, alarm_id)
alarm.update(clock=self._session_store.clock, **update_kwargs)
except (ObjectDoesNotExist, ParentDoesNotExist) as e:
request.setResponseCode(e.code)
self._audit('alarms', request, tenant_id, e.code, content)
return json.dumps(e.to_json())
status = 204
request.setResponseCode(status)
request.setHeader(b'location', base_uri_from_request(request).rstrip('/').encode('utf-8') +
request.path)
request.setHeader(b'x-object-id', alarm_id.encode('utf-8'))
request.setHeader(b'content-type', b'text/plain')
self._audit('alarms', request, tenant_id, status, content)
return b''
@app.route('/v1.0/<string:tenant_id>/entities/<string:entity_id>/alarms/<string:alarm_id>',
methods=['DELETE'])
def delete_alarm(self, request, tenant_id, entity_id, alarm_id):
"""
Delete an alarm
"""
entities = self._entity_cache_for_tenant(tenant_id).entities
try:
_delete_alarm(entities, entity_id, alarm_id)
except (ObjectDoesNotExist, ParentDoesNotExist) as e:
request.setResponseCode(e.code)
self._audit('alarms', request, tenant_id, e.code)
return json.dumps(e.to_json())
status = 204
request.setResponseCode(status)
self._audit('alarms', request, tenant_id, status)
request.setHeader(b'content-type', b'text/plain')
return b''
@app.route('/v1.0/<string:tenant_id>/entities/<string:entity_id>/test-alarm', methods=['POST'])
def test_alarm(self, request, tenant_id, entity_id):
"""
Test an alarm.
This API can be driven using the control API to set an error
or canned success response. If no error or success response is set,
it will return success with a random state and status. Users should
not expect this API to consistently return either OK, WARNING or
CRITICAL without first setting the response in the control API.
"""
content = request.content.read()
payload = json.loads(content.decode("utf-8"))
n_tests = len(payload['check_data'])
current_time_milliseconds = int(
1000 * self._session_store.clock.seconds())
status = 200
response_payload = []
test_responses = self._entity_cache_for_tenant(
tenant_id).test_alarm_responses
test_errors = self._entity_cache_for_tenant(
tenant_id).test_alarm_errors
if entity_id in test_errors and len(test_errors[entity_id]) > 0:
error_response = test_errors[entity_id].popleft()
status = error_response['code']
response_payload = error_response['response']
elif entity_id in test_responses:
n_responses = len(test_responses[entity_id])
for i in range(n_tests):
test_response = test_responses[entity_id][i % n_responses]
response_payload.append({'state': test_response['state'],
'status': test_response.get(
'status', 'Matched default return statement'),
'timestamp': current_time_milliseconds})
else:
for _ in range(n_tests):
response_payload.append({'state': random.choice(['OK', 'WARNING', 'CRITICAL']),
'status': random_hipsum(12),
'timestamp': current_time_milliseconds})
request.setResponseCode(status)
self._audit('alarms', request, tenant_id, status, content)
return json.dumps(response_payload)
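# Illustrative sketch (not part of the original handler): request and response
# shapes for this endpoint. Only the length of 'check_data' matters; its
# contents are not inspected by this mock. The values below are invented.
#
#   POST /v1.0/<tenant_id>/entities/<entity_id>/test-alarm
#   {"criteria": "...", "check_data": [{}, {}]}
#
# returns, unless a canned response or error has been pushed via the control
# routes .../alarms/test_response and .../alarms/test_errors:
#
#   [{"state": "OK", "status": "<random text>", "timestamp": <now in ms>},
#    {"state": "CRITICAL", "status": "<random text>", "timestamp": <now in ms>}]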
@app.route('/v1.0/<string:tenant_id>/entities/<string:entity_id>/alarms', methods=['GET'])
def get_alarms_for_entity(self, request, tenant_id, entity_id):
"""
Get all alarms for the specified entity.
"""
entities = self._entity_cache_for_tenant(tenant_id).entities
try:
entity = _get_parent_entity(entities, entity_id)
alarms = entity.list_alarms()
metadata = {'count': len(alarms),
'limit': 1000,
'marker': None,
'next_marker': None,
'next_href': None}
request.setResponseCode(200)
return json.dumps({'metadata': metadata, 'values': alarms})
except ParentDoesNotExist as e:
request.setResponseCode(e.code)
return json.dumps(e.to_json())
@app.route('/v1.0/<string:tenant_id>/views/overview', methods=['GET'])
def overview(self, request, tenant_id):
"""
Serves the overview API call; returns all entities, checks and alarms.
"""
entity_map = self._entity_cache_for_tenant(tenant_id).entities
all_entities = None
if b'entityId' in request.args:
entity_ids = [a.decode("utf-8") for a in request.args[b'entityId']]
all_entities = [entity_map[entity_id] for entity_id in entity_ids
if entity_id in entity_map]
if len(all_entities) == 0:
err = ObjectDoesNotExist(
object_type='Entity', key=','.join(entity_ids))
request.setResponseCode(err.code)
return json.dumps(err.to_json())
else:
all_entities = list(entity_map.values())
maas_store = self._entity_cache_for_tenant(tenant_id).maas_store
page_limit = min(int(request.args.get(b'limit', [100])[0]), 1000)
offset = 0
current_marker = request.args.get(b'marker', [None])[0]
if current_marker is not None:
current_marker = current_marker.decode("utf-8")
try:
offset = all_entities.index(
Matcher(lambda entity: entity.id == current_marker))
except ValueError:
offset = 0
entities = all_entities[offset:offset + page_limit]
next_marker = None
if offset + page_limit < len(all_entities):
next_marker = all_entities[offset + page_limit].id
metadata = {
'count': len(entities),
'marker': current_marker,
'next_marker': next_marker,
'limit': page_limit,
'next_href': None
}
values = [{'alarms': entity.list_alarms(),
'checks': entity.list_checks(),
'entity': entity.to_json(),
'latest_alarm_states': [
state.brief_json()
for state in maas_store.latest_alarm_states_for_entity(entity.id)]}
for entity in entities]
request.setResponseCode(200)
return json.dumps({'metadata': metadata, 'values': values})
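# Illustrative sketch (not part of the original handler): marker pagination as
# implemented above. Identifiers are invented for demonstration.
#
#   GET /v1.0/<tenant_id>/views/overview?limit=2
#       -> first two entities; metadata.next_marker is the third entity's id
#   GET /v1.0/<tenant_id>/views/overview?limit=2&marker=<next_marker>
#       -> the next page; an unknown marker silently restarts at offset 0
#
# Passing one or more entityId query arguments restricts the view to those
# entities and returns an ObjectDoesNotExist error if none of them exist.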
@app.route('/v1.0/<string:tenant_id>/audits', methods=['GET'])
def list_audits(self, request, tenant_id):
"""
Gets the user's audit logs.
"""
ordering = -1 if request.args.get(b'reverse', False) else 1
all_audits = self._entity_cache_for_tenant(
tenant_id).audits_list[::ordering]
page_limit = min(int(request.args.get(b'limit', [100])[0]), 1000)
offset = 0
current_marker = request.args.get(b'marker', [None])[0]
if current_marker is not None:
current_marker = current_marker.decode("utf-8")
try:
offset = all_audits.index(
Matcher(lambda audit: audit['id'] == current_marker))
except ValueError:
offset = 0
audits = all_audits[offset:offset + page_limit]
next_marker = None
if offset + page_limit < len(all_audits):
next_marker = all_audits[offset + page_limit]['id']
metadata = {
'count': len(audits),
'marker': current_marker,
'next_marker': next_marker,
'limit': page_limit,
'next_href': None
}
request.setResponseCode(200)
return json.dumps({'metadata': metadata, 'values': audits})
@app.route('/v1.0/<string:tenant_id>/__experiments/json_home', methods=['GET'])
def service_json_home(self, request, tenant_id):
"""
JSON home call. Cloud Intelligence doesn't actually use these URLs directly;
rather, it runs some regexes on them to figure out what permissions the user
has.
TODO: Regionless API
"""
request.setResponseCode(200)
mockapi_id = re.findall('/mimicking/(.+?)/',
request.path.decode("utf-8"))[0]
url = base_uri_from_request(request).rstrip(
'/') + '/mimicking/' + mockapi_id + '/ORD/v1.0'
return json.dumps(json_home(url))
@app.route('/v1.0/<string:tenant_id>/views/agent_host_info', methods=['GET'])
def view_agent_host_info(self, request, tenant_id):
"""
Mocks the /views/agent_host_info API call.
"""
entities = self._entity_cache_for_tenant(tenant_id).entities
maas_store = self._entity_cache_for_tenant(tenant_id).maas_store
if b'include' not in request.args:
request.setResponseCode(400)
return json.dumps({'type': 'badRequest',
'code': 400,
'message': 'Validation error for key \'include\'',
'details': 'Must include at least one HOST_INFO_TYPE.',
'txnId': ('.fake.mimic.transaction.id.c-1111111'
'.ts-123444444.v-12344frf')})
if b'entityId' not in request.args:
request.setResponseCode(400)
return json.dumps({'type': 'badRequest',
'code': 400,
'message': 'Validation error for key \'agentId, entityId, uri\'',
'details': 'You must specify an agentId, entityId, or an entity URI.',
'mimicNotes': 'But mimic will only accept entityId right now',
'txnId': ('.fake.mimic.transaction.id.c-1111111'
'.ts-123444444.v-12344frf')})
entity_id = request.args[b'entityId'][0].strip().decode("utf-8")
entity = None
try:
entity = _get_entity(entities, entity_id)
except ObjectDoesNotExist as e:
request.setResponseCode(e.code)
return json.dumps(e.to_json())
if entity.agent_id is None:
request.setResponseCode(400)
return json.dumps({'type': 'agentDoesNotExist',
'code': 400,
'message': 'Agent does not exist',
'details': 'Agent null does not exist',
'txnId': ('.fake.mimic.transaction.id.c-1111111.'
'ts-123444444.v-12344frf')})
try:
agent = maas_store.agents[entity.agent_id]
except KeyError:
request.setResponseCode(400)
return json.dumps({'type': 'agentDoesNotExist',
'code': 400,
'message': 'Agent does not exist',
'details': 'Agent {0} does not exist'.format(entity.agent_id),
'txnId': ('.fake.mimic.transaction.id.c-1111111.'
'ts-123444444.v-12344frf')})
request.setResponseCode(200)
return json.dumps({
'values': [{'agent_id': entity.agent_id,
'entity_id': entity_id,
'entity_uri': entity.uri,
'host_info': agent.get_host_info(
maas_store.host_info_types,
[arg.decode('utf-8')
for arg in request.args[b'include']],
entity_id,
self._session_store.clock)}],
'metadata': {'count': 1,
'limit': 100,
'marker': None,
'next_marker': None,
'next_href': None}})
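# Illustrative sketch (not part of the original handler): a request satisfying
# both required query parameters. The include values are examples and must name
# host-info types known to the mock's maas_store; the entity id is invented.
#
#   GET /v1.0/<tenant_id>/views/agent_host_info?entityId=en123456&include=cpus&include=memory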
@app.route('/v1.0/<string:tenant_id>/views/connections', methods=['GET'])
def view_connections(self, request, tenant_id):
"""
Lists agent connections.
"""
maas_store = self._entity_cache_for_tenant(tenant_id).maas_store
if b'agentId' not in request.args:
request.setResponseCode(400)
return json.dumps({'type': 'badRequest',
'code': 400,
'message': 'Validation error for key \'agentId\'',
'details': 'You must specify an agentId',
'txnId': '.fake.mimic.transaction.id.c-1111111.ts-123444444.v-12344frf'})
agent_ids = request.args[b'agentId']
decoded_agent_ids = [agent_id.decode(
"utf-8") for agent_id in agent_ids]
connections = [{'agent_id': agent_id,
'connections': [connection.to_json()
for connection in maas_store.list_connections_for_agent(
agent_id)]}
for agent_id in decoded_agent_ids]
return json.dumps({'values': connections,
'metadata': {'count': len(connections),
'limit': None,
'marker': None,
'next_marker': None,
'next_href': None}})
@app.route('/v1.0/<string:tenant_id>/agent_installers', methods=['POST'])
def agent_installer(self, request, tenant_id):
"""
URL of agent install script
"""
xsil = (b'https://monitoring.api.rackspacecloud.com'
b'/v1.0/00000/agent_installers/c69b2ceafc0444506fb32255af3d9be3.sh')
status = 201
request.setResponseCode(status)
request.setHeader(b'x-shell-installer-location', xsil)
self._audit('agent_installers', request, tenant_id,
status, request.content.read())
return b''
@app.route('/v1.0/<string:tenant_id>/notifications', methods=['POST'])
def create_notification(self, request, tenant_id):
"""
Create notification target
"""
content = request.content.read()
new_n = create_notification(
self._session_store.clock, json.loads(content.decode("utf-8")))
notifications = self._entity_cache_for_tenant(tenant_id).notifications
notifications[new_n.id] = new_n
status = 201
request.setResponseCode(status)
request.setHeader(b'content-type', b'text/plain')
request.setHeader(b'location', base_uri_from_request(request).rstrip('/').encode('utf-8') +
request.path + b'/' + new_n.id.encode('utf-8'))
request.setHeader(b'x-object-id', new_n.id.encode('utf-8'))
self._audit('notifications', request, tenant_id, status, content)
return b''
@app.route('/v1.0/<string:tenant_id>/notifications', methods=['GET'])
def get_notifications(self, request, tenant_id):
"""
Get notification targets
"""
notifications = self._entity_cache_for_tenant(tenant_id).notifications
metadata = {'count': len(notifications),
'limit': 100,
'marker': None,
'next_marker': None,
'next_href': None}
request.setResponseCode(200)
return json.dumps({'values': [nt.to_json() for nt in notifications.values()],
'metadata': metadata})
@app.route('/v1.0/<string:tenant_id>/notifications/<string:nt_id>', methods=['PUT'])
def update_notifications(self, request, tenant_id, nt_id):
"""
Updates notification targets
"""
content = request.content.read()
postdata = json.loads(content.decode("utf-8"))
update_kwargs = dict(postdata)
notifications = self._entity_cache_for_tenant(tenant_id).notifications
try:
notification = _get_notification(notifications, nt_id)
notification.update(
clock=self._session_store.clock, **update_kwargs)
except ObjectDoesNotExist as e:
request.setResponseCode(e.code)
self._audit('notifications', request, tenant_id, e.code, content)
return json.dumps(e.to_json())
status = 204
request.setResponseCode(status)
request.setHeader(b'content-type', b'text/plain')
self._audit('notifications', request, tenant_id, status, content)
return b''
@app.route('/v1.0/<string:tenant_id>/notifications/<string:nt_id>', methods=['DELETE'])
def delete_notification(self, request, tenant_id, nt_id):
"""
Delete a notification
"""
notifications = self._entity_cache_for_tenant(tenant_id).notifications
try:
_delete_notification(notifications, nt_id)
except ObjectDoesNotExist as e:
request.setResponseCode(e.code)
self._audit('notifications', request, tenant_id, e.code)
return json.dumps(e.to_json())
status = 204
request.setResponseCode(status)
self._audit('notifications', request, tenant_id, status)
request.setHeader(b'content-type', b'text/plain')
return b''
@app.route('/v1.0/<string:tenant_id>/notification_plans', methods=['POST'])
def create_notificationplan(self, request, tenant_id):
"""
Creates a new notification plan.
"""
content = request.content.read()
postdata = json.loads(content.decode("utf-8"))
notification_plans = self._entity_cache_for_tenant(
tenant_id).notification_plans
newnp = create_notification_plan(self._session_store.clock, postdata)
notification_plans[newnp.id] = newnp
status = 201
request.setResponseCode(status)
request.setHeader(b'content-type', b'text/plain')
request.setHeader(b'location', base_uri_from_request(request).rstrip('/').encode('utf-8') +
request.path + b'/' + newnp.id.encode('utf-8'))
request.setHeader(b'x-object-id', newnp.id.encode('utf-8'))
self._audit('notification_plans', request, tenant_id, status, content)
return b''
@app.route('/v1.0/<string:tenant_id>/notification_plans', methods=['GET'])
def get_notification_plans(self, request, tenant_id):
"""
Get all notification plans
"""
np_list = self._entity_cache_for_tenant(
tenant_id).notification_plans.values()
metadata = {'count': len(np_list),
'limit': 100,
'marker': None,
'next_marker': None,
'next_href': None}
request.setResponseCode(200)
return json.dumps({'values': [np.to_json() for np in np_list], 'metadata': metadata})
@app.route('/v1.0/<string:tenant_id>/notification_plans/<string:np_id>', methods=['GET'])
def get_notification_plan(self, request, tenant_id, np_id):
"""
Get a specific notification plan.
"""
notification_plans = self._entity_cache_for_tenant(
tenant_id).notification_plans
return _map_getter(notification_plans, request, 'NotificationPlan', np_id)
@app.route('/v1.0/<string:tenant_id>/notification_plans/<string:np_id>', methods=['PUT'])
def update_notification_plan(self, request, tenant_id, np_id):
"""
Alter a notification plan
"""
content = request.content.read()
postdata = json.loads(content.decode("utf-8"))
update_kwargs = dict(postdata)
notification_plans = self._entity_cache_for_tenant(
tenant_id).notification_plans
try:
notification_plan = _get_notification_plan(
notification_plans, np_id)
notification_plan.update(
clock=self._session_store.clock, **update_kwargs)
except ObjectDoesNotExist as e:
request.setResponseCode(e.code)
self._audit('notification_plans', request,
tenant_id, e.code, content)
return json.dumps(e.to_json())
status = 204
request.setResponseCode(status)
request.setHeader(b'content-type', b'text/plain')
self._audit('notification_plans', request, tenant_id, status, content)
return b''
@app.route('/v1.0/<string:tenant_id>/notification_plans/<string:np_id>', methods=['DELETE'])
def delete_notification_plan(self, request, tenant_id, np_id):
"""
Remove a notification plan
"""
notification_plans = self._entity_cache_for_tenant(
tenant_id).notification_plans
entities = self._entity_cache_for_tenant(tenant_id).entities
alarmids_using_np = [alarm.id
for entity in entities.values()
for alarm in entity.alarms.values()
if alarm.notification_plan_id == np_id]
if len(alarmids_using_np):
status = 403
request.setResponseCode(status)
err_message = ('Notification plans cannot be removed while alarms ' +
'are using it: {0}'.format(' '.join(alarmids_using_np)))
self._audit('notification_plans', request, tenant_id, status)
return json.dumps({'type': 'forbiddenError',
'code': status,
'txnId': '.fake.mimic.transaction.id.c-1111111.ts-123444444.v-12344frf',
'message': err_message,
'details': err_message})
try:
_delete_notification_plan(notification_plans, np_id)
except ObjectDoesNotExist as e:
request.setResponseCode(e.code)
self._audit('notification_plans', request, tenant_id, e.code)
return json.dumps(e.to_json())
status = 204
request.setResponseCode(status)
self._audit('notification_plans', request, tenant_id, status)
request.setHeader(b'content-type', b'text/plain')
return b''
@app.route('/v1.0/<string:tenant_id>/suppressions', methods=['GET'])
def get_suppressions(self, request, tenant_id):
"""
Get the list of suppressions for this tenant.
"""
sp_list = self._entity_cache_for_tenant(
tenant_id).suppressions.values()
metadata = {
'count': len(sp_list),
'limit': 100,
'marker': None,
'next_marker': None,
'next_href': None
}
request.setResponseCode(200)
return json.dumps({'values': [sp.to_json() for sp in sp_list], 'metadata': metadata})
@app.route('/v1.0/<string:tenant_id>/suppressions/<string:sp_id>', methods=['GET'])
def get_suppression(self, request, tenant_id, sp_id):
"""
Get a suppression by ID.
"""
suppressions = self._entity_cache_for_tenant(tenant_id).suppressions
return _map_getter(suppressions, request, 'Suppression', sp_id)
@app.route('/v1.0/<string:tenant_id>/suppressions', methods=['POST'])
def create_suppression(self, request, tenant_id):
"""
Create a new suppression.
"""
content = request.content.read()
postdata = json.loads(content.decode("utf-8"))
newsp = create_suppression(self._session_store.clock, postdata)
suppressions = self._entity_cache_for_tenant(tenant_id).suppressions
suppressions[newsp.id] = newsp
status = 201
request.setResponseCode(status)
request.setHeader(b'location', base_uri_from_request(request).rstrip('/').encode('utf-8') +
request.path + b'/' + newsp.id.encode('utf-8'))
request.setHeader(b'x-object-id', newsp.id.encode('utf-8'))
request.setHeader(b'content-type', b'text/plain')
self._audit('suppressions', request, tenant_id, status, content)
return b''
@app.route('/v1.0/<string:tenant_id>/suppressions/<string:sp_id>', methods=['PUT'])
def update_suppression(self, request, tenant_id, sp_id):
"""
Update a suppression.
"""
content = request.content.read()
postdata = json.loads(content.decode("utf-8"))
update_kwargs = dict(postdata)
suppressions = self._entity_cache_for_tenant(tenant_id).suppressions
try:
suppression = _get_suppression(suppressions, sp_id)
suppression.update(
clock=self._session_store.clock, **update_kwargs)
except ObjectDoesNotExist as e:
request.setResponseCode(e.code)
self._audit('suppressions', request, tenant_id, e.code, content)
return json.dumps(e.to_json())
status = 204
request.setResponseCode(status)
request.setHeader(b'content-type', b'text/plain')
self._audit('suppressions', request, tenant_id, status, content)
return b''
@app.route('/v1.0/<string:tenant_id>/suppressions/<string:sp_id>', methods=['DELETE'])
def delete_suppression(self, request, tenant_id, sp_id):
"""
Delete a suppression.
"""
suppressions = self._entity_cache_for_tenant(tenant_id).suppressions
try:
_delete_suppression(suppressions, sp_id)
except ObjectDoesNotExist as e:
request.setResponseCode(e.code)
self._audit('suppressions', request, tenant_id, e.code)
return json.dumps(e.to_json())
status = 204
request.setResponseCode(status)
self._audit('suppressions', request, tenant_id, status)
request.setHeader(b'content-type', b'text/plain')
return b''
@app.route('/v1.0/<string:tenant_id>/monitoring_zones', methods=['GET'])
def list_monitoring_zones(self, request, tenant_id):
"""
Lists the monitoring zones
"""
mzs = monitoring_zones()
metadata = {
'count': len(mzs),
'limit': 100,
'marker': None,
'next_marker': None,
'next_href': None
}
request.setResponseCode(200)
return json.dumps({'values': mzs, 'metadata': metadata})
@app.route('/v1.0/<string:tenant_id>/alarm_examples', methods=['GET'])
def list_alarm_examples(self, request, tenant_id):
"""
Lists all of the alarm examples.
"""
axs = alarm_examples()
metadata = {
'count': len(axs),
'limit': 100,
'marker': None,
'next_marker': None,
'next_href': None
}
request.setResponseCode(200)
return json.dumps({'values': axs, 'metadata': metadata})
@app.route('/v1.0/<string:tenant_id>/views/alarmCountsPerNp', methods=['GET'])
def alarm_counts_per_np(self, request, tenant_id):
"""
All notification plans with the number of alarms pointing at each of them.
"""
notification_plans = self._entity_cache_for_tenant(
tenant_id).notification_plans
entities = self._entity_cache_for_tenant(tenant_id).entities
values = [{'notification_plan_id': np.id,
'alarm_count': len([alarm
for entity in entities.values()
for alarm in entity.alarms.values()
if alarm.notification_plan_id == np.id])}
for np in notification_plans.values()]
metadata = {'limit': 100,
'marker': None,
'next_marker': None,
'next_href': None,
'count': len(values)}
request.setResponseCode(200)
return json.dumps({'values': values, 'metadata': metadata})
@app.route('/v1.0/<string:tenant_id>/views/alarmsByNp/<string:np_id>', methods=['GET'])
def alarms_by_np(self, request, tenant_id, np_id):
"""
List of alarms pointing to a particular NotificationPlan
"""
entities = self._entity_cache_for_tenant(tenant_id).entities
values = [alarm.to_json()
for entity in entities.values()
for alarm in entity.alarms.values()
if alarm.notification_plan_id == np_id]
metadata = {'limit': 100,
'marker': None,
'next_marker': None,
'next_href': None,
'count': len(values)}
request.setResponseCode(200)
return json.dumps({'values': values, 'metadata': metadata})
@app.route('/v1.0/<string:tenant_id>/notification_types', methods=['GET'])
def get_notification_types(self, request, tenant_id):
"""
Get the types of notifications supported: pagerduty, email, sms, etc.
"""
ntlist = self._entity_cache_for_tenant(
tenant_id).notificationtypes_list
metadata = {'count': len(ntlist),
'limit': 100,
'marker': None,
'next_marker': None,
'next_href': None}
request.setResponseCode(200)
return json.dumps({'values': ntlist, 'metadata': metadata})
@app.route('/v1.0/<string:tenant_id>/views/metric_list', methods=['GET'])
def views_metric_list(self, request, tenant_id):
"""
All available metrics.
"""
entities = self._entity_cache_for_tenant(tenant_id).entities
maas_store = self._entity_cache_for_tenant(tenant_id).maas_store
values = [_metric_list_for_entity(maas_store, entity)
for entity in entities.values()]
metadata = {'count': len(values),
'marker': None,
'next_marker': None,
'limit': 1000,
'next_href': None}
request.setResponseCode(200)
return json.dumps({'metadata': metadata, 'values': values})
@app.route('/v1.0/<string:tenant_id>/__experiments/multiplot', methods=['POST'])
def multiplot(self, request, tenant_id):
"""
datapoints for all metrics requested
Right now, only checks of type remote.ping work
"""
entities = self._entity_cache_for_tenant(tenant_id).entities
maas_store = self._entity_cache_for_tenant(tenant_id).maas_store
content = request.content.read()
multiplot_request = json.loads(content.decode("utf-8"))
requested_check_ids = set([metric['check_id']
for metric in multiplot_request['metrics']])
checks_by_id = {check.id: check
for entity in entities.values()
for check in entity.checks.values()
if check.id in requested_check_ids}
for requested_metric in multiplot_request['metrics']:
if requested_metric['check_id'] not in checks_by_id:
status = 400
request.setResponseCode(status)
self._audit('rollups', request, tenant_id, status, content)
return json.dumps({
'type': 'requiredNotFoundError',
'code': status,
'message': 'Required object does not exist',
'details': 'Object "Check" with key "{0},{1}" does not exist'.format(
requested_metric['entity_id'], requested_metric['check_id']),
'txnId': '.fake.mimic.transaction.id.c-1111111.ts-123444444.v-12344frf'})
multiplot_metrics = [_compute_multiplot(maas_store,
metric['entity_id'],
checks_by_id[
metric['check_id']],
metric['metric'],
int(request.args[b'from'][0]),
int(request.args[b'to'][0]),
int(request.args[b'points'][0]))
for metric in multiplot_request['metrics']]
status = 200
request.setResponseCode(200)
self._audit('rollups', request, tenant_id, status, content)
return json.dumps({'metrics': multiplot_metrics})
@app.route('/v1.0/<string:tenant_id>/views/latest_alarm_states', methods=['GET'])
def latest_alarm_states(self, request, tenant_id):
"""
Gets entities grouped with their latest alarm states.
"""
entities = self._entity_cache_for_tenant(tenant_id).entities
maas_store = self._entity_cache_for_tenant(tenant_id).maas_store
values = [{'entity_id': entity.id,
'entity_uri': entity.uri,
'entity_label': entity.label,
'latest_alarm_states': [
state.detail_json()
for state in maas_store.latest_alarm_states_for_entity(entity.id)]}
for entity in entities.values()]
metadata = {'count': len(values),
'marker': None,
'next_marker': None,
'limit': 1000,
'next_href': None}
request.setResponseCode(200)
return json.dumps({'values': values, 'metadata': metadata})
@app.route('/v1.0/<string:tenant_id>/changelogs/alarms', methods=['GET'])
def change_logs(self, request, tenant_id):
"""
Gets the alarm changelog: one entry per recorded alarm state transition.
"""
all_alarm_states = self._entity_cache_for_tenant(
tenant_id).maas_store.alarm_states
values = [{'id': text_type(uuid4()), # probably "correct" would be each_alarm_state.id
'timestamp': int(1000 * self._session_store.clock.seconds()),
'entity_id': each_alarm_state.entity_id,
'alarm_id': each_alarm_state.alarm_id,
'check_id': each_alarm_state.check_id,
'status': each_alarm_state.status,
'state': each_alarm_state.state,
'previous_state': each_alarm_state.previous_state,
'observation': each_alarm_state.status,
'analyzed_by_monitoring_zone_id': each_alarm_state.analyzed_by_monitoring_zone_id}
for each_alarm_state in all_alarm_states]
metadata = {'count': len(values),
'marker': None,
'next_marker': None,
'limit': 1000,
'next_href': None}
request.setResponseCode(200)
return json.dumps({'values': values, 'metadata': metadata})
@implementer(IAPIMock, IPlugin)
@attr.s
class MaasControlApi(object):
"""
This class registers the MaaS controller API in the service catalog.
"""
maas_api = attr.ib()
def catalog_entries(self, tenant_id):
"""
List catalog entries for the MaaS API.
"""
return [
Entry(
tenant_id, "rax:monitor", "cloudMonitoringControl",
[
Endpoint(tenant_id, region, text_type(uuid4()),
"v1.0")
for region in self.maas_api._regions
]
)
]
def resource_for_region(self, region, uri_prefix, session_store):
"""
Get an :obj:`twisted.web.iweb.IResource` for the given URI prefix;
implement :obj:`IAPIMock`.
"""
maas_controller = MaasController(api_mock=self,
session_store=session_store,
region=region)
return maas_controller.app.resource()
@attr.s
class MaasController(object):
"""
Klein routes for MaaS control API.
"""
api_mock = attr.ib()
session_store = attr.ib()
region = attr.ib()
def _entity_cache_for_tenant(self, tenant_id):
"""
Retrieve the per-region MCache object containing all objects created so far
"""
clock = self.session_store.clock
return (self.session_store.session_for_tenant_id(tenant_id)
.data_for_api(self.api_mock.maas_api, _mcache_factory(clock))[self.region])
app = MimicApp()
@app.route('/v1.0/<string:tenant_id>/entities/<string:entity_id>/alarms/test_response',
methods=['PUT'])
def set_test_alarm_response(self, request, tenant_id, entity_id):
"""
Sets the test-alarm response for a given entity.
"""
test_responses = self._entity_cache_for_tenant(
tenant_id).test_alarm_responses
dummy_response = json_from_request(request)
test_responses[entity_id] = []
for response_block in dummy_response:
ith_response = {'state': response_block['state']}
if 'status' in response_block:
ith_response['status'] = response_block['status']
test_responses[entity_id].append(ith_response)
request.setResponseCode(204)
return b''
@app.route('/v1.0/<string:tenant_id>/entities/<string:entity_id>/alarms/test_errors',
methods=['POST'])
def push_test_alarm_error(self, request, tenant_id, entity_id):
"""
Creates a new error response that will be returned from the
test-alarm API the next time it is called for this entity.
"""
test_alarm_errors = self._entity_cache_for_tenant(
tenant_id).test_alarm_errors
request_body = json_from_request(request)
if entity_id not in test_alarm_errors:
test_alarm_errors[entity_id] = collections.deque()
error_obj = {'id': 'er' + random_hex_generator(4),
'code': request_body['code'],
'response': request_body['response']}
test_alarm_errors[entity_id].append(error_obj)
request.setResponseCode(201)
request.setHeader(b'x-object-id', error_obj['id'].encode('utf-8'))
return b''
@app.route('/v1.0/<string:tenant_id>/entities/<string:entity_id>/alarms/test_response',
methods=['DELETE'])
def clear_test_alarm_response(self, request, tenant_id, entity_id):
"""
Clears the test-alarm response and restores normal behavior.
"""
test_responses = self._entity_cache_for_tenant(
tenant_id).test_alarm_responses
del test_responses[entity_id]
request.setResponseCode(204)
return b''
@app.route('/v1.0/<string:tenant_id>/entities/<string:entity_id>/checks' +
'/test_responses/<string:check_type>', methods=['PUT'])
def set_test_check_overrides(self, request, tenant_id, entity_id, check_type):
"""
Sets overriding behavior on the test-check handler for a given
entity ID and check type.
"""
maas_store = self._entity_cache_for_tenant(tenant_id).maas_store
check_type_ins = maas_store.check_types[check_type]
overrides = json_from_request(request)
check_id = '__test_check'
ench_key = (entity_id, check_id)
for override in overrides:
if 'available' in override:
check_type_ins.test_check_available[
ench_key] = override['available']
if 'status' in override:
check_type_ins.test_check_status[ench_key] = override['status']
metrics_dict = override.get('metrics', {})
for metric_name in metrics_dict:
test_check_metric = check_type_ins.get_metric_by_name(
metric_name)
kwargs = {'entity_id': entity_id,
'check_id': check_id,
'override_fn': lambda _: metrics_dict[metric_name]['data']}
if 'monitoring_zone_id' in override:
kwargs['monitoring_zone'] = override['monitoring_zone_id']
test_check_metric.set_override(**kwargs)
request.setResponseCode(204)
return b''
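# Illustrative sketch (not part of the original handler): an override document
# this route accepts. The keys mirror what the loop above reads; the metric
# name and values are invented for demonstration.
#
#   PUT .../entities/<entity_id>/checks/test_responses/remote.ping
#   [{"available": true,
#     "status": "code=200",
#     "monitoring_zone_id": "mzord",
#     "metrics": {"duration": {"data": 123}}}]
#
# Overrides are recorded against the synthetic check id '__test_check' and stay
# in effect until the matching DELETE route clears them.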
@app.route('/v1.0/<string:tenant_id>/entities/<string:entity_id>/checks' +
'/test_responses/<string:check_type>', methods=['DELETE'])
def clear_test_check_overrides(self, request, tenant_id, entity_id, check_type):
"""
Clears overriding behavior on a test-check handler.
"""
maas_store = self._entity_cache_for_tenant(tenant_id).maas_store
check_type_ins = maas_store.check_types[check_type]
check_type_ins.clear_overrides()
request.setResponseCode(204)
return b''
@app.route('/v1.0/<string:tenant_id>/entities/<string:entity_id>/alarms' +
'/<string:alarm_id>/states', methods=['POST'])
def create_alarm_state(self, request, tenant_id, entity_id, alarm_id):
"""
Adds a new alarm state to the collection of alarm states.
"""
maas_store = self._entity_cache_for_tenant(tenant_id).maas_store
entities = self._entity_cache_for_tenant(tenant_id).entities
request_body = json_from_request(request)
alarm = None
try:
alarm = _get_alarm(entities, entity_id, alarm_id)
except (ObjectDoesNotExist, ParentDoesNotExist) as e:
request.setResponseCode(e.code)
return json.dumps(e.to_json())
previous_state = u'UNKNOWN'
alarm_states_same_entity_and_alarm = [
state for state in maas_store.alarm_states
if state.entity_id == entity_id and state.alarm_id == alarm_id]
if len(alarm_states_same_entity_and_alarm) > 0:
previous_state = alarm_states_same_entity_and_alarm[-1].state
monitoring_zone_id = request_body.get(
'analyzed_by_monitoring_zone_id', u'mzord')
new_state = None
try:
new_state = AlarmState(alarm_id=alarm_id,
entity_id=entity_id,
check_id=alarm.check_id,
alarm_label=alarm.label,
analyzed_by_monitoring_zone_id=monitoring_zone_id,
previous_state=previous_state,
state=request_body['state'],
status=request_body['status'],
timestamp=int(1000 * self.session_store.clock.seconds()))
except KeyError as e:
missing_key = e.args[0]
status = 400
request.setResponseCode(status)
return json.dumps({'type': 'badRequest',
'code': status,
'message': 'Validation error for key \'{0}\''.format(missing_key),
'details': 'Missing required key ({0})'.format(missing_key),
'txnId': '.fake.mimic.transaction.id.c-1111111.ts-123444444.v-12344frf'})
maas_store.alarm_states.append(new_state)
request.setResponseCode(201)
request.setHeader(b'x-object-id', new_state.id.encode('utf-8'))
request.setHeader(b'content-type', b'text/plain')
return b''
@app.route('/v1.0/<string:tenant_id>/entities/<string:entity_id>/checks' +
'/<string:check_id>/metrics/<string:metric_name>', methods=['PUT'])
def set_metric_override(self, request, tenant_id, entity_id, check_id, metric_name):
"""
Sets overrides on a metric.
"""
entities = self._entity_cache_for_tenant(tenant_id).entities
check = None
try:
check = _get_check(entities, entity_id, check_id)
except (ObjectDoesNotExist, ParentDoesNotExist) as e:
request.setResponseCode(e.code)
return json.dumps(e.to_json())
maas_store = self._entity_cache_for_tenant(tenant_id).maas_store
metric = maas_store.check_types[
check.type].get_metric_by_name(metric_name)
request_body = json_from_request(request)
monitoring_zones = request_body.get('monitoring_zones', ['__AGENT__'])
override_type = request_body['type']
override_options = request_body.get('options', {})
override_fn = None
if override_type == 'squarewave':
fn_period = int(override_options.get('period', 10 * 60 * 1000))
half_period = fn_period / 2
fn_min = override_options.get('min', 20)
fn_max = override_options.get('max', 80)
fn_offset = int(override_options.get('offset', 0))
override_fn = (lambda t: (fn_min
if ((t + fn_offset) % fn_period) < half_period
else fn_max))
else:
request.setResponseCode(400)
return json.dumps({'type': 'badRequest',
'code': 400,
'message': 'Validation error for key \'type\'',
'details': 'Unknown value for "type": "{0}"'.format(override_type)})
for monitoring_zone in monitoring_zones:
metric.set_override(
entity_id=entity_id,
check_id=check_id,
monitoring_zone=monitoring_zone,
override_fn=override_fn)
request.setResponseCode(204)
return b''
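# Illustrative sketch (not part of the original handler): the only override
# 'type' understood here is 'squarewave'. Times are in milliseconds; the
# options below spell out the defaults used when they are omitted.
#
#   PUT .../entities/<entity_id>/checks/<check_id>/metrics/<metric_name>
#   {"type": "squarewave",
#    "monitoring_zones": ["__AGENT__"],
#    "options": {"period": 600000, "min": 20, "max": 80, "offset": 0}}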
@app.route('/v1.0/<string:tenant_id>/entities/<string:entity_id>/agents', methods=['POST'])
def create_agent(self, request, tenant_id, entity_id):
"""
Creates or overwrites an agent on the entity.
"""
entities = self._entity_cache_for_tenant(tenant_id).entities
maas_store = self._entity_cache_for_tenant(tenant_id).maas_store
agent = None
try:
entity = _get_entity(entities, entity_id)
agent = Agent()
entity.agent_id = agent.id
except ObjectDoesNotExist as e:
request.setResponseCode(e.code)
return json.dumps(e.to_json())
maas_store.agents[agent.id] = agent
request.setResponseCode(201)
request.setHeader(b'x-object-id', agent.id.encode('utf-8'))
request.setHeader(b'content-type', b'text/plain')
return b''
|
checkov/terraform/checks/resource/aws/DocDBAuditLogs.py | vangundy-jason-pfg/checkov | 4,013 | 12619401 | from checkov.common.models.enums import CheckCategories, CheckResult
from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceCheck
class DocDBAuditLogs(BaseResourceCheck):
def __init__(self):
name = "Ensure DocDB has audit logs enabled"
id = "CKV_AWS_104"
supported_resources = ['aws_docdb_cluster_parameter_group']
categories = [CheckCategories.LOGGING]
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
def scan_resource_conf(self, conf):
self.evaluated_keys = ['parameter']
if 'parameter' in conf:
elements = conf["parameter"]
for idx, elem in enumerate(elements):
if isinstance(elem, dict) and elem["name"][0] == "audit_logs" and elem["value"][0] == "enabled":
self.evaluated_keys = [f'parameter/[{idx}]/name', f'parameter/[{idx}]/value']
return CheckResult.PASSED
return CheckResult.FAILED
check = DocDBAuditLogs()
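# Illustrative sketch (not part of the original check): a minimal resource
# configuration, in the flattened single-element-list form checkov hands to
# scan_resource_conf, that this check reports as PASSED. The "family" value is
# an invented example.
def _example_passing_conf():
    return {
        "family": ["docdb4.0"],
        "parameter": [
            {"name": ["audit_logs"], "value": ["enabled"]},
        ],
    }


if __name__ == "__main__":
    # Manual spot check: expect CheckResult.PASSED.
    print(check.scan_resource_conf(_example_passing_conf()))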
|
libsparse/simg_dump.py | XperiaZProject/system_core | 1,144 | 12619410 | #! /usr/bin/env python
# Copyright (C) 2012 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import getopt, posixpath, signal, struct, sys
def usage(argv0):
print("""
Usage: %s [-v] sparse_image_file ...
-v verbose output
""" % ( argv0 ))
sys.exit(2)
def main():
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
me = posixpath.basename(sys.argv[0])
# Parse the command line
verbose = 0 # -v
try:
opts, args = getopt.getopt(sys.argv[1:],
"v",
["verbose"])
except getopt.GetoptError, e:
print(e)
usage(me)
for o, a in opts:
if o in ("-v", "--verbose"):
verbose += 1
else:
print("Unrecognized option \"%s\"" % (o))
usage(me)
if len(args) == 0:
print("No sparse_image_file specified")
usage(me)
for path in args:
FH = open(path, 'rb')
header_bin = FH.read(28)
header = struct.unpack("<I4H4I", header_bin)
magic = header[0]
major_version = header[1]
minor_version = header[2]
file_hdr_sz = header[3]
chunk_hdr_sz = header[4]
blk_sz = header[5]
total_blks = header[6]
total_chunks = header[7]
image_checksum = header[8]
if magic != 0xED26FF3A:
print("%s: %s: Magic should be 0xED26FF3A but is 0x%08X"
% (me, path, magic))
continue
if major_version != 1 or minor_version != 0:
print("%s: %s: I only know about version 1.0, but this is version %u.%u"
% (me, path, major_version, minor_version))
continue
if file_hdr_sz != 28:
print("%s: %s: The file header size was expected to be 28, but is %u."
% (me, path, file_hdr_sz))
continue
if chunk_hdr_sz != 12:
print("%s: %s: The chunk header size was expected to be 12, but is %u."
% (me, path, chunk_hdr_sz))
continue
print("%s: Total of %u %u-byte output blocks in %u input chunks."
% (path, total_blks, blk_sz, total_chunks))
if image_checksum != 0:
print("checksum=0x%08X" % (image_checksum))
if not verbose:
continue
print(" input_bytes output_blocks")
print("chunk offset number offset number")
offset = 0
for i in xrange(1,total_chunks+1):
header_bin = FH.read(12)
header = struct.unpack("<2H2I", header_bin)
chunk_type = header[0]
reserved1 = header[1]
chunk_sz = header[2]
total_sz = header[3]
data_sz = total_sz - 12
print("%4u %10u %10u %7u %7u" % (i, FH.tell(), data_sz, offset, chunk_sz),
end=" ")
if chunk_type == 0xCAC1:
if data_sz != (chunk_sz * blk_sz):
print("Raw chunk input size (%u) does not match output size (%u)"
% (data_sz, chunk_sz * blk_sz))
break;
else:
print("Raw data", end="")
FH.read(data_sz)
elif chunk_type == 0xCAC2:
if data_sz != 4:
print("Fill chunk should have 4 bytes of fill, but this has %u"
% (data_sz), end="")
break;
else:
fill_bin = FH.read(4)
fill = struct.unpack("<I", fill_bin)
print("Fill with 0x%08X" % (fill))
elif chunk_type == 0xCAC3:
if data_sz != 0:
print("Don't care chunk input size is non-zero (%u)" % (data_sz))
break;
else:
print("Don't care", end="")
elif chunk_type == 0xCAC4:
if data_sz != 4:
print("CRC32 chunk should have 4 bytes of CRC, but this has %u"
% (data_sz), end="")
break;
else:
crc_bin = FH.read(4)
crc = struct.unpack("<I", crc_bin)
print("Unverified CRC32 0x%08X" % (crc))
else:
print("Unknown chunk type 0x%04X" % (chunk_type), end="")
break;
if verbose > 1:
header = struct.unpack("<12B", header_bin)
print(" (%02X%02X %02X%02X %02X%02X%02X%02X %02X%02X%02X%02X)"
% (header[0], header[1], header[2], header[3],
header[4], header[5], header[6], header[7],
header[8], header[9], header[10], header[11]))
else:
print()
offset += chunk_sz
print(" %10u %7u End" % (FH.tell(), offset))
if total_blks != offset:
print("The header said we should have %u output blocks, but we saw %u"
% (total_blks, offset))
junk_len = len(FH.read())
if junk_len:
print("There were %u bytes of extra data at the end of the file."
% (junk_len))
sys.exit(0)
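# Illustrative sketch (not part of the original tool): write a tiny but valid
# sparse image so the dumper above has something to parse. Field order and
# sizes mirror the structs unpacked in main(); the file name and fill value
# are arbitrary.
def write_sample_sparse_image(path="sample.simg", blk_sz=4096):
    chunks = [
        # (chunk_type, chunk_sz in output blocks, payload bytes)
        (0xCAC2, 1, struct.pack("<I", 0xCAFEF00D)),  # fill one block
        (0xCAC3, 3, b""),                            # "don't care" for three blocks
    ]
    total_blks = sum(c[1] for c in chunks)
    with open(path, "wb") as out:
        # File header: magic, version 1.0, 28-byte file header, 12-byte chunk
        # header, block size, total output blocks, chunk count, checksum 0.
        out.write(struct.pack("<I4H4I", 0xED26FF3A, 1, 0, 28, 12,
                              blk_sz, total_blks, len(chunks), 0))
        for chunk_type, chunk_sz, payload in chunks:
            # Chunk header: type, reserved, output blocks, total chunk bytes.
            out.write(struct.pack("<2H2I", chunk_type, 0, chunk_sz,
                                  12 + len(payload)))
            out.write(payload)
    return path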
if __name__ == "__main__":
main()
|
netmiko4/send_multiline/old_solution_timing/ping.py | fallenfuzz/pynet | 528 | 12619414 | from getpass import getpass
from netmiko import ConnectHandler
device = {
"host": "cisco3.lasthop.io",
"username": "pyclass",
"password": getpass(),
"device_type": "cisco_ios",
"session_log": "my_output.txt",
}
with ConnectHandler(**device) as net_connect:
cmd = "ping"
target_ip = "8.8.8.8"
count = "30"
output = net_connect.send_command_timing(
cmd, strip_prompt=False, strip_command=False
)
output += net_connect.send_command_timing(
"\n", strip_prompt=False, strip_command=False
)
output += net_connect.send_command_timing(
target_ip, strip_prompt=False, strip_command=False
)
output += net_connect.send_command_timing(
count, strip_prompt=False, strip_command=False
)
output += net_connect.send_command_timing(
"\n", strip_prompt=False, strip_command=False
)
output += net_connect.send_command_timing(
"\n", strip_prompt=False, strip_command=False
)
output += net_connect.send_command_timing(
"\n", strip_prompt=False, strip_command=False
)
output += net_connect.send_command_timing(
"\n", strip_prompt=False, strip_command=False
)
print(output)
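# Illustrative sketch (not part of the original example): the same interactive
# ping driven by prompt matching rather than fixed timing. This is an
# assumption-laden alternative, not necessarily the "new" solution this
# directory is contrasted with: the expect_string patterns are guesses at the
# IOS extended-ping prompts, and cmd_verify=False is passed because most of
# the lines sent are answers to prompts rather than normal commands.
def ping_with_prompts(conn, target_ip="8.8.8.8", count="30"):
    output = conn.send_command("ping", expect_string=r"Protocol \[ip\]")
    output += conn.send_command("\n", expect_string=r"Target IP address",
                                cmd_verify=False)
    output += conn.send_command(target_ip, expect_string=r"Repeat count",
                                cmd_verify=False)
    output += conn.send_command(count, expect_string=r"Datagram size",
                                cmd_verify=False)
    output += conn.send_command("\n", expect_string=r"Timeout in seconds",
                                cmd_verify=False)
    output += conn.send_command("\n", expect_string=r"Extended commands",
                                cmd_verify=False)
    output += conn.send_command("\n", expect_string=r"Sweep range",
                                cmd_verify=False)
    # The final newline accepts the last default and lets the ping run; a
    # larger read timeout may be needed for long-running pings.
    output += conn.send_command("\n", cmd_verify=False)
    return output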
|
orgexporter.py | Sinamore/orgextended | 120 | 12619455 | import sublime
import sublime_plugin
import re
import regex
from pathlib import Path
import os
import fnmatch
import logging
import sys
import traceback
import OrgExtended.asettings as sets
import OrgExtended.orgdb as db
import OrgExtended.orgparse.date as date
log = logging.getLogger(__name__)
RE_TITLE = regex.compile(r"^\s*[#][+](TITLE|title)[:]\s*(?P<data>.*)")
RE_AUTHOR = regex.compile(r"^\s*[#][+](AUTHOR|author)[:]\s*(?P<data>.*)")
RE_NAME = regex.compile(r"^\s*[#][+](NAME|name)[:]\s*(?P<data>.*)")
RE_DATE = regex.compile(r"^\s*[#][+](DATE|date)[:]\s*(?P<data>.*)")
RE_EMAIL = regex.compile(r"^\s*[#][+](EMAIL|email)[:]\s*(?P<data>.*)")
RE_LANGUAGE = regex.compile(r"^\s*[#][+](LANGUAGE|language)[:]\s*(?P<data>.*)")
def ExportFilename(view,extension,suffix=""):
fn = view.file_name()
fn,ext = os.path.splitext(fn)
return fn + suffix + extension
def GetGlobalOption(file, name, settingsName, defaultValue):
value = sets.Get(settingsName, defaultValue)
value = ' '.join(file.org[0].get_comment(name, [str(value)]))
return value
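# Illustrative note (not part of the original module): the Sublime setting
# supplies the default and any in-buffer keyword overrides it. For example,
# GetGlobalOption(file, "LANGUAGE", "defaultLanguage", "en") returns "fr" when
# the buffer contains "#+LANGUAGE: fr"; the keyword and setting names here are
# hypothetical.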
class OrgExporter:
def __init__(self,filename,file,**kwargs):
self.file = file
self.fs = open(filename,"w",encoding="utf-8")
self.outputFilename = filename
self.InitExportComments()
self.PreScan()
def InitExportComments(self):
self.title = None
self.author = None
self.language = None
self.email = None
self.date = None
self.name = None
def GetOption(self,name,settingsName,defaultValue):
return GetGlobalOption(self.file, name, settingsName, defaultValue)
def PreScanExportCommentsGather(self, l):
m = RE_TITLE.match(l)
if(m):
self.title = m.captures('data')[0]
return True
m = RE_AUTHOR.match(l)
if(m):
self.author = m.captures('data')[0]
return True
m = RE_LANGUAGE.match(l)
if(m):
self.language = m.captures('data')[0]
return True
m = RE_EMAIL.match(l)
if(m):
self.email = m.captures('data')[0]
return True
m = RE_DATE.match(l)
if(m):
self.date = m.captures('data')[0]
return True
m = RE_NAME.match(l)
if(m):
self.name = m.captures('data')[0]
return True
# Called at the start of export to scan the file for game changing properties
def PreScan(self):
for l in self.file.org._lines:
self.PreScanExportCommentsGather(l)
self.PreScanCustom(l)
self.PostPreScanCustom()
# This is called when the document is being destroyed
def Close(self):
self.FinishDocCustom()
self.fs.close()
# Override this to add to the pre-scan phase
def PreScanCustom(self,l):
pass
# Called after the pre scan is complete
def PostPreScanCustom(self):
pass
# Override this to close off the document for exporting
def FinishDocCustom(self):
pass
# Document header metadata should go in here
def AddExportMetaCustom(self):
pass
# Setup to start the export of a node
def StartNode(self, n):
pass
# Export the heading of this node
def NodeHeading(self,n):
pass
# We are about to start exporting the nodes body
def StartNodeBody(self,n):
pass
# Actually buid the node body in the document
def NodeBody(self,n):
pass
# We are done exporting the nodes body so finish it off
def EndNodeBody(self,n):
pass
# We are now done the node itself so finish that off
def EndNode(self,n):
pass
# We are about to start exporting nodes
def StartNodes(self):
pass
# done exporting nodes
def EndNodes(self):
pass
def StartDocument(self, file):
pass
def EndDocument(self):
pass
def InsertScripts(self,file):
pass
def StartHead(self):
pass
def EndHead(self):
pass
def StartBody(self):
pass
def EndBody(self):
pass
class OrgExportHelper:
def __init__(self,view,index):
self.view = view
self.file = db.Get().FindInfo(self.view)
self.index = index
# Extend this for this format
def CustomBuildHead(self):
pass
def BuildHead(self):
self.CustomBuildHead()
self.doc.AddExportMetaCustom()
def BuildNode(self, n):
self.doc.StartNode(n)
self.doc.NodeHeading(n)
self.doc.StartNodeBody(n)
self.doc.NodeBody(n)
for c in n.children:
self.BuildNode(c)
self.doc.EndNodeBody(n)
self.doc.EndNode(n)
def BuildNodes(self):
if(self.index == None):
nodes = self.file.org
for n in nodes.children:
self.BuildNode(n)
else:
n = self.file.org.env._nodes[self.index]
self.BuildNode(n)
def BuildDocument(self):
self.doc.StartNodes()
self.BuildNodes()
self.doc.EndNodes()
def BuildBody(self):
self.doc.StartDocument(self.file)
self.BuildDocument()
self.doc.EndDocument()
self.doc.InsertScripts(self.file)
def Run(self,outputFilename,doc):
try:
self.doc = doc
self.doc.StartHead()
self.BuildHead()
self.doc.EndHead()
self.doc.StartBody()
self.BuildBody()
self.doc.EndBody()
finally:
if(None != self.doc):
self.doc.Close()
log.log(51,"EXPORT COMPLETE: " + str(outputFilename))
self.view.set_status("ORG_EXPORT","EXPORT COMPLETE: " + str(outputFilename))
sublime.set_timeout(self.ClearStatus, 1000*10)
def ClearStatus(self):
self.view.set_status("ORG_EXPORT","")
class BlockState:
def __init__(self,startre,endre,doc,exportEndLine=False):
self.startre = startre
self.endre = endre
self.e = doc
self.exportEndLine = exportEndLine
def Handle(self, lines, orgnode):
amIn = False
for line in lines:
if(amIn):
m = self.endre.search(line)
if(m):
amIn = False
self.e.SetAmInBlock(False)
self.HandleExiting(m,line,orgnode)
if(self.exportEndLine):
yield line
continue
else:
self.HandleIn(line,orgnode)
continue
else:
if(not self.e.AmInBlock()):
m = self.startre.search(line)
if(m):
amIn = True
self.e.SetAmInBlock(True)
self.HandleEntering(m,line,orgnode)
continue
yield line
if(amIn):
amIn = False
self.e.SetAmInBlock(False)
self.HandleExiting(None,None,orgnode)
def HandleIn(self, line, orgnode):
pass
def HandleExiting(self, m, line, orgnode):
pass
def HandleEntering(self, m, line, orgnode):
pass
class ListBlockState:
def __init__(self,listre,doc):
self.listre = listre
self.e = doc
def Handle(self, lines, orgnode):
inList = 0
curIndent = 0
for l in lines:
m = self.listre.search(l)
if(m):
thisIndent = len(m.group('indent'))
if(not inList):
if(not self.e.AmInBlock()):
curIndent = thisIndent
self.HandleEntering(m,l,orgnode)
inList += 1
else:
yield l
continue
elif(thisIndent > curIndent):
curIndent = thisIndent
self.HandleEntering(m,l,orgnode)
inList += 1
elif(thisIndent < curIndent and inList > 1):
inList -= 1
self.HandleExiting(m,l,orgnode)
startItem = len(self.e.doc)
self.StartHandleItem(m,l,orgnode)
yield m.group('data')
self.EndHandleItem(m,l,orgnode)
self.e.doc[startItem] = "".join(self.e.doc[startItem:])
del self.e.doc[-2:]
continue
elif(inList):
while(inList > 0):
inList -= 1
self.HandleExiting(m,l,orgnode)
yield l
else:
yield l
while(inList > 0):
inList -= 1
self.HandleExiting(m,l,orgnode)
def StartHandleItem(self, m, line, orgnode):
self.e.doc.append("")
def EndHandleItem(self,m,line,orgnode):
self.e.doc.append("")
def HandleExiting(self, m, line, orgnode):
pass
def HandleEntering(self, m, line, orgnode):
pass
class AttributeParser:
def __init__(self,name,sre,doc):
self.sre = sre
self.name = name
self.e = doc
def Handle(self, lines, orgnode):
for line in lines:
m = self.sre.search(line)
if(m):
self.HandleData(m,line,orgnode)
continue
yield line
def HandleData(self, m, line, orgnode):
self.e.AddAttrib(self.name,m.group('data'))
class StripParser:
def __init__(self,sre,doc):
self.sre = sre
self.e = doc
def Handle(self, lines, orgnode):
for line in lines:
m = self.sre.search(line)
if(m):
continue
yield line
# This parser expects the matched item to span and consume the entire line;
# the match replaces the entire line.
class LineParser:
def __init__(self,sre,doc):
self.sre = sre
self.e = doc
def Handle(self, lines, orgnode):
for line in lines:
m = self.sre.search(line)
if(m):
self.HandleLine(m,line,orgnode)
continue
yield line
def HandleLine(self,m,l,orgnode):
pass
# Same as LineParser, but the match is ignored while the exporter reports it
# is inside a block (AmInBlock()); otherwise the match consumes the entire line.
class NotInBlockLineParser:
def __init__(self,sre,doc):
self.sre = sre
self.e = doc
def Handle(self, lines, orgnode):
for line in lines:
if(not self.e.AmInBlock()):
m = self.sre.search(line)
if(m):
self.HandleLine(m,line,orgnode)
continue
yield line
def HandleLine(self,m,l,orgnode):
pass
# This parser expects a match to be "within" a line.
# This is complicated because we may still have to forward the other
# portions of the line on to the remaining parsers (see the illustrative
# sketch after this class).
class SubLineParser:
def __init__(self,sre,doc):
self.sre = sre
self.e = doc
def Handle(self, lines, orgnode):
for line in lines:
start = 0
llen = len(self.e.doc)
for m in self.sre.finditer(line):
s,e = m.span()
if(s >= start):
segment = line[start:s]
if(segment.strip() == ""):
self.e.doc.append(segment)
else:
yield segment
start = e
self.HandleSegment(m,line,orgnode)
if(start != 0 and len(line) > start):
segment = line[start:]
if(segment.strip() == ""):
self.e.doc.append(segment)
else:
yield segment
elif(start == 0):
yield line
# We generated more than one line here! need to collapse
nlen = len(self.e.doc) - llen
if(nlen > 1):
ls = "".join(self.e.doc[llen:])
del self.e.doc[-nlen:]
self.e.doc.append(ls)
def HandleSegment(self,m,l,orgnode):
pass
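# Illustrative sketch (not part of the original module): how a SubLineParser
# subclass is typically wired up. The fake "doc" below only supplies the .doc
# list that Handle() appends to; a real exporter provides much more. All names
# here are invented for demonstration.
class _FakeExportDoc:
    def __init__(self):
        self.doc = []


class _DemoBoldParser(SubLineParser):
    def __init__(self, doc):
        # Same pattern as RE_BOLD below; repeated here so the sketch is
        # self-contained.
        super(_DemoBoldParser, self).__init__(re.compile(r"\*(?P<data>.+?)\*"), doc)

    def HandleSegment(self, m, l, orgnode):
        # Emit the matched text wrapped in a tag; the unmatched text around it
        # is yielded back by Handle() for the remaining parsers to consume.
        self.e.doc.append("<b>" + m.group('data') + "</b>")


# Usage sketch:
#   parser = _DemoBoldParser(_FakeExportDoc())
#   rest = list(parser.Handle(["some *bold* words"], None))
#   # parser.e.doc is now ["<b>bold</b>"]; rest is ["some ", " words"]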
RE_STARTSRC = re.compile(r"^\s*#\+(BEGIN_SRC|begin_src)\s+(?P<lang>[a-zA-Z0-9]+)")
RE_ENDSRC = re.compile(r"^\s*#\+(END_SRC|end_src)")
class SourceBlockState(BlockState):
def __init__(self,doc):
super(SourceBlockState,self).__init__(RE_STARTSRC, RE_ENDSRC,doc)
RE_STARTDYN = re.compile(r"^\s*#\+(BEGIN[:]|begin[:])\s+(?P<lang>[a-zA-Z0-9]+)")
RE_ENDDYN = re.compile(r"^\s*#\+(end[:]|END[:])")
class DynamicBlockState(BlockState):
def __init__(self,doc):
super(DynamicBlockState,self).__init__(RE_STARTDYN, RE_ENDDYN,doc)
RE_STARTEXPORT = re.compile(r"^\s*#\+(BEGIN_EXPORT|begin_export)\s+(?P<lang>[a-zA-Z0-9]+)")
RE_ENDEXPORT = re.compile(r"^\s*#\+(END_EXPORT|end_export)")
class ExportBlockState(BlockState):
def __init__(self,doc):
super(ExportBlockState,self).__init__(RE_STARTEXPORT, RE_ENDEXPORT,doc)
RE_STARTQUOTE = re.compile(r"^\s*#\+(BEGIN_QUOTE|begin_quote)")
RE_ENDQUOTE = re.compile(r"^\s*#\+(END_QUOTE|end_quote)")
class QuoteBlockState(BlockState):
def __init__(self,doc):
super(QuoteBlockState,self).__init__(RE_STARTQUOTE, RE_ENDQUOTE,doc)
RE_STARTEXAMPLE = re.compile(r"^\s*#\+(BEGIN_EXAMPLE|begin_example)")
RE_ENDEXAMPLE = re.compile(r"^\s*#\+(END_EXAMPLE|end_example)")
class ExampleBlockState(BlockState):
def __init__(self,doc):
super(ExampleBlockState,self).__init__(RE_STARTEXAMPLE, RE_ENDEXAMPLE,doc)
RE_STARTNOTE = re.compile(r"^\s*#\+(BEGIN_NOTES|begin_notes)")
RE_ENDNOTE = re.compile(r"^\s*#\+(END_NOTES|end_notes)")
class NotesBlockState(BlockState):
def __init__(self,doc):
super(NotesBlockState,self).__init__(RE_STARTNOTE, RE_ENDNOTE,doc)
RE_STARTGENERIC = re.compile(r"#\+(BEGIN_|begin_)(?P<data>[a-zA-Z0-9-]+)(\s|$)")
RE_ENDGENERIC = re.compile(r"#\+(END_|end_)([a-zA-Z0-9-]+)(\s|$)")
class GenericBlockState(BlockState):
def __init__(self,doc):
super(GenericBlockState,self).__init__(RE_STARTGENERIC, RE_ENDGENERIC,doc)
RE_TABLE_ROW = re.compile(r"^\s*[|]")
RE_NOT_TABLE_ROW = re.compile(r"^\s*[^| \t]")
class TableBlockState(BlockState):
def __init__(self,doc):
super(TableBlockState,self).__init__(RE_TABLE_ROW, RE_NOT_TABLE_ROW,doc,True)
RE_DRAWER_LINE = re.compile(r"^\s*[:].+[:]\s*$")
RE_END_DRAWER_LINE = re.compile(r"^\s*[:](END|end)[:]\s*$")
class DrawerBlockState(BlockState):
def __init__(self,doc):
super(DrawerBlockState,self).__init__(RE_DRAWER_LINE, RE_END_DRAWER_LINE,doc)
RE_CAPTION = regex.compile(r"^\s*[#][+]CAPTION[:]\s*(?P<data>.*)")
class CaptionAttributeParser(AttributeParser):
def __init__(self,doc):
super(CaptionAttributeParser,self).__init__('caption',RE_CAPTION,doc)
RE_TBLFM = regex.compile(r"^\s*[#][+]TBLFM[:].*")
class TblFmStripper(StripParser):
def __init__(self,doc):
super(TblFmStripper,self).__init__(RE_TBLFM,doc)
RE_ATTR_HTML = re.compile(r"^\s*[#][+](ATTR_HTML|attr_html)[:].*")
class AttrHtmlStripper(StripParser):
def __init__(self,doc):
super(AttrHtmlStripper,self).__init__(RE_ATTR_HTML,doc)
RE_ATTR_ORG = re.compile(r"^\s*[#][+](ATTR_ORG|attr_org)[:].*")
class AttrOrgStripper(StripParser):
def __init__(self,doc):
super(AttrOrgStripper,self).__init__(RE_ATTR_ORG,doc)
RE_KEYWORDSTRIP = re.compile(r"^\s*[#][+](COLUMNS|columns|PRIORITIES|priorities|PLOT|plot|TODO|todo|TAGS|tags|PROPERTY|property)[:].*")
class KeywordStripper(StripParser):
def __init__(self,doc):
super(KeywordStripper,self).__init__(RE_KEYWORDSTRIP,doc)
RE_SCHEDULING_LINE = re.compile(r"^\s*(SCHEDULED|CLOSED|DEADLINE|CLOCK)[:].*")
class SchedulingStripper(StripParser):
def __init__(self,doc):
super(SchedulingStripper,self).__init__(RE_SCHEDULING_LINE,doc)
RE_UL = re.compile(r"^(?P<indent>\s*)(?P<listprefix>-|[+])\s+((?P<definition>[a-zA-Z0-9_-]+?)\s*[:][:]\s*)?(?P<data>.+)")
class UnorderedListBlockState(ListBlockState):
def __init__(self,doc):
super(UnorderedListBlockState,self).__init__(RE_UL,doc)
RE_CL = re.compile(r"^(?P<indent>\s*)(?P<listprefix>(-|[+])\s+\[(?P<state>[ xX-])\])\s+((?P<definition>[a-zA-Z0-9_-]+?)\s*[:][:]\s*)?(?P<data>.+)")
class CheckboxListBlockState(ListBlockState):
def __init__(self,doc):
super(CheckboxListBlockState,self).__init__(RE_CL,doc)
RE_OL = re.compile(r"^(?P<indent>\s*)(?P<listprefix>[0-9]+[).])\s+((?P<definition>[a-zA-Z0-9_-]+?)\s*[:][:]\s*)?(?P<data>.+)")
class OrderedListBlockState(ListBlockState):
def __init__(self,doc):
super(OrderedListBlockState,self).__init__(RE_OL,doc)
RE_BOLD = re.compile(r"\*(?P<data>.+?)\*")
RE_ITALICS = re.compile(r"/(?P<data>.+?)/")
RE_UNDERLINE = re.compile(r"[_](?P<data>.+?)[_]")
RE_STRIKETHROUGH = re.compile(r"\+(?P<data>.+?)\+")
RE_CODE = re.compile(r"~(?P<data>.+?)~")
RE_VERBATIM = re.compile(r"=(?P<data>.+?)=")
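# Added note (not in the original source): the patterns above cover org inline
# markup such as *bold*, /italic/, _underline_, +strikethrough+, ~code~ and
# =verbatim= spans; each captures the emphasized text in its 'data' group.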
class BoldParser(SubLineParser):
def __init__(self,doc):
super(BoldParser,self).__init__(RE_BOLD,doc)
class ItalicsParser(SubLineParser):
def __init__(self,doc):
super(ItalicsParser,self).__init__(RE_ITALICS,doc)
class UnderlineParser(SubLineParser):
def __init__(self,doc):
super(UnderlineParser,self).__init__(RE_UNDERLINE,doc)
class StrikethroughParser(SubLineParser):
def __init__(self,doc):
super(StrikethroughParser,self).__init__(RE_STRIKETHROUGH,doc)
class CodeParser(SubLineParser):
def __init__(self,doc):
super(CodeParser,self).__init__(RE_CODE,doc)
class VerbatimParser(SubLineParser):
def __init__(self,doc):
super(VerbatimParser,self).__init__(RE_VERBATIM,doc)
RE_LINK = re.compile(r"\[\[(?P<link>[^\]]+)\](\[(?P<desc>[^\]]+)\])?\]")
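# Added note (not in the original source): RE_LINK matches org links such as
# [[https://orgmode.org][Org homepage]] (link plus description) or
# [[file:notes.org]] (link only); the 'desc' group is None when no description is given.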
class LinkParser(SubLineParser):
def __init__(self,doc):
super(LinkParser,self).__init__(RE_LINK,doc)
RE_HR = re.compile(r"^((\s*-----+\s*)|(\s*---\s+[a-zA-Z0-9 ]+\s+---\s*))$")
class HrParser(LineParser):
def __init__(self,doc):
super(HrParser,self).__init__(RE_HR,doc)
RE_TARGET = regex.compile(r"<<(?P<data>.+?)>>")
class TargetParser(SubLineParser):
def __init__(self,doc):
super(TargetParser,self).__init__(RE_TARGET,doc)
RE_MATH = regex.compile(r"\$(?P<data>.+?)\$")
class MathParser(SubLineParser):
def __init__(self,doc):
super(MathParser,self).__init__(RE_MATH,doc)
RE_INLMATH = regex.compile(r"\\\((?P<data>.+?)\\\)")
class InlineMathParser(SubLineParser):
def __init__(self,doc):
super(InlineMathParser,self).__init__(RE_INLMATH,doc)
RE_EQMATH = regex.compile(r"\\\[(?P<data>.+?)\\\]")
class EqMathParser(SubLineParser):
def __init__(self,doc):
super(EqMathParser,self).__init__(RE_EQMATH,doc)
RE_EMPTY = re.compile(r"^\s*$")
class EmptyParser(NotInBlockLineParser):
def __init__(self,doc):
super(EmptyParser,self).__init__(RE_EMPTY,doc)
class ActiveDateParser(LineParser):
def __init__(self,doc):
super(ActiveDateParser,self).__init__(date.gene_timestamp_regex('active'),doc)
class InactiveDateParser(LineParser):
def __init__(self,doc):
super(InactiveDateParser,self).__init__(date.gene_timestamp_regex('inactive'),doc)
class NameParser(LineParser):
def __init__(self,doc):
super(NameParser,self).__init__(RE_NAME,doc)
RE_LATEX_HEADER = regex.compile(r"^\s*[#][+](LATEX_HEADER|latex_header)[:]\s*(?P<data>.*)")
class LatexHeaderParser(LineParser):
def __init__(self,doc):
super(LatexHeaderParser,self).__init__(RE_LATEX_HEADER,doc)
RE_LATEX_CLASS_OPTIONS = regex.compile(r"^\s*[#][+](LATEX_CLASS_OPTIONS|latex_class_options)[:]\s*(?P<data>.*)")
class LatexClassOptionsParser(LineParser):
def __init__(self,doc):
super(LatexClassOptionsParser,self).__init__(RE_LATEX_CLASS_OPTIONS,doc)
RE_SETUPFILE = regex.compile(r"^\s*[#][+](SETUPFILE|setupfile)[:]\s*(?P<data>.*)")
class SetupFileParser(LineParser):
def __init__(self,doc):
super(SetupFileParser,self).__init__(RE_SETUPFILE,doc)
def Handle(self, lines, orgnode):
for line in lines:
m = self.sre.search(line)
if(m):
filename = m.group('data').strip()
try:
with open(filename,"r") as f:
for setupline in f:
yield setupline
except:
log.warning("Setup file not found: " + str(filename))
continue
yield line
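    # Added note (not in the original source): lines read from a #+SETUPFILE are
    # spliced into the stream in place of the directive itself, so downstream
    # parsers see the included file's content as if it had been written inline.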
RE_RESULTS = regex.compile(r"^\s*[#][+](RESULTS|results)[:]\s*(?P<data>.*)")
class ResultsParser(LineParser):
def __init__(self,doc):
super(ResultsParser,self).__init__(RE_RESULTS,doc)
def Handle(self, lines, orgnode):
skip = False
for line in lines:
if(skip):
if(line.strip() == ""):
skip = False
elif(RE_ENDSRC.search(line) or RE_END_DRAWER_LINE.search(line)):
skip = False
continue
m = self.sre.search(line)
if(m):
if(hasattr(self.e.doc,'sparams')):
exp = self.e.doc.sparams.Get("exports","")
                    if(exp == 'code' or exp == 'none'):
skip = True
continue
else:
continue
yield line
|
facelib/InsightFace/verifier.py | tuncayka/FaceLib | 147 | 12619458 | import cv2
import torch
import argparse
from facelib import get_config, special_draw
from facelib import update_facebank, load_facebank
from facelib import FaceRecognizer
from facelib import FaceDetector
class WebcamVerify:
"""
    WebcamVerify: face verification with cv2.
    If you have added a new person to the facebank,
    pass update=True so the facebank embeddings are refreshed.
"""
def __init__(self, update=True, tta=True, device=torch.device("cuda:0" if torch.cuda.is_available() else "cpu")):
print('loading ...')
self.tta = tta
self.detector = FaceDetector(device=device)
self.conf = get_config()
self.conf.device = device
recognizer = FaceRecognizer(self.conf)
recognizer.model.eval()
self.recognizer = recognizer
if update:
self.targets, self.names = update_facebank(self.conf, recognizer.model, self.detector, tta=self.tta)
else:
self.targets, self.names = load_facebank(self.conf)
def run(self, camera_index=0):
if len(self.targets) < 1:
raise Exception("you don't have any person in facebank: add new person with 'add_from_webcam' or 'add_from_folder' function")
cap = cv2.VideoCapture(camera_index)
cap.set(3, 1280)
cap.set(4, 720)
        # frame rate of 6 because my laptop is quite slow...
print('type q for exit')
while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                raise Exception('the camera was not recognized: change the camera_index param to ' + str(0 if camera_index == 1 else 1))
faces, boxes, scores, landmarks = self.detector.detect_align(frame)
if len(faces.shape) > 1:
results, score = self.recognizer.infer(self.conf, faces, self.targets, tta=self.tta)
for idx, bbox in enumerate(boxes):
special_draw(frame, bbox, landmarks[idx], self.names[results[idx] + 1], score[idx])
cv2.imshow('face Capture', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows() |
examples/MACLR/mpu_utils.py | OctoberChang/pecos | 288 | 12619493 | <filename>examples/MACLR/mpu_utils.py
import torch
import torch.nn as nn
import torch.distributed as dist
# Intra-layer model parallel group that the current rank belongs to.
_TENSOR_MODEL_PARALLEL_GROUP = None
# Inter-layer model parallel group that the current rank belongs to.
_PIPELINE_MODEL_PARALLEL_GROUP = None
# Model parallel group (both intra- and pipeline) that the current rank belongs to.
_MODEL_PARALLEL_GROUP = None
# Embedding group.
_EMBEDDING_GROUP = None
# Data parallel group that the current rank belongs to.
_DATA_PARALLEL_GROUP = None
_VIRTUAL_PIPELINE_MODEL_PARALLEL_RANK = None
_VIRTUAL_PIPELINE_MODEL_PARALLEL_WORLD_SIZE = None
# These values enable us to change the mpu sizes on the fly.
_MPU_TENSOR_MODEL_PARALLEL_WORLD_SIZE = None
_MPU_PIPELINE_MODEL_PARALLEL_WORLD_SIZE = None
_MPU_TENSOR_MODEL_PARALLEL_RANK = None
_MPU_PIPELINE_MODEL_PARALLEL_RANK = None
# A list of global ranks for each pipeline group to ease calculation of the source
# rank when broadcasting from the first or last pipeline stage
_PIPELINE_GLOBAL_RANKS = None
def get_data_parallel_group():
"""Get the data parallel group the caller rank belongs to."""
#assert _DATA_PARALLEL_GROUP is not None, f"data parallel group={_DATA_PARALLEL_GROUP}, which is not initialized"
return _DATA_PARALLEL_GROUP
def get_group_world_size_rank():
group = get_data_parallel_group()
rank = torch.distributed.get_rank(group=group)
world_size = torch.distributed.get_world_size(group=group)
return group, rank, world_size
def get_data_parallel_world_size():
"""Return world size for the data parallel group."""
return torch.distributed.get_world_size(group=get_data_parallel_group())
def get_data_parallel_rank():
"""Return my rank for the data parallel group."""
return torch.distributed.get_rank(group=get_data_parallel_group())
class AllgatherFromDataParallelRegion(torch.autograd.Function):
""" https://github.com/NVIDIA/Megatron-LM/blob/main/pretrain_ict.py#L57 """
@staticmethod
def forward(ctx, input_):
assert input_.dim() == 2
group, rank, world_size = get_group_world_size_rank()
tensor_list = [torch.empty_like(input_) for _ in range(world_size)]
torch.distributed.all_gather(tensor_list, input_, group=group)
tensor_list[rank] = input_
output = torch.cat(tensor_list, dim=0).contiguous()
return output
@staticmethod
def backward(ctx, grad_output):
group, rank, world_size = get_group_world_size_rank()
assert grad_output.shape[0] % world_size == 0
dim_size = grad_output.shape[0] // world_size
output_list = torch.split(grad_output, dim_size, dim=0)
# get chunk from this rank
output = output_list[rank].contiguous()
return output
def average_losses_across_data_parallel_group(losses):
"""Reduce a tensor of losses across all GPUs."""
averaged_losses = torch.cat([loss.clone().detach().view(1) for loss in losses])
torch.distributed.all_reduce(averaged_losses, group=get_data_parallel_group())
averaged_losses = averaged_losses / torch.distributed.get_world_size(group=get_data_parallel_group())
return averaged_losses
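if __name__ == "__main__":
    # Minimal single-process smoke test (an illustrative sketch, not part of the
    # original pecos example): set up a 1-rank gloo group so the helpers above
    # can be exercised without a multi-GPU launch.
    import os
    os.environ.setdefault("MASTER_ADDR", "127.0.0.1")
    os.environ.setdefault("MASTER_PORT", "29500")
    dist.init_process_group(backend="gloo", rank=0, world_size=1)
    _DATA_PARALLEL_GROUP = dist.group.WORLD  # rebind the module global used by the helpers
    x = torch.randn(4, 8, requires_grad=True)
    gathered = AllgatherFromDataParallelRegion.apply(x)  # shape (world_size * 4, 8)
    loss = gathered.sum()
    loss.backward()
    print(average_losses_across_data_parallel_group([loss]))
    dist.destroy_process_group()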
|
surreal/learner/ppo.py | PeihongYu/surreal | 471 | 12619500 | import torch
import torch.nn as nn
import torchx as tx
from torchx.nn.hyper_scheduler import *
import numpy as np
from .base import Learner
from .aggregator import MultistepAggregatorWithInfo
from surreal.model.ppo_net import PPOModel, DiagGauss
from surreal.model.reward_filter import RewardFilter
from surreal.session import Config, extend_config, BASE_SESSION_CONFIG, BASE_LEARNER_CONFIG, ConfigError
class PPOLearner(Learner):
'''
PPOLearner: subclass of Learner that contains PPO algorithm logic
Attributes:
gpu_option: 'cpu' if not using GPU, 'cuda:all' otherwise
model: instance of PPOModel from surreal.model.ppo_net
        ref_target_model: instance of PPOModel, kept to be used as
            the reference policy
ppo_mode: string of either 'adapt' or 'clip' to determine
which variant of PPO is used. For details of variants
see https://arxiv.org/pdf/1707.06347.pdf
norm_adv: boolean flag -- whether to use batch advantage
normalization
use_z_filter: boolean flag -- whether to use obs Z-Filtering
actor/critic_optim: Adam Optimizer for policy and baseline network
actor/critic_lr_scheduler: Learning rate scheduler. details see
surreal.utils.pytorch.scheduler
aggregator: experience aggregator used to batch experiences.
for available aggregators, see surreal.learner.aggregator
pd: probability distribution class (Assumed as Diagonal Gaussian)
see surreal.model.ppo_net for details
important member functions:
private methods:
_clip_loss: computes the loss and various statistics
for 'clip' variant PPO
_clip_update: uses loss information to make policy update
_adapt_loss: computes loss and various statistics for
'adapt' variant of PPO
_adapt_update: uses loss info to make policy update
_value_loss: computes loss and various statistics for value function
_value_update: uses loss info to update value function
_gae_and_return: computes generalized advantage estimate and
corresponding N-step return. Details of algorithm can be found
here: https://arxiv.org/pdf/1506.02438.pdf
_advantage_and_return: basic advantage and N-step return estimate
            _optimize: function that makes the policy and value function updates
_post_publish: function that manages metrics and behavior after
parameter release
public methods:
learn: method to perform optimization and send to tensorplex for log
module_dict: returns the corresponding parameters
publish_parameter: publishes parameters in self.model to parameter server
'''
def __init__(self, learner_config, env_config, session_config):
super().__init__(learner_config, env_config, session_config)
# GPU setting
self.current_iteration = 0
self.global_step = 0
if not torch.cuda.is_available():
self.gpu_option = 'cpu'
else:
self.gpu_option = 'cuda:all'
self.use_cuda = torch.cuda.is_available()
if not self.use_cuda:
self.log.info('Using CPU')
else:
self.log.info('Using GPU: {}'.format(self.gpu_option))
# RL general parameters
self.gamma = self.learner_config.algo.gamma
self.lam = self.learner_config.algo.advantage.lam
self.n_step = self.learner_config.algo.n_step
self.use_z_filter = self.learner_config.algo.use_z_filter
self.use_r_filter = self.learner_config.algo.use_r_filter
self.norm_adv = self.learner_config.algo.advantage.norm_adv
self.batch_size = self.learner_config.replay.batch_size
self.action_dim = self.env_config.action_spec.dim[0]
self.obs_spec = self.env_config.obs_spec
self.init_log_sig = self.learner_config.algo.consts.init_log_sig
# PPO parameters
self.ppo_mode = self.learner_config.algo.ppo_mode
self.if_rnn_policy = self.learner_config.algo.rnn.if_rnn_policy
self.horizon = self.learner_config.algo.rnn.horizon
self.lr_actor = self.learner_config.algo.network.lr_actor
self.lr_critic = self.learner_config.algo.network.lr_critic
self.epoch_policy = self.learner_config.algo.consts.epoch_policy
self.epoch_baseline = self.learner_config.algo.consts.epoch_baseline
self.kl_target = self.learner_config.algo.consts.kl_target
self.adjust_threshold = self.learner_config.algo.consts.adjust_threshold
self.reward_scale = self.learner_config.algo.advantage.reward_scale
        # PPO mode 'adapt'
self.kl_cutoff_coeff = self.learner_config.algo.adapt_consts.kl_cutoff_coeff
self.beta_init = self.learner_config.algo.adapt_consts.beta_init
self.beta_range = self.learner_config.algo.adapt_consts.beta_range
# PPO mode 'clip'
self.clip_range = self.learner_config.algo.clip_consts.clip_range
self.clip_epsilon_init = self.learner_config.algo.clip_consts.clip_epsilon_init
if self.ppo_mode == 'adapt':
self.beta = self.beta_init
self.eta = self.kl_cutoff_coeff
self.beta_upper = self.beta_range[1]
self.beta_lower = self.beta_range[0]
self.beta_adjust_threshold = self.adjust_threshold
else: # method == 'clip'
self.clip_epsilon = self.clip_epsilon_init
self.clip_adjust_threshold = self.adjust_threshold
self.clip_upper = self.clip_range[1]
self.clip_lower = self.clip_range[0]
# learning rate setting:
self.min_lr = self.learner_config.algo.network.anneal.min_lr
self.lr_update_frequency = self.learner_config.algo.network.anneal.lr_update_frequency
self.frames_to_anneal = self.learner_config.algo.network.anneal.frames_to_anneal
num_updates = int(self.frames_to_anneal / self.learner_config.parameter_publish.exp_interval)
lr_scheduler = eval(self.learner_config.algo.network.anneal.lr_scheduler)
self.exp_counter = 0
self.kl_record = []
with tx.device_scope(self.gpu_option):
self.model = PPOModel(
obs_spec=self.obs_spec,
action_dim=self.action_dim,
model_config=self.learner_config.model,
use_cuda=self.use_cuda,
init_log_sig=self.init_log_sig,
use_z_filter=self.use_z_filter,
if_pixel_input=self.env_config.pixel_input,
rnn_config=self.learner_config.algo.rnn,
)
self.ref_target_model = PPOModel(
obs_spec=self.obs_spec,
action_dim=self.action_dim,
model_config=self.learner_config.model,
use_cuda=self.use_cuda,
init_log_sig=self.init_log_sig,
use_z_filter=self.use_z_filter,
if_pixel_input=self.env_config.pixel_input,
rnn_config=self.learner_config.algo.rnn,
)
self.ref_target_model.update_target_params(self.model)
# Learning parameters and optimizer
self.clip_actor_gradient = self.learner_config.algo.network.clip_actor_gradient
self.actor_gradient_clip_value = self.learner_config.algo.network.actor_gradient_norm_clip
self.clip_critic_gradient = self.learner_config.algo.network.clip_critic_gradient
self.critic_gradient_clip_value = self.learner_config.algo.network.critic_gradient_norm_clip
self.critic_optim = torch.optim.Adam(
self.model.get_critic_params(),
lr=self.lr_critic,
weight_decay=self.learner_config.algo.network.critic_regularization
)
self.actor_optim = torch.optim.Adam(
self.model.get_actor_params(),
lr=self.lr_actor,
weight_decay=self.learner_config.algo.network.actor_regularization
)
# learning rate scheduler
self.actor_lr_scheduler = lr_scheduler(self.actor_optim,
num_updates,
update_freq=self.lr_update_frequency,
min_lr = self.min_lr)
self.critic_lr_scheduler = lr_scheduler(self.critic_optim,
num_updates,
update_freq=self.lr_update_frequency,
min_lr = self.min_lr)
# Experience Aggregator
self.aggregator = MultistepAggregatorWithInfo(self.env_config.obs_spec,
self.env_config.action_spec)
# probability distribution. Gaussian only for now
self.pd = DiagGauss(self.action_dim)
# placeholder for RNN hidden cells
self.cells = None
# Reward White-filtering
if self.use_r_filter:
            self.reward_filter = RewardFilter()
def _clip_loss(self, obs, actions, advantages, behave_pol):
"""
Computes the loss with current data. also returns a dictionary of statistics
        which includes surrogate loss, clipped surrogate loss, policy entropy, clip
constant
return: surreal.utils.pytorch.GPUVariable, dict
Args:
obs: batch of observations in form of (batch_size, obs_dim)
actions: batch of actions in form of (batch_size, act_dim)
advantages: batch of normalized advantage, (batch_size, 1)
behave_pol: batch of behavior policy (batch_size, 2 * act_dim)
Returns:
clip_loss: Variable for loss
stats: dictionary of recorded statistics
"""
learn_pol = self.model.forward_actor(obs, self.cells)
learn_prob = self.pd.likelihood(actions, learn_pol)
behave_prob = self.pd.likelihood(actions, behave_pol)
prob_ratio = learn_prob / behave_prob
        clipped_ratio = torch.clamp(prob_ratio, 1 - self.clip_epsilon,
                                    1 + self.clip_epsilon)
        surr = -prob_ratio * advantages.view(-1, 1)
        clipped_surr = -clipped_ratio * advantages.view(-1, 1)
        clip_loss = torch.cat([surr, clipped_surr], 1).max(1)[0].mean()
stats = {
"_surr_loss": surr.mean().item(),
"_clip_surr_loss": clip_loss.item(),
"_entropy": self.pd.entropy(learn_pol).mean().item(),
'_clip_epsilon': self.clip_epsilon
}
return clip_loss, stats
def _clip_update(self, obs, actions, advantages, behave_pol):
"""
Method that makes policy updates. calls _clip_loss method
Note: self.clip_actor_gradient determines whether gradient is clipped
return: dictionary of statistics to be sent to tensorplex server
Args:
obs: batch of observations in form of (batch_size, obs_dim)
actions: batch of actions in form of (batch_size, act_dim)
advantages: batch of normalized advantage, (batch_size, 1)
behave_pol: batch of behavior policy (batch_size, 2 * act_dim)
Returns:
stats: dictionary of recorded statistics
"""
loss, stats = self._clip_loss(obs, actions, advantages, behave_pol)
self.model.clear_actor_grad()
loss.backward()
if self.clip_actor_gradient:
stats['grad_norm_actor'] = nn.utils.clip_grad_norm_(
self.model.get_actor_params(),
self.actor_gradient_clip_value)
self.actor_optim.step()
return stats
def _adapt_loss(self, obs, actions, advantages, behave_pol, ref_pol):
"""
Computes the loss with current data. also returns a dictionary of statistics
        which includes surrogate loss, clipped surrogate loss, policy entropy, adaptive
KL penalty constant, policy KL divergence
return: surreal.utils.pytorch.GPUVariable, dict
Args:
obs: batch of observations in form of (batch_size, obs_dim)
actions: batch of actions in form of (batch_size, act_dim)
advantages: batch of normalized advantage, (batch_size, 1)
behave_pol: batch of behavior policy (batch_size, 2 * act_dim)
ref_pol: batch of reference policy (batch_size, 2 * act_dim)
Returns:
loss: Variable for loss
stats: dictionary of recorded statistics
"""
learn_pol = self.model.forward_actor(obs, self.cells)
prob_behave = self.pd.likelihood(actions, behave_pol)
prob_learn = self.pd.likelihood(actions, learn_pol)
kl = self.pd.kl(ref_pol, learn_pol).mean()
surr = -(advantages.view(-1, 1) * (prob_learn/ torch.clamp(prob_behave, min=1e-2))).mean()
loss = surr + self.beta * kl
entropy = self.pd.entropy(learn_pol).mean()
if kl.item() - 2.0 * self.kl_target > 0:
loss += self.eta * (kl - 2.0 * self.kl_target).pow(2)
stats = {
'_kl_loss_adapt': loss.item(),
'_surr_loss': surr.item(),
'_pol_kl': kl.item(),
'_entropy': entropy.item(),
'_beta': self.beta
}
return loss, stats
def _adapt_update(self, obs, actions, advantages, behave_pol, ref_pol):
"""
Method that makes policy updates. calls _adapt_loss method
Note: self.clip_actor_gradient determines whether gradient is clipped
return: dictionary of statistics to be sent to tensorplex server
Args:
obs: batch of observations in form of (batch_size, obs_dim)
actions: batch of actions in form of (batch_size, act_dim)
advantages: batch of normalized advantage, (batch_size, 1)
behave_pol: batch of behavior policy (batch_size, 2 * act_dim)
ref_pol: batch of reference policy (batch_size, 2 * act_dim)
Returns:
stats: dictionary of recorded statistics
"""
loss, stats = self._adapt_loss(obs, actions, advantages, behave_pol, ref_pol)
self.model.clear_actor_grad()
loss.backward()
if self.clip_actor_gradient:
stats['grad_norm_actor'] = nn.utils.clip_grad_norm_(
self.model.get_actor_params(),
self.actor_gradient_clip_value)
self.actor_optim.step()
return stats
def _value_loss(self, obs, returns):
"""
Computes the loss with current data. also returns a dictionary of statistics
which includes value loss and explained variance
return: surreal.utils.pytorch.GPUVariable, dict
Args:
obs: batch of observations in form of (batch_size, obs_dim)
returns: batch of N-step return estimate (batch_size,)
Returns:
loss: Variable for loss
stats: dictionary of recorded statistics
"""
values = self.model.forward_critic(obs, self.cells)
if len(values.size()) == 3: values = values.squeeze(2)
explained_var = 1 - torch.var(returns - values) / torch.var(returns)
loss = (values - returns).pow(2).mean()
stats = {
'_val_loss': loss.item(),
'_val_explained_var': explained_var.item()
}
return loss, stats
def _value_update(self, obs, returns):
"""
Method that makes baseline function updates. calls _value_loss method
Note: self.clip_actor_gradient determines whether gradient is clipped
return: dictionary of statistics to be sent to tensorplex server
Args:
obs: batch of observations in form of (batch_size, obs_dim)
returns: batch of N-step return estimate (batch_size,)
Returns:
stats: dictionary of recorded statistics
"""
loss, stats = self._value_loss(obs, returns)
self.model.clear_critic_grad()
loss.backward()
if self.clip_critic_gradient:
stats['grad_norm_critic'] = nn.utils.clip_grad_norm_(
self.model.get_critic_params(),
self.critic_gradient_clip_value)
self.critic_optim.step()
return stats
def _gae_and_return(self, obs, obs_next, rewards, dones):
'''
computes generalized advantage estimate and corresponding N-step return.
Details of algorithm can be found here: https://arxiv.org/pdf/1506.02438.pdf
        Args:
            obs: batch of observations (batch_size, N-step, obs_dim)
            obs_next: batch of next observations (batch_size, 1, obs_dim)
            rewards: batch of rewards (batch_size, N-step)
            dones: batch of termination flags (batch_size, N-step)
        Returns:
            advantages: batch of advantage estimates
            returns: batch of N-step return estimates
'''
with tx.device_scope(self.gpu_option):
index_set = torch.tensor(range(self.n_step), dtype=torch.float32)
gamma = torch.pow(self.gamma, index_set)
lam = torch.pow(self.lam, index_set)
obs_concat_var = {}
for mod in obs.keys():
obs_concat_var[mod] = {}
for k in obs[mod].keys():
obs_concat_var[mod][k] = (torch.cat([obs[mod][k], obs_next[mod][k]], dim=1))
if not self.if_rnn_policy:
obs_shape = obs_concat_var[mod][k].size()
obs_concat_var[mod][k] = obs_concat_var[mod][k].view(-1, *obs_shape[2:])
values = self.model.forward_critic(obs_concat_var, self.cells)
values = values.view(self.batch_size, self.n_step + 1)
values[:, 1:] *= 1 - dones
if self.if_rnn_policy:
tds = rewards + self.gamma * values[:, 1:] - values[:, :-1]
eff_len = self.n_step - self.horizon + 1
gamma = gamma[:self.horizon]
lam = lam[:self.horizon]
returns = torch.zeros(self.batch_size, eff_len)
advs = torch.zeros(self.batch_size, eff_len)
for step in range(eff_len):
returns[:, step] = torch.sum(gamma * rewards[:, step:step + self.horizon], 1) + \
values[:, step + self.horizon] * (self.gamma ** self.horizon)
advs[:, step] = torch.sum(tds[:, step:step + self.horizon] * gamma * lam, 1)
if self.norm_adv:
std = advs.std()
mean = advs.mean()
advs = (advs - mean) / max(std, 1e-4)
return advs, returns
else:
returns = torch.sum(gamma * rewards, 1) + values[:, -1] * (self.gamma ** self.n_step)
tds = rewards + self.gamma * values[:, 1:] - values[:, :-1]
gae = torch.sum(tds * gamma * lam, 1)
if self.norm_adv:
std = gae.std()
mean = gae.mean()
gae = (gae - mean) / max(std, 1e-4)
return gae.view(-1, 1), returns.view(-1, 1)
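    # Clarifying note (added; not in the original Surreal code): with
    # td_t = r_t + gamma * V(s_{t+1}) - V(s_t), the estimate above is the
    # truncated GAE  A_t = sum_{l<N} (gamma * lam)^l * td_{t+l}, and the
    # N-step return is  R_t = sum_{l<N} gamma^l * r_{t+l} + gamma^N * V(s_{t+N}).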
def _preprocess_batch_ppo(self, batch):
'''
Loading experiences from numpy to torch.FloatTensor type
Args:
batch: BeneDict of experiences containing following attributes
'obs' - observation
'actions' - actions
'rewards' - rewards
'obs_next' - next observation
'persistent_infos' - action policy
'onetime_infos' - RNN hidden cells or None
Return:
Benedict of torch.FloatTensors
'''
with tx.device_scope(self.gpu_option):
obs, actions, rewards, obs_next, done, persistent_infos, onetime_infos = (
batch['obs'],
batch['actions'],
batch['rewards'],
batch['obs_next'],
batch['dones'],
batch['persistent_infos'],
batch['onetime_infos'],
)
for modality in obs:
for key in obs[modality]:
obs[modality][key] = (torch.tensor(obs[modality][key], dtype=torch.float32)).detach()
obs_next[modality][key] = (torch.tensor(obs_next[modality][key], dtype=torch.float32)).detach()
actions = torch.tensor(actions, dtype=torch.float32)
rewards = torch.tensor(rewards, dtype=torch.float32) * self.reward_scale
if self.use_r_filter:
normed_reward = self.reward_filter.forward(rewards)
self.reward_filter.update(rewards)
rewards = normed_reward
done = torch.tensor(done, dtype=torch.float32)
if persistent_infos is not None:
for i in range(len(persistent_infos)):
persistent_infos[i] = torch.tensor(persistent_infos[i], dtype=torch.float32).detach()
if onetime_infos is not None:
for i in range(len(onetime_infos)):
onetime_infos[i] = torch.tensor(onetime_infos[i], dtype=torch.float32).detach()
(
batch['obs'],
batch['actions'],
batch['rewards'],
batch['obs_next'],
batch['dones'],
batch['persistent_infos'],
batch['onetime_infos'],
) = (
obs,
actions,
rewards,
obs_next,
done,
persistent_infos,
onetime_infos
)
return batch
def _optimize(self, obs, actions, rewards, obs_next, persistent_infos, onetime_infos, dones):
'''
main method for optimization that calls _adapt/clip_update and
_value_update epoch_policy and epoch_baseline times respectively
        return: dictionary of tracked statistics
Args:
obs: batch of observations (batch_size, N-step , obs_dim)
obs_next: batch of next observations (batch_size, 1 , obs_dim)
actions: batch of actions (batch_size, N-step , act_dim)
rewards: batch of rewards (batch_size, N-step)
dones: batch of termination flags (batch_size, N-step)
            action_infos: list of other batched attributes tracked, such as
                the behavior policy, RNN hidden states, etc.
Returns:
dictionary of recorded statistics
'''
# convert everything to float tensor:
with tx.device_scope(self.gpu_option):
pds = persistent_infos[-1]
if self.if_rnn_policy:
h = (onetime_infos[0].transpose(0, 1).contiguous()).detach()
c = (onetime_infos[1].transpose(0, 1).contiguous()).detach()
self.cells = (h, c)
advantages, returns = self._gae_and_return(obs,
obs_next,
rewards,
dones)
advantages = advantages.detach()
returns = returns.detach()
if self.if_rnn_policy:
h = self.cells[0].detach()
c = self.cells[1].detach()
self.cells = (h, c)
eff_len = self.n_step - self.horizon + 1
behave_pol = pds[:, :eff_len, :].contiguous().detach()
actions_iter = actions[:, :eff_len, :].contiguous().detach()
else:
behave_pol = pds[:, 0, :].contiguous().detach()
actions_iter = actions[:, 0, :].contiguous().detach()
obs_iter = {}
for mod in obs.keys():
obs_iter[mod] = {}
for k in obs[mod].keys():
if self.if_rnn_policy:
obs_iter[mod][k] = obs[mod][k][:, :self.n_step - self.horizon + 1, :].contiguous().detach()
else:
obs_iter[mod][k] = obs[mod][k][:, 0, :].contiguous().detach()
ref_pol = self.ref_target_model.forward_actor(obs_iter, self.cells).detach()
for ep in range(self.epoch_policy):
if self.ppo_mode == 'clip':
stats = self._clip_update(obs_iter,
actions_iter,
advantages,
behave_pol)
else:
stats = self._adapt_update(obs_iter,
actions_iter,
advantages,
behave_pol,
ref_pol)
curr_pol = self.model.forward_actor(obs_iter, self.cells).detach()
kl = self.pd.kl(ref_pol, curr_pol).mean()
stats['_pol_kl'] = kl.item()
if kl.item() > self.kl_target * 4:
break
self.kl_record.append(stats['_pol_kl'])
for _ in range(self.epoch_baseline):
baseline_stats = self._value_update(obs_iter, returns)
# Collecting metrics and updating tensorplex
for k in baseline_stats:
stats[k] = baseline_stats[k]
behave_likelihood = self.pd.likelihood(actions_iter, behave_pol)
curr_likelihood = self.pd.likelihood(actions_iter, curr_pol)
stats['_avg_return_targ'] = returns.mean().item()
stats['_avg_log_sig'] = self.model.actor.log_var.mean().item()
stats['_avg_behave_likelihood'] = behave_likelihood.mean().item()
stats['_avg_is_weight'] = (curr_likelihood / (behave_likelihood + 1e-4)).mean().item()
stats['_ref_behave_diff'] = self.pd.kl(ref_pol, behave_pol).mean().item()
stats['_lr'] = self.actor_lr_scheduler.get_lr()[0]
if self.use_z_filter:
self.model.z_update(obs_iter)
stats['obs_running_mean'] = np.mean(self.model.z_filter.running_mean())
stats['obs_running_square'] = np.mean(self.model.z_filter.running_square())
stats['obs_running_std'] = np.mean(self.model.z_filter.running_std())
if self.use_r_filter:
stats['reward_mean'] = self.reward_filter.reward_mean()
return stats
def learn(self, batch):
'''
main method for learning, calls _optimize. Also sends update stats
to Tensorplex
Args:
batch: pre-aggregated list of experiences rolled out by the agent
'''
self.current_iteration += 1
batch = self._preprocess_batch_ppo(batch)
tensorplex_update_dict = self._optimize(
batch.obs,
batch.actions,
batch.rewards,
batch.obs_next,
batch.persistent_infos,
batch.onetime_infos,
batch.dones,
)
self.periodic_checkpoint(
global_steps=self.current_iteration,
score=None,
)
self.tensorplex.add_scalars(tensorplex_update_dict, self.global_step)
self.exp_counter += self.batch_size
self.global_step += 1
def module_dict(self):
'''
returns the corresponding parameters
'''
return {
'ppo': self.model,
}
def publish_parameter(self, iteration, message=''):
"""
        Learner publishes the latest parameters to the parameter server only when
        it has accumulated enough experiences, as specified by
learner_config.algo.network.update_target.interval
Note: this overrides the base class publish_parameter method
Args:
iteration: the current number of learning iterations
message: optional message, must be pickleable.
"""
if self.exp_counter >= self.learner_config.parameter_publish.exp_interval:
self._ps_publisher.publish(iteration, message=message)
self._post_publish()
def _post_publish(self):
'''
function that manages metrics and behavior after parameter release
Actions include:
adjusts adaptive threshold for KL penalty for 'adapt' PPO
adjusts adaptive prob ratio clip rate for 'clip' PPO
clears KL-Divergence record
clears experience counter after parameter release
steps actor and critic learning rate scheduler
'''
final_kl = np.mean(self.kl_record)
if self.ppo_mode == 'clip': # adapts clip ratios
if final_kl > self.kl_target * self.clip_adjust_threshold[1]:
if self.clip_lower < self.clip_epsilon:
self.clip_epsilon = self.clip_epsilon / self.learner_config.algo.clip_consts.scale_constant
elif final_kl < self.kl_target * self.clip_adjust_threshold[0]:
if self.clip_upper > self.clip_epsilon:
self.clip_epsilon = self.clip_epsilon * self.learner_config.algo.clip_consts.scale_constant
else: # adapt KL divergence penalty before returning the statistics
if final_kl > self.kl_target * self.beta_adjust_threshold[1]:
if self.beta_upper > self.beta:
self.beta = self.beta * self.learner_config.algo.adapt_consts.scale_constant
elif final_kl < self.kl_target * self.beta_adjust_threshold[0]:
if self.beta_lower < self.beta:
self.beta = self.beta / self.learner_config.algo.adapt_consts.scale_constant
self.ref_target_model.update_target_params(self.model)
self.kl_record = []
self.exp_counter = 0
self.actor_lr_scheduler.step()
self.critic_lr_scheduler.step()
def checkpoint_attributes(self):
'''
outlines attributes to be checkpointed
'''
return [
'model',
'ref_target_model',
'actor_lr_scheduler',
'critic_lr_scheduler',
'current_iteration',
]
def _prefetcher_preprocess(self, batch):
batch = self.aggregator.aggregate(batch)
return batch
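# --- Illustrative sketch (added; not part of the original Surreal code) ---
# A self-contained numeric check of the clipped surrogate computed in
# PPOLearner._clip_loss above; it mirrors the math only, not the learner plumbing.
def _demo_clipped_surrogate(prob_ratio, advantages, clip_epsilon=0.2):
    """Return the PPO clip loss for given probability ratios and advantages."""
    clipped_ratio = torch.clamp(prob_ratio, 1 - clip_epsilon, 1 + clip_epsilon)
    surr = -prob_ratio * advantages
    clipped_surr = -clipped_ratio * advantages
    return torch.max(surr, clipped_surr).mean()
# Example: _demo_clipped_surrogate(torch.tensor([1.5, 0.5]), torch.tensor([1.0, -1.0]))
# evaluates to tensor(-0.2000): both ratios fall outside [0.8, 1.2] and are clipped.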
|
controller/api/tests/test_build.py | yun-an/deis | 3,375 | 12619541 | """
Unit tests for the Deis api app.
Run the tests with "./manage.py test api"
"""
from __future__ import unicode_literals
import json
from django.contrib.auth.models import User
from django.test import TransactionTestCase
import mock
from rest_framework.authtoken.models import Token
from api.models import Build
from . import mock_status_ok
@mock.patch('api.models.publish_release', lambda *args: None)
class BuildTest(TransactionTestCase):
"""Tests build notification from build system"""
fixtures = ['tests.json']
def setUp(self):
self.user = User.objects.get(username='autotest')
self.token = Token.objects.get(user=self.user).key
@mock.patch('requests.post', mock_status_ok)
def test_build(self):
"""
Test that a null build is created and that users can post new builds
"""
url = '/v1/apps'
response = self.client.post(url, HTTP_AUTHORIZATION='token {}'.format(self.token))
self.assertEqual(response.status_code, 201)
app_id = response.data['id']
# check to see that no initial build was created
url = "/v1/apps/{app_id}/builds".format(**locals())
response = self.client.get(url,
HTTP_AUTHORIZATION='token {}'.format(self.token))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.data['count'], 0)
# post a new build
body = {'image': 'autotest/example'}
response = self.client.post(url, json.dumps(body), content_type='application/json',
HTTP_AUTHORIZATION='token {}'.format(self.token))
self.assertEqual(response.status_code, 201)
build_id = response.data['uuid']
build1 = response.data
self.assertEqual(response.data['image'], body['image'])
# read the build
url = "/v1/apps/{app_id}/builds/{build_id}".format(**locals())
response = self.client.get(url,
HTTP_AUTHORIZATION='token {}'.format(self.token))
self.assertEqual(response.status_code, 200)
build2 = response.data
self.assertEqual(build1, build2)
# post a new build
url = "/v1/apps/{app_id}/builds".format(**locals())
body = {'image': 'autotest/example'}
response = self.client.post(url, json.dumps(body), content_type='application/json',
HTTP_AUTHORIZATION='token {}'.format(self.token))
self.assertEqual(response.status_code, 201)
self.assertIn('x-deis-release', response._headers)
build3 = response.data
self.assertEqual(response.data['image'], body['image'])
self.assertNotEqual(build2['uuid'], build3['uuid'])
# disallow put/patch/delete
response = self.client.put(url, HTTP_AUTHORIZATION='token {}'.format(self.token))
self.assertEqual(response.status_code, 405)
response = self.client.patch(url, HTTP_AUTHORIZATION='token {}'.format(self.token))
self.assertEqual(response.status_code, 405)
response = self.client.delete(url, HTTP_AUTHORIZATION='token {}'.format(self.token))
self.assertEqual(response.status_code, 405)
@mock.patch('requests.post', mock_status_ok)
def test_response_data(self):
"""Test that the serialized response contains only relevant data."""
body = {'id': 'test'}
url = '/v1/apps'
response = self.client.post(url, json.dumps(body),
content_type='application/json',
HTTP_AUTHORIZATION='token {}'.format(self.token))
# post an image as a build
url = "/v1/apps/test/builds".format(**locals())
body = {'image': 'autotest/example'}
response = self.client.post(url, json.dumps(body), content_type='application/json',
HTTP_AUTHORIZATION='token {}'.format(self.token))
for key in response.data:
self.assertIn(key, ['uuid', 'owner', 'created', 'updated', 'app', 'dockerfile',
'image', 'procfile', 'sha'])
expected = {
'owner': self.user.username,
'app': 'test',
'dockerfile': '',
'image': 'autotest/example',
'procfile': {},
'sha': ''
}
self.assertDictContainsSubset(expected, response.data)
@mock.patch('requests.post', mock_status_ok)
def test_build_default_containers(self):
url = '/v1/apps'
response = self.client.post(url, HTTP_AUTHORIZATION='token {}'.format(self.token))
self.assertEqual(response.status_code, 201)
app_id = response.data['id']
# post an image as a build
url = "/v1/apps/{app_id}/builds".format(**locals())
body = {'image': 'autotest/example'}
response = self.client.post(url, json.dumps(body), content_type='application/json',
HTTP_AUTHORIZATION='token {}'.format(self.token))
self.assertEqual(response.status_code, 201)
url = "/v1/apps/{app_id}/containers/cmd".format(**locals())
response = self.client.get(url,
HTTP_AUTHORIZATION='token {}'.format(self.token))
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data['results']), 1)
container = response.data['results'][0]
self.assertEqual(container['type'], 'cmd')
self.assertEqual(container['num'], 1)
# start with a new app
url = '/v1/apps'
response = self.client.post(url, HTTP_AUTHORIZATION='token {}'.format(self.token))
self.assertEqual(response.status_code, 201)
app_id = response.data['id']
# post a new build with procfile
url = "/v1/apps/{app_id}/builds".format(**locals())
body = {'image': 'autotest/example',
'sha': 'a'*40,
'dockerfile': "FROM scratch"}
response = self.client.post(url, json.dumps(body), content_type='application/json',
HTTP_AUTHORIZATION='token {}'.format(self.token))
self.assertEqual(response.status_code, 201)
url = "/v1/apps/{app_id}/containers/cmd".format(**locals())
response = self.client.get(url,
HTTP_AUTHORIZATION='token {}'.format(self.token))
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data['results']), 1)
container = response.data['results'][0]
self.assertEqual(container['type'], 'cmd')
self.assertEqual(container['num'], 1)
# start with a new app
url = '/v1/apps'
response = self.client.post(url, HTTP_AUTHORIZATION='token {}'.format(self.token))
self.assertEqual(response.status_code, 201)
app_id = response.data['id']
# post a new build with procfile
url = "/v1/apps/{app_id}/builds".format(**locals())
body = {'image': 'autotest/example',
'sha': 'a'*40,
'dockerfile': "FROM scratch",
'procfile': {'worker': 'node worker.js'}}
response = self.client.post(url, json.dumps(body), content_type='application/json',
HTTP_AUTHORIZATION='token {}'.format(self.token))
self.assertEqual(response.status_code, 201)
url = "/v1/apps/{app_id}/containers/cmd".format(**locals())
response = self.client.get(url,
HTTP_AUTHORIZATION='token {}'.format(self.token))
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data['results']), 1)
container = response.data['results'][0]
self.assertEqual(container['type'], 'cmd')
self.assertEqual(container['num'], 1)
# start with a new app
url = '/v1/apps'
response = self.client.post(url, HTTP_AUTHORIZATION='token {}'.format(self.token))
self.assertEqual(response.status_code, 201)
app_id = response.data['id']
# post a new build with procfile
url = "/v1/apps/{app_id}/builds".format(**locals())
body = {'image': 'autotest/example',
'sha': 'a'*40,
'procfile': json.dumps({'web': 'node server.js',
'worker': 'node worker.js'})}
response = self.client.post(url, json.dumps(body), content_type='application/json',
HTTP_AUTHORIZATION='token {}'.format(self.token))
self.assertEqual(response.status_code, 201)
url = "/v1/apps/{app_id}/containers/web".format(**locals())
response = self.client.get(url,
HTTP_AUTHORIZATION='token {}'.format(self.token))
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data['results']), 1)
container = response.data['results'][0]
self.assertEqual(container['type'], 'web')
self.assertEqual(container['num'], 1)
@mock.patch('requests.post', mock_status_ok)
def test_build_str(self):
"""Test the text representation of a build."""
url = '/v1/apps'
response = self.client.post(url, HTTP_AUTHORIZATION='token {}'.format(self.token))
self.assertEqual(response.status_code, 201)
app_id = response.data['id']
# post a new build
url = "/v1/apps/{app_id}/builds".format(**locals())
body = {'image': 'autotest/example'}
response = self.client.post(url, json.dumps(body), content_type='application/json',
HTTP_AUTHORIZATION='token {}'.format(self.token))
self.assertEqual(response.status_code, 201)
build = Build.objects.get(uuid=response.data['uuid'])
self.assertEqual(str(build), "{}-{}".format(
response.data['app'], response.data['uuid'][:7]))
@mock.patch('requests.post', mock_status_ok)
def test_admin_can_create_builds_on_other_apps(self):
"""If a user creates an application, an administrator should be able
to push builds.
"""
# create app as non-admin
user = User.objects.get(username='autotest2')
token = Token.objects.get(user=user).key
url = '/v1/apps'
response = self.client.post(url, HTTP_AUTHORIZATION='token {}'.format(token))
self.assertEqual(response.status_code, 201)
app_id = response.data['id']
# post a new build as admin
url = "/v1/apps/{app_id}/builds".format(**locals())
body = {'image': 'autotest/example'}
response = self.client.post(url, json.dumps(body), content_type='application/json',
HTTP_AUTHORIZATION='token {}'.format(self.token))
self.assertEqual(response.status_code, 201)
build = Build.objects.get(uuid=response.data['uuid'])
self.assertEqual(str(build), "{}-{}".format(
response.data['app'], response.data['uuid'][:7]))
@mock.patch('requests.post', mock_status_ok)
def test_unauthorized_user_cannot_modify_build(self):
"""
An unauthorized user should not be able to modify other builds.
Since an unauthorized user can't access the application, these
requests should return a 403.
"""
app_id = 'autotest'
url = '/v1/apps'
body = {'id': app_id}
response = self.client.post(url, json.dumps(body), content_type='application/json',
HTTP_AUTHORIZATION='token {}'.format(self.token))
unauthorized_user = User.objects.get(username='autotest2')
unauthorized_token = Token.objects.get(user=unauthorized_user).key
url = '{}/{}/builds'.format(url, app_id)
body = {'image': 'foo'}
response = self.client.post(url, json.dumps(body), content_type='application/json',
HTTP_AUTHORIZATION='token {}'.format(unauthorized_token))
self.assertEqual(response.status_code, 403)
@mock.patch('requests.post', mock_status_ok)
def test_new_build_does_not_scale_up_automatically(self):
"""
After the first initial deploy, if the containers are scaled down to zero,
they should stay that way on a new release.
"""
url = '/v1/apps'
response = self.client.post(url, HTTP_AUTHORIZATION='token {}'.format(self.token))
self.assertEqual(response.status_code, 201)
app_id = response.data['id']
# post a new build
url = "/v1/apps/{app_id}/builds".format(**locals())
body = {'image': 'autotest/example',
'sha': 'a'*40,
'procfile': json.dumps({'web': 'node server.js',
'worker': 'node worker.js'})}
response = self.client.post(url, json.dumps(body), content_type='application/json',
HTTP_AUTHORIZATION='token {}'.format(self.token))
self.assertEqual(response.status_code, 201)
url = "/v1/apps/{app_id}/containers/web".format(**locals())
response = self.client.get(url,
HTTP_AUTHORIZATION='token {}'.format(self.token))
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data['results']), 1)
# scale to zero
url = "/v1/apps/{app_id}/scale".format(**locals())
body = {'web': 0}
response = self.client.post(url, json.dumps(body), content_type='application/json',
HTTP_AUTHORIZATION='token {}'.format(self.token))
self.assertEqual(response.status_code, 204)
# post another build
url = "/v1/apps/{app_id}/builds".format(**locals())
body = {'image': 'autotest/example',
'sha': 'a'*40,
'procfile': json.dumps({'web': 'node server.js',
'worker': 'node worker.js'})}
response = self.client.post(url, json.dumps(body), content_type='application/json',
HTTP_AUTHORIZATION='token {}'.format(self.token))
self.assertEqual(response.status_code, 201)
url = "/v1/apps/{app_id}/containers/web".format(**locals())
response = self.client.get(url,
HTTP_AUTHORIZATION='token {}'.format(self.token))
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data['results']), 0)
|
tests/test_router.py | cclauss/rb | 335 | 12619556 | import pytest
from rb.cluster import Cluster
from rb.router import UnroutableCommand, extract_keys, BadHostSetup
def test_router_key_routing():
cluster = Cluster({0: {"db": 0},})
router = cluster.get_router()
assert router.get_key("INCR", ["foo"]) == "foo"
assert router.get_key("GET", ["bar"]) == "bar"
with pytest.raises(UnroutableCommand):
router.get_key("MGET", ["foo", "bar", "baz"])
with pytest.raises(UnroutableCommand):
router.get_key("UNKNOWN", [])
def test_host_validation():
cluster = Cluster(hosts={1: {}})
try:
cluster.get_router()
except BadHostSetup as e:
assert 'Expected host with ID "0"' in str(e)
else:
raise Exception("Expected runtime error")
def test_router_basics():
cluster = Cluster({0: {"db": 0}, 1: {"db": 1}, 2: {"db": 2},})
router = cluster.get_router()
assert router.get_host_for_command("INCR", ["foo"]) == 1
assert router.get_host_for_command("INCR", ["bar"]) == 2
assert router.get_host_for_command("INCR", ["baz"]) == 0
assert router.get_host_for_key("foo") == 1
assert router.get_host_for_key("bar") == 2
assert router.get_host_for_key("baz") == 0
def test_key_extraction():
assert extract_keys(["foo"], (1, 1, 1))
assert extract_keys(["foo", "value", "foo2", "value2"], (1, -1, 2)) == [
"foo",
"foo2",
]
assert extract_keys(["extra", "foo", "value", "foo2", "value2"], (2, -1, 2)) == [
"foo",
"foo2",
]
assert extract_keys(["foo", "foo2"], (1, -1, 1)) == ["foo", "foo2"]
|
Depth_First_Search/dfs_recursive.py | Neiva07/Algorithms | 199 | 12619566 | <reponame>Neiva07/Algorithms<filename>Depth_First_Search/dfs_recursive.py
class SimpleGraph:
def __init__(self):
self.edges = {}
@property
def nodes(self):
return self.edges.keys()
def neighbors(self, id):
return self.edges[id]
class DFS(object):
'''
Implementation of recursive depth-first search algorithm
    Note on status values: 0 = not visited, 1 = seen but not done, 2 = done
'''
def __init__(self, graph):
self.graph = graph
self.status = {k:0 for k in graph.nodes}
self.predecessors = {k:None for k in graph.nodes}
self.seen = {k: 0 for k in graph.nodes}
self.done = {k: 0 for k in graph.nodes}
self.t = 0
def dfs_visit_recursive(self, s):
self.status[s] = 1
self.seen[s] = self.t
self.t += 1
neighbours = self.graph.neighbors(s)
for v in neighbours:
if self.status[v] == 0:
self.predecessors[v] = s
self.dfs_visit_recursive(v)
self.status[s] = 2
self.done[s] = self.t
self.t += 1
def search(self):
'''
Searches the graph in depth-first order
        :return: tuple of (seen-times, done-times) dicts for the nodes
'''
for node in self.graph.nodes:
if self.status[node] == 0:
self.dfs_visit_recursive(node)
return self.seen, self.done
if __name__ == '__main__':
example_graph = SimpleGraph()
example_graph.edges = {
'A': ['B'],
'B': ['A', 'C', 'D'],
'C': ['A'],
'D': ['E', 'A'],
'E': ['B']
}
print(DFS(example_graph).search())
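    # Added note (not in the original source): for this graph and insertion order
    # the printed result is
    # ({'A': 0, 'B': 1, 'C': 2, 'D': 4, 'E': 5}, {'A': 9, 'B': 8, 'C': 3, 'D': 7, 'E': 6})
    # i.e. discovery times followed by finish times for each node.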
|
malaya_speech/train/model/tacotron2_nvidia/__init__.py | ishine/malaya-speech | 111 | 12619611 | from .model import Model
from ..tacotron2 import generate_guided_attention
|
astroquery/nasa_ads/tests/test_nasaads.py | rickynilsson/astroquery | 577 | 12619617 | <filename>astroquery/nasa_ads/tests/test_nasaads.py<gh_stars>100-1000
import os
import requests
import pytest
from ... import nasa_ads
from ...utils.testing_tools import MockResponse
class MockResponseADS(MockResponse):
"""
Fixing the init issues
"""
def __init__(self, content=None, url=None, headers={},
content_type=None, stream=False, auth=None, status_code=200,
verify=True):
self.content = content
self.raw = content
self.headers = headers
if content_type is not None:
self.headers.update({'Content-Type': content_type})
self.url = url
self.auth = auth
self.status_code = status_code
def data_path(filename):
data_dir = os.path.join(os.path.dirname(__file__), 'data')
return os.path.join(data_dir, filename)
@pytest.fixture
def patch_get(request):
try:
mp = request.getfixturevalue("monkeypatch")
except AttributeError: # pytest < 3
mp = request.getfuncargvalue("monkeypatch")
mp.setattr(requests, 'get', get_mockreturn)
mp.setattr(nasa_ads.ADS, '_request', get_mockreturn)
return mp
def get_mockreturn(method='GET', url=None, headers=None, timeout=10,
**kwargs):
filename = data_path('test_text.txt')
content = open(filename, 'r').read()
return MockResponseADS(content=content)
def test_url():
url = nasa_ads.ADS.query_simple(
"^Persson Origin of water around deeply embedded low-mass protostars", get_query_payload=True)
assert url == ('https://api.adsabs.harvard.edu/v1/search/query?'
'q=%5EPersson%20Origin%20of%20water%20around%20deeply%20embedded%20low-mass%20protostars'
'&fl=bibcode,title,author,aff,pub,volume,pubdate,page,citation,citation_count,abstract,doi,eid'
'&sort=date%20desc'
'&rows=10&start=0')
def test_simple(patch_get):
testADS = nasa_ads.ADS
testADS.TOKEN = 'test-token'
x = testADS.query_simple(
"^Persson Origin of water around deeply embedded low-mass protostars")
assert x['author'][0][0] == '<NAME>.'
assert 'citation' in x.columns
assert 'citation_count' in x.columns
|
Firmware/tools/atcommander.py | davidbuzz/SiK | 120 | 12619631 | #!/usr/bin/env python
#
# Provide command line access to AT command set on radios
#
import serial, sys, argparse, time, fdpexpect
class ATCommandSet(object):
''' Interface to the AT command set '''
### AT Command Constants ###
# Prefix determines if commanding attached or linked radio
AT_LOCAL_PREFIX = 'AT'
AT_REMOTE_PREFIX = 'RT'
# AT commands that are implemented in this class
AT_SHOW_BRD_TYPE = 'I2'
AT_SHOW_BRD_FREQ = 'I3'
AT_SHOW_BRD_VER = 'I4'
AT_SHOW_PARAM = 'I5'
AT_EXIT = 'O'
AT_PARAM = 'S'
AT_REBOOT = 'Z'
AT_PARAM_WRITE = '&W'
# AT commands yet to be implemented here
AT_SHOW_VER_LONG = 'I0'
AT_SHOW_VER = 'I1'
AT_SHOW_TDM = 'I6'
AT_SHOW_RSSI = 'I7'
AT_PARAM_FACTORY = '&F'
AT_DEBUG_RSSI = '&T=RSSI'
AT_DEBUG_TDM = '&T=TDM'
AT_DEBUG_OFF = '&T'
# Parameters are gotten with AT_PARAM + PARAM_* + '?"
# Parameters are set with AT_PARAM + PARAM_* + '=' + value
PARAM_FORMAT = 0
PARAM_SERIAL_SPEED = 1
PARAM_AIR_SPEED = 2
PARAM_NETID = 3
PARAM_TXPOWER = 4
PARAM_ECC = 5
PARAM_MAVLINK = 6
PARAM_OPPRESEND = 7
PARAM_MIN_FREQ = 8
PARAM_MAX_FREQ = 9
PARAM_NUM_CHANNELS = 10
PARAM_DUTY_CYCLE = 11
PARAM_LBT_RSSI = 12
PARAM_MANCHESTER = 13
PARAM_RTSCTS = 14
### Internals ###
# Create object and immediate attempt to connect to radio
def __init__(self, device, baudrate=57600, debug=False,
dsrdtr=False, rtscts=False, xonxoff=False):
# Initialize object data members
self.is_command = False # Track if we've entered command mode
self.is_remote = False # Track if operating on remote radio
self.read_timeout = 5 # Max time to wait for data
logfile=None
if debug:
logfile=sys.stdout
# Initialize the serial connection
# Note: we pass the buck on raised exceptions
self.port = serial.Serial(device, baudrate=baudrate, timeout=0,
dsrdtr=dsrdtr, rtscts=rtscts, xonxoff=xonxoff)
self.ser = fdpexpect.fdspawn(self.port.fileno(), logfile=logfile)
# Send raw text to radio
def __send(self, text):
if (self.port is None) or (self.ser is None):
return False
try:
res = self.ser.send(text)
time.sleep(0.2) # Let the serial line catch up
return res
except:
return False
# Form an AT command and send to radio
def __send_at(self, command):
# Don't send bytes if in "normal" mode, other radio is listening!
if not self.is_command:
return False
prefix = ATCommandSet.AT_LOCAL_PREFIX
if self.is_remote and (command != ATCommandSet.AT_EXIT):
prefix = ATCommandSet.AT_REMOTE_PREFIX
text = '\r\n' + prefix + str(command) + '\r\n'
return not not self.__send(text)
# Look for 'pattern' (string RE) and return MatchObject if seen before read_timeout
def __expect(self, pattern_list):
if (self.port is None) or (self.ser is None):
return False
try:
self.ser.expect(pattern_list, timeout=self.read_timeout)
res = self.ser.match
time.sleep(0.2) # Let the serial line catch up
return res
except:
return False
# Send AT command, then look for pattern
def __query(self, command, pattern):
if not self.__send_at(command):
return False
val = self.__expect(pattern)
return val
# Query for an int
def __query_int(self, command):
val = self.__query(command, ['(\d+)\r\n'])
if val:
return int(val.group(0))
return False
# Query for a float
def __query_float(self, command):
val = self.__query(command, ['(\d+\.\d+)\r\n'])
if val:
return float(val.group(0))
return False
# Query for literal text (return True if found)
def __query_exact(self, command, text):
return not not self.__query(command, ['%s\r\n' % text])
### Manage command mode ###
def enter_command_mode(self):
# Technically okay to resend this command, but won't see an 'OK' back
if self.is_command:
return False
# Will raise a timeout exception if already in command mode
# (due to another process leaving it that way?)
time.sleep(1)
if not self.__send('+++'):
return False
time.sleep(1)
if not self.__expect(['OK']):
return False
self.is_command = True
return True
def leave_command_mode(self):
# Don't send bytes if in "normal" mode, other radio is listening!
self.__send_at(ATCommandSet.AT_EXIT)
self.is_command = False
def leave_command_mode_force(self):
# Overrides mode check, use only if radio is "stuck" in command mode
self.is_command = True
self.leave_command_mode()
def is_command_mode(self):
return self.is_command
### Select local or remote operation ###
def set_remote_mode(self, remote=False):
# True = remote (linked) radio, False = local (attached) radio
self.is_remote = remote
def is_remote_mode(self):
return self.is_remote
### Get general info ###
def get_radio_version(self):
return self.__query_float(ATCommandSet.AT_SHOW_VER)
def get_board_type(self):
return self.__query_int(ATCommandSet.AT_SHOW_BRD_TYPE)
def get_board_frequency(self):
return self.__query_int(ATCommandSet.AT_SHOW_BRD_FREQ)
def get_board_version(self):
return self.__query_int(ATCommandSet.AT_SHOW_BRD_VER)
# Return a multi-line string containing all parameters, for display
def get_params_text(self):
res = self.__query(ATCommandSet.AT_SHOW_PARAM, ['(S0:.*S14:.*)\r\n'])
if res:
return res.group(0)
else:
return "** Could not access parameters **"
### Parameters (settings) access ###
def get_param(self, p_id):
# Assumes all params are ints
return self.__query_int(ATCommandSet.AT_PARAM + str(p_id) + '?')
def set_param(self, p_id, p_val):
return self.__query_exact(ATCommandSet.AT_PARAM + str(p_id) + '=' + str(p_val), 'OK')
# Stores params to EEPROM (necessary after 1+ set_param() calls)
def write_params(self):
return self.__query_exact(ATCommandSet.AT_PARAM_WRITE, 'OK')
### Miscellaneous ###
# Reboot the radio (necessary for settings to take effect)
def reboot(self):
if not self.__send_at(ATCommandSet.AT_REBOOT):
return False
# The local radio leaves command mode upon reboot
if not self.is_remote:
self.is_command = False
return True
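# Programmatic usage sketch (an added note, not in the original SiK tool; the
# command-line front end below is the supported interface, and the device path
# here is an example only):
#   at = ATCommandSet('/dev/ttyUSB0', baudrate=57600)
#   if at.enter_command_mode():
#       at.set_param(ATCommandSet.PARAM_NETID, 25)
#       at.write_params()        # persist settings to EEPROM
#       at.reboot()              # settings take effect after reboot
#       at.leave_command_mode()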
### User Interface ###
if __name__ == '__main__':
param_map = { 'format' : ATCommandSet.PARAM_FORMAT,
'serialspeed' : ATCommandSet.PARAM_SERIAL_SPEED,
'airspeed' : ATCommandSet.PARAM_AIR_SPEED,
'netid' : ATCommandSet.PARAM_NETID,
'txpower' : ATCommandSet.PARAM_TXPOWER,
'ecc' : ATCommandSet.PARAM_ECC,
'mavlink' : ATCommandSet.PARAM_MAVLINK,
'oppresend' : ATCommandSet.PARAM_OPPRESEND,
'minfreq' : ATCommandSet.PARAM_MIN_FREQ,
'maxfreq' : ATCommandSet.PARAM_MAX_FREQ,
'channels' : ATCommandSet.PARAM_NUM_CHANNELS,
'duty' : ATCommandSet.PARAM_DUTY_CYCLE,
'lbtrssi' : ATCommandSet.PARAM_LBT_RSSI,
'manchester' : ATCommandSet.PARAM_MANCHESTER,
'rtscts' : ATCommandSet.PARAM_RTSCTS }
# Grok arguments
parser = argparse.ArgumentParser(description='Change settings on local and remote radio.',
epilog="Settable parameters (can use multiple --set-*): %s" % \
" ".join(sorted(param_map.keys())))
parser.add_argument("--baudrate", type=int, default=57600, help='connect at baud rate')
parser.add_argument("--rtscts", action='store_true', default=False, help='connect using rtscts')
parser.add_argument("--dsrdtr", action='store_true', default=False, help='connect using dsrdtr')
parser.add_argument("--xonxoff", action='store_true', default=False, help='connect using xonxoff')
parser.add_argument("--force", action='store_true', default=False, help='cycle command mode first')
parser.add_argument("--debug", action='store_true', default=False, help='intermix raw AT traffic')
parser.add_argument("--list-local", action='store_true', default=False,
help='list local parameters and exit')
parser.add_argument("--list-remote", action='store_true', default=False,
help='list remote parameters and exit')
parser.add_argument("--set-local", nargs=2, action='append', metavar=('PARAM', 'VALUE'),
help='set local parameter (will reboot radio at end)')
parser.add_argument("--set-remote", nargs=2, action='append', metavar=('PARAM', 'VALUE'),
help='set remote parameter (will reboot radio at end)')
parser.add_argument("--set-both", nargs=2, action='append', metavar=('PARAM', 'VALUE'),
help='set on BOTH radios (takes precedence)')
parser.add_argument("device", help='locally attached radio device')
args = parser.parse_args()
# If no get/set was requested, then bail
if not (args.list_local or args.list_remote or \
args.set_local or args.set_remote or args.set_both):
print "Please specify a --list-* or --set-* operation (try -h if unsure)"
sys.exit(0)
# Also bail if attempting to get and set (we could, but we don't)
if (args.list_local or args.list_remote) and \
(args.set_local or args.set_remote or args.set_both):
print "We don't support listing and setting in the same command"
sys.exit(0)
# Parse any --set-* args and build dictionaries of parameters to set
# Note: --set-both overrides --set-local and --set-remote. Beyond that,
# we don't guard against the user specifying strange combinations.
def _parse_set(params, myset):
for pair in params:
prm, val = pair
if prm not in param_map:
print "Parameter not valid: %s" % prm
sys.exit(-1)
try:
myset[prm] = int(val)
except:
print "Param '%s' value must be an integer: %s" % (prm, val)
sys.exit(-1)
return myset
local_set = {}
remote_set = {}
if args.set_local:
local_set = _parse_set(args.set_local, local_set)
if args.set_remote:
remote_set = _parse_set(args.set_remote, remote_set)
if args.set_both:
local_set = _parse_set(args.set_both, local_set)
remote_set = _parse_set(args.set_both, remote_set)
# Initialize the serial connection
at = ATCommandSet(args.device, baudrate=args.baudrate, dsrdtr=args.dsrdtr,
rtscts=args.rtscts, xonxoff=args.xonxoff, debug=args.debug)
# In case the radio was left in command mode, we can force it out
# (Could just not "enter" command mode, but this seems safer somehow)
if args.force:
print "Forcing out of command mode first..."
at.leave_command_mode_force()
# Try to enter command mode, bail if radio doesn't give expected response
print "Entering command mode..."
if not at.enter_command_mode():
print "Could not enter command mode; try --force"
sys.exit(-1)
# If --list-* was requested, do that and exit (don't set any parameters)
def _list_info():
r_ver = at.get_radio_version()
if not r_ver:
print "** Could not access radio **"
else:
print "radio version: %g board type: %d board version: %d" % \
(r_ver,
at.get_board_type() or -1,
at.get_board_version() or -1)
print "Parameters: \n%s" % at.get_params_text()
if args.list_local:
print "Querying local radio..."
_list_info()
if args.list_remote:
at.set_remote_mode(True)
print "Querying remote radio..."
_list_info()
at.set_remote_mode(False)
if args.list_local or args.list_remote:
print "Leaving command mode..."
at.leave_command_mode()
sys.exit(0)
# If --set-* was requested, attempt to do all of them, then write and reboot
# only the radio(s) that was/were changed
def _set_params(myset):
for prm in myset:
if at.set_param(param_map[prm], myset[prm]):
print "Set %s to %d" % (prm, myset[prm])
else:
print "Failed to set %s, aborting without saving changes." % prm
return False
if at.write_params():
print "Wrote parameters to EEPROM."
else:
print "Failed to write parameters to EEPROM, aborting without saving changes."
return False
if at.reboot():
print "Commanded reboot; changes should be in effect momentarily."
else:
print "Failed to command reboot; please manually reboot the radio."
return True
# Try remote radio first
remote_failed = False
if remote_set:
at.set_remote_mode(True)
        if not at.get_radio_version():
print "Could not contact remote radio, aborting without saving changes."
remote_failed = True
else:
print "Changing settings on remote radio..."
            remote_failed = not _set_params(remote_set)
at.set_remote_mode(False)
# Try local radio second (only if no remote failures)
if local_set and not remote_failed:
        # We already entered command mode successfully to get here, so no further checks are needed
print "Changing settings on local radio..."
_set_params(local_set)
# Always leave command mode when finished
# (If we rebooted the local radio at the very end, this will be ignored)
print "Leaving command mode..."
at.leave_command_mode()
sys.exit(0)
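    # Example invocations (illustrative sketch; the script name and device path are
    # placeholders, and the parameter values are assumptions):
    #   python radio_config.py --list-local /dev/ttyUSB0
    #   python radio_config.py --set-both netid 42 --set-local txpower 20 /dev/ttyUSB0
    # Remote parameters are written before local ones, so a failed remote update does
    # not leave the linked pair on mismatched settings.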
|
libsaas/services/pipedrive/filters.py | MidtownFellowship/libsaas | 155 | 12619679 | <filename>libsaas/services/pipedrive/filters.py
from libsaas import http, parsers
from libsaas.services import base
class FiltersResource(base.RESTResource):
path = 'filters'
def create(self, *args, **kwargs):
raise base.MethodNotSupported()
def update(self, *args, **kwargs):
raise base.MethodNotSupported()
class Filters(FiltersResource):
@base.apimethod
def get(self, type=None):
"""
Returns all filters.
Upstream documentation:
https://developers.pipedrive.com/v1#methods-Filters
"""
params = base.get_params(None, locals())
request = http.Request('GET', self.get_url(), params)
return request, parsers.parse_json
@base.apimethod
def delete(self, ids):
"""
Marks multiple filters as deleted.
Upstream documentation:
https://developers.pipedrive.com/v1#methods-Filters
"""
params = base.get_params(None, locals())
request = http.Request('DELETE', self.get_url(), params)
return request, parsers.parse_json
class Filter(FiltersResource):
pass
|
tools/new_plugin.py | e7p/hal | 407 | 12619697 | <reponame>e7p/hal<filename>tools/new_plugin.py
#!/usr/bin/env python3
import sys
import os
import re
def print_usage():
print("HAL plugin template generator")
print(" usage: new_plugin <name>")
print("")
print("Sets up the directory structure and respective files in the current directory:")
print("<name>/")
print(" |- include/plugin_<name>/")
print(" | |- plugin_<name>.h")
print(" |- python/")
print(" | |- python_bindings.cpp")
print(" |- src/")
print(" | |- plugin_<name>.cpp")
print(" |- CMakeLists.txt")
print("")
#################################################################
############## Templates ##############
#################################################################
CMAKE_TEMPLATE ="""option(PL_##UPPER## "PL_##UPPER##" OFF)
if(PL_##UPPER## OR BUILD_ALL_PLUGINS)
file(GLOB_RECURSE ##UPPER##_INC ${CMAKE_CURRENT_SOURCE_DIR}/include/*.h)
file(GLOB_RECURSE ##UPPER##_SRC ${CMAKE_CURRENT_SOURCE_DIR}/src/*.cpp)
file(GLOB_RECURSE ##UPPER##_PYTHON_SRC ${CMAKE_CURRENT_SOURCE_DIR}/python/*.cpp)
hal_add_plugin(##LOWER##
SHARED
HEADER ${##UPPER##_INC}
SOURCES ${##UPPER##_SRC} ${##UPPER##_PYTHON_SRC}
)
endif()
"""
#################################################################
#################################################################
PLUGIN_H_TEMPLATE = """#pragma once
#include "core/plugin_interface_base.h"
namespace hal
{
class PLUGIN_API ##CLASSNAME##Plugin : public BasePluginInterface
{
public:
std::string get_name() const override;
std::string get_version() const override;
void initialize() override;
};
}
"""
#################################################################
#################################################################
PLUGIN_CPP_TEMPLATE = """#include "##LOWER##/plugin_##LOWER##.h"
namespace hal
{
extern std::unique_ptr<BasePluginInterface> create_plugin_instance()
{
return std::make_unique<##CLASSNAME##Plugin>();
}
std::string ##CLASSNAME##Plugin::get_name() const
{
return std::string("##LOWER##");
}
std::string ##CLASSNAME##Plugin::get_version() const
{
return std::string("0.1");
}
void ##CLASSNAME##Plugin::initialize()
{
}
}
"""
#################################################################
#################################################################
PYTHON_CPP_TEMPLATE = """#include "pybind11/operators.h"
#include "pybind11/pybind11.h"
#include "pybind11/stl.h"
#include "pybind11/stl_bind.h"
#include "##LOWER##/plugin_##LOWER##.h"
namespace py = pybind11;
namespace hal
{
// the name in PYBIND11_MODULE/PYBIND11_PLUGIN *MUST* match the filename of the output library (without extension),
// otherwise you will get "ImportError: dynamic module does not define module export function" when importing the module
#ifdef PYBIND11_MODULE
PYBIND11_MODULE(##LOWER##, m)
{
m.doc() = "hal ##CLASSNAME##Plugin python bindings";
#else
PYBIND11_PLUGIN(##LOWER##)
{
py::module m("##LOWER##", "hal ##CLASSNAME##Plugin python bindings");
#endif // ifdef PYBIND11_MODULE
py::class_<##CLASSNAME##Plugin, RawPtrWrapper<##CLASSNAME##Plugin>, BasePluginInterface>(m, "##CLASSNAME##Plugin")
.def_property_readonly("name", &##CLASSNAME##Plugin::get_name)
.def("get_name", &##CLASSNAME##Plugin::get_name)
.def_property_readonly("version", &##CLASSNAME##Plugin::get_version)
.def("get_version", &##CLASSNAME##Plugin::get_version)
;
#ifndef PYBIND11_MODULE
return m.ptr();
#endif // PYBIND11_MODULE
}
}
"""
#################################################################
############## CORE ##############
#################################################################
def create_plugin(name):
lower = name.lower()
upper = name.upper()
classname = "".join(x[0].upper()+x[1:] for x in name.split("_"))
os.makedirs(name)
with open(name+"/CMakeLists.txt", "wt") as f:
f.write(CMAKE_TEMPLATE.replace("##UPPER##", upper).replace("##LOWER##", lower).replace("##CLASSNAME##", classname))
os.makedirs(name+"/include/"+lower)
with open(name+"/include/"+lower+"/plugin_"+lower+".h", "wt") as f:
f.write(PLUGIN_H_TEMPLATE.replace("##UPPER##", upper).replace("##LOWER##", lower).replace("##CLASSNAME##", classname))
os.makedirs(name+"/src")
with open(name+"/src/plugin_"+lower+".cpp", "wt") as f:
f.write(PLUGIN_CPP_TEMPLATE.replace("##UPPER##", upper).replace("##LOWER##", lower).replace("##CLASSNAME##", classname))
os.makedirs(name+"/python")
with open(name+"/python/python_bindings.cpp", "wt") as f:
f.write(PYTHON_CPP_TEMPLATE.replace("##UPPER##", upper).replace("##LOWER##", lower).replace("##CLASSNAME##", classname))
if len(sys.argv) != 2:
print_usage()
sys.stderr.write("ERROR: unsupported number of parameters\n")
sys.exit(-1)
name = sys.argv[1].lower()
if not name.replace("_","").isalnum() or name[0] == "_" or name[0].isnumeric():
print_usage()
sys.stderr.write("ERROR: '{}' is not a valid C++ identifier\n".format(name))
sys.exit(-1)
if os.path.exists(name):
print_usage()
sys.stderr.write("ERROR: directory '{}' already exists\n".format(name))
sys.exit(-1)
create_plugin(sys.argv[1])
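# Example (illustrative; the plugin name is just a sample): running
# "new_plugin.py graph_algorithm" creates
#   graph_algorithm/CMakeLists.txt
#   graph_algorithm/include/graph_algorithm/plugin_graph_algorithm.h
#   graph_algorithm/src/plugin_graph_algorithm.cpp
#   graph_algorithm/python/python_bindings.cpp
# with the generated class named GraphAlgorithmPlugin.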
|
package/tests/application/models/test_theme.py | Jhsmit/awesome-panel | 179 | 12619698 | # pylint: disable=redefined-outer-name,protected-access
# pylint: disable=missing-function-docstring,missing-module-docstring,missing-class-docstring
from bokeh.themes.theme import Theme as BokehTheme
from holoviews import Cycle as HoloviewsCycle
def test_can_construct(theme):
assert isinstance(theme.spinner_static_url, str)
assert theme.spinner_static_url
assert isinstance(theme.spinner_url, str)
assert theme.spinner_url
assert isinstance(theme.css, str)
assert isinstance(theme.color_cycle, tuple)
assert theme.bokeh_disable_logo is True
assert hasattr(theme, "bokeh_theme_json")
assert isinstance(theme.bokeh_theme, BokehTheme)
assert isinstance(theme.holoviews_color_cycle, HoloviewsCycle)
|
chapter06/cliff_walking.py | psxz/reinforcement-learning-an-introduction | 12,197 | 12619702 | #######################################################################
# Copyright (C) #
# 2016-2018 <NAME>(<EMAIL>) #
# 2016 <NAME>(<EMAIL>) #
# Permission given to modify the code as long as you keep this #
# declaration at the top #
#######################################################################
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from tqdm import tqdm
# world height
WORLD_HEIGHT = 4
# world width
WORLD_WIDTH = 12
# probability for exploration
EPSILON = 0.1
# step size
ALPHA = 0.5
# gamma for Q-Learning and Expected Sarsa
GAMMA = 1
# all possible actions
ACTION_UP = 0
ACTION_DOWN = 1
ACTION_LEFT = 2
ACTION_RIGHT = 3
ACTIONS = [ACTION_UP, ACTION_DOWN, ACTION_LEFT, ACTION_RIGHT]
# initial state action pair values
START = [3, 0]
GOAL = [3, 11]
def step(state, action):
i, j = state
if action == ACTION_UP:
next_state = [max(i - 1, 0), j]
elif action == ACTION_LEFT:
next_state = [i, max(j - 1, 0)]
elif action == ACTION_RIGHT:
next_state = [i, min(j + 1, WORLD_WIDTH - 1)]
elif action == ACTION_DOWN:
next_state = [min(i + 1, WORLD_HEIGHT - 1), j]
else:
assert False
reward = -1
if (action == ACTION_DOWN and i == 2 and 1 <= j <= 10) or (
action == ACTION_RIGHT and state == START):
reward = -100
next_state = START
return next_state, reward
# reward for each action in each state
# actionRewards = np.zeros((WORLD_HEIGHT, WORLD_WIDTH, 4))
# actionRewards[:, :, :] = -1.0
# actionRewards[2, 1:11, ACTION_DOWN] = -100.0
# actionRewards[3, 0, ACTION_RIGHT] = -100.0
# set up destinations for each action in each state
# actionDestination = []
# for i in range(0, WORLD_HEIGHT):
# actionDestination.append([])
# for j in range(0, WORLD_WIDTH):
# destinaion = dict()
# destinaion[ACTION_UP] = [max(i - 1, 0), j]
# destinaion[ACTION_LEFT] = [i, max(j - 1, 0)]
# destinaion[ACTION_RIGHT] = [i, min(j + 1, WORLD_WIDTH - 1)]
# if i == 2 and 1 <= j <= 10:
# destinaion[ACTION_DOWN] = START
# else:
# destinaion[ACTION_DOWN] = [min(i + 1, WORLD_HEIGHT - 1), j]
# actionDestination[-1].append(destinaion)
# actionDestination[3][0][ACTION_RIGHT] = START
# choose an action based on epsilon greedy algorithm
def choose_action(state, q_value):
if np.random.binomial(1, EPSILON) == 1:
return np.random.choice(ACTIONS)
else:
values_ = q_value[state[0], state[1], :]
return np.random.choice([action_ for action_, value_ in enumerate(values_) if value_ == np.max(values_)])
# an episode with Sarsa
# @q_value: values for state action pair, will be updated
# @expected: if True, will use expected Sarsa algorithm
# @step_size: step size for updating
# @return: total rewards within this episode
def sarsa(q_value, expected=False, step_size=ALPHA):
state = START
action = choose_action(state, q_value)
rewards = 0.0
while state != GOAL:
next_state, reward = step(state, action)
next_action = choose_action(next_state, q_value)
rewards += reward
if not expected:
target = q_value[next_state[0], next_state[1], next_action]
else:
# calculate the expected value of new state
target = 0.0
q_next = q_value[next_state[0], next_state[1], :]
best_actions = np.argwhere(q_next == np.max(q_next))
for action_ in ACTIONS:
if action_ in best_actions:
target += ((1.0 - EPSILON) / len(best_actions) + EPSILON / len(ACTIONS)) * q_value[next_state[0], next_state[1], action_]
else:
target += EPSILON / len(ACTIONS) * q_value[next_state[0], next_state[1], action_]
target *= GAMMA
q_value[state[0], state[1], action] += step_size * (
reward + target - q_value[state[0], state[1], action])
state = next_state
action = next_action
return rewards
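# For reference, the Sarsa update above and the Q-Learning update below both take
# the tabular TD form
#   Q(s, a) <- Q(s, a) + step_size * (reward + GAMMA * target - Q(s, a))
# where target is Q(s', a') for Sarsa, the epsilon-greedy expectation
# sum_a pi(a|s') * Q(s', a) for Expected Sarsa, and max_a Q(s', a) for Q-Learning.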
# an episode with Q-Learning
# @q_value: values for state action pair, will be updated
# @step_size: step size for updating
# @return: total rewards within this episode
def q_learning(q_value, step_size=ALPHA):
state = START
rewards = 0.0
while state != GOAL:
action = choose_action(state, q_value)
next_state, reward = step(state, action)
rewards += reward
# Q-Learning update
q_value[state[0], state[1], action] += step_size * (
reward + GAMMA * np.max(q_value[next_state[0], next_state[1], :]) -
q_value[state[0], state[1], action])
state = next_state
return rewards
# print optimal policy
def print_optimal_policy(q_value):
optimal_policy = []
for i in range(0, WORLD_HEIGHT):
optimal_policy.append([])
for j in range(0, WORLD_WIDTH):
if [i, j] == GOAL:
optimal_policy[-1].append('G')
continue
bestAction = np.argmax(q_value[i, j, :])
if bestAction == ACTION_UP:
optimal_policy[-1].append('U')
elif bestAction == ACTION_DOWN:
optimal_policy[-1].append('D')
elif bestAction == ACTION_LEFT:
optimal_policy[-1].append('L')
elif bestAction == ACTION_RIGHT:
optimal_policy[-1].append('R')
for row in optimal_policy:
print(row)
# Use multiple runs instead of a single run and a sliding window
# With a single run I failed to present a smooth curve
# However the optimal policy converges well with a single run
# Sarsa converges to the safe path, while Q-Learning converges to the optimal path
def figure_6_4():
# episodes of each run
episodes = 500
    # perform 50 independent runs
runs = 50
rewards_sarsa = np.zeros(episodes)
rewards_q_learning = np.zeros(episodes)
for r in tqdm(range(runs)):
q_sarsa = np.zeros((WORLD_HEIGHT, WORLD_WIDTH, 4))
q_q_learning = np.copy(q_sarsa)
for i in range(0, episodes):
# cut off the value by -100 to draw the figure more elegantly
# rewards_sarsa[i] += max(sarsa(q_sarsa), -100)
# rewards_q_learning[i] += max(q_learning(q_q_learning), -100)
rewards_sarsa[i] += sarsa(q_sarsa)
rewards_q_learning[i] += q_learning(q_q_learning)
    # averaging over independent runs
rewards_sarsa /= runs
rewards_q_learning /= runs
# draw reward curves
plt.plot(rewards_sarsa, label='Sarsa')
plt.plot(rewards_q_learning, label='Q-Learning')
plt.xlabel('Episodes')
plt.ylabel('Sum of rewards during episode')
plt.ylim([-100, 0])
plt.legend()
plt.savefig('../images/figure_6_4.png')
plt.close()
# display optimal policy
print('Sarsa Optimal Policy:')
print_optimal_policy(q_sarsa)
print('Q-Learning Optimal Policy:')
print_optimal_policy(q_q_learning)
# Due to the limited compute capacity of my machine, I can't complete this experiment
# with 100,000 episodes and 50,000 runs to get the fully averaged performance.
# However, even with only 1,000 episodes and 10 runs, the curves still look good.
def figure_6_6():
step_sizes = np.arange(0.1, 1.1, 0.1)
episodes = 1000
runs = 10
ASY_SARSA = 0
ASY_EXPECTED_SARSA = 1
ASY_QLEARNING = 2
INT_SARSA = 3
INT_EXPECTED_SARSA = 4
INT_QLEARNING = 5
methods = range(0, 6)
performace = np.zeros((6, len(step_sizes)))
for run in range(runs):
for ind, step_size in tqdm(list(zip(range(0, len(step_sizes)), step_sizes))):
q_sarsa = np.zeros((WORLD_HEIGHT, WORLD_WIDTH, 4))
q_expected_sarsa = np.copy(q_sarsa)
q_q_learning = np.copy(q_sarsa)
for ep in range(episodes):
sarsa_reward = sarsa(q_sarsa, expected=False, step_size=step_size)
expected_sarsa_reward = sarsa(q_expected_sarsa, expected=True, step_size=step_size)
q_learning_reward = q_learning(q_q_learning, step_size=step_size)
performace[ASY_SARSA, ind] += sarsa_reward
performace[ASY_EXPECTED_SARSA, ind] += expected_sarsa_reward
performace[ASY_QLEARNING, ind] += q_learning_reward
if ep < 100:
performace[INT_SARSA, ind] += sarsa_reward
performace[INT_EXPECTED_SARSA, ind] += expected_sarsa_reward
performace[INT_QLEARNING, ind] += q_learning_reward
performace[:3, :] /= episodes * runs
performace[3:, :] /= 100 * runs
labels = ['Asymptotic Sarsa', 'Asymptotic Expected Sarsa', 'Asymptotic Q-Learning',
'Interim Sarsa', 'Interim Expected Sarsa', 'Interim Q-Learning']
for method, label in zip(methods, labels):
plt.plot(step_sizes, performace[method, :], label=label)
plt.xlabel('alpha')
plt.ylabel('reward per episode')
plt.legend()
plt.savefig('../images/figure_6_6.png')
plt.close()
if __name__ == '__main__':
figure_6_4()
figure_6_6()
|
tlaplus_jupyter/__init__.py | ajdavis/tlaplus_jupyter | 113 | 12619722 | __version__ = '0.1'
from .kernel import TLAPlusKernel
|
validation_tests/analytical_exact/lake_at_rest_immersed_bump/numerical_immersed_bump.py | samcom12/anuga_core | 136 | 12619740 | <filename>validation_tests/analytical_exact/lake_at_rest_immersed_bump/numerical_immersed_bump.py
"""Simple water flow example using ANUGA
Still water (lake at rest) over an immersed bump: the free surface should
remain flat and the flow should stay at rest.
"""
#------------------------------------------------------------------------------
# Import necessary modules
#------------------------------------------------------------------------------
import sys
import anuga
from anuga import Domain as Domain
from math import cos
from numpy import zeros, float
from time import localtime, strftime, gmtime
#from balanced_dev import *
from anuga import myid, finalize, distribute
#-------------------------------------------------------------------------------
# Copy scripts to time stamped output directory and capture screen
# output to file
#-------------------------------------------------------------------------------
time = strftime('%Y%m%d_%H%M%S',localtime())
#output_dir = 'immersed_bump'+time
output_dir = '.'
output_file = 'immersed_bump'
def bed_elevation(x,y):
z = zeros(len(x), float)
for i in range(len(x)):
if 8.0 < x[i] < 12.0:
z[i] = 0.2 - 0.05*(x[i]-10.)**2
else:
z[i] = 0.0
return z
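# Analytical solution for this well-balanced (lake at rest) test: the free surface
# should stay flat at stage = 0.5 and both momentum components should remain zero,
# despite the immersed bump in the bed.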
args = anuga.get_args()
alg = args.alg
verbose = args.verbose
if myid == 0:
#------------------------------------------------------------------------------
# Setup domain
#------------------------------------------------------------------------------
dx = 1.
dy = dx
L = 25.
W = 5*dx
# structured mesh
points, vertices, boundary = anuga.rectangular_cross(int(L/dx), int(W/dy), L, W, (0.0, 0.0))
#domain = anuga.Domain(points, vertices, boundary)
domain = Domain(points, vertices, boundary)
domain.set_name(output_file)
domain.set_datadir(output_dir)
domain.set_flow_algorithm(alg)
#------------------------------------------------------------------------------
# Setup initial conditions
#------------------------------------------------------------------------------
domain.set_quantity('friction', 0.0)
domain.set_quantity('stage', 0.5)
domain.set_quantity('elevation', bed_elevation)
else:
domain = None
#-----------------------------------------------------------------------------
# Parallel Domain
#-----------------------------------------------------------------------------
domain = distribute(domain)
#-----------------------------------------------------------------------------
# Setup boundary conditions
#------------------------------------------------------------------------------
from math import sin, pi, exp
Br = anuga.Reflective_boundary(domain) # Solid reflective wall
#Bt = anuga.Transmissive_boundary(domain) # Continue all values on boundary
#Bd = anuga.Dirichlet_boundary([1,0.,0.]) # Constant boundary values
# Associate boundary tags with boundary objects
domain.set_boundary({'left': Br, 'right': Br, 'top': Br, 'bottom': Br})
#------------------------------------------------------------------------------
# Produce a documentation of parameters
#------------------------------------------------------------------------------
if myid == 0:
parameter_file=open('parameters.tex', 'w')
parameter_file.write('\\begin{verbatim}\n')
from pprint import pprint
pprint(domain.get_algorithm_parameters(),parameter_file,indent=4)
parameter_file.write('\\end{verbatim}\n')
parameter_file.close()
#------------------------------------------------------------------------------
# Evolve system through time
#------------------------------------------------------------------------------
for t in domain.evolve(yieldstep = 0.1, finaltime = 5.):
#print domain.timestepping_statistics(track_speeds=True)
if myid == 0: print(domain.timestepping_statistics())
domain.sww_merge(delete_old=True)
finalize()
|
dragnet/__init__.py | jdddog/dragnet | 535 | 12619747 | <gh_stars>100-1000
from dragnet.blocks import Blockifier, PartialBlock, BlockifyError
from dragnet import features
from dragnet.extractor import Extractor
from dragnet.util import load_pickled_model
_LOADED_MODELS = {}
def extract_content(html, encoding=None, as_blocks=False):
if 'content' not in _LOADED_MODELS:
_LOADED_MODELS['content'] = load_pickled_model(
'kohlschuetter_readability_weninger_content_model.pkl.gz')
return _LOADED_MODELS['content'].extract(html, encoding=encoding, as_blocks=as_blocks)
def extract_comments(html, encoding=None, as_blocks=False):
if 'comments' not in _LOADED_MODELS:
_LOADED_MODELS['comments'] = load_pickled_model(
'kohlschuetter_readability_weninger_comments_model.pkl.gz')
return _LOADED_MODELS['comments'].extract(html, encoding=encoding, as_blocks=as_blocks)
def extract_content_and_comments(html, encoding=None, as_blocks=False):
if 'content_and_comments' not in _LOADED_MODELS:
_LOADED_MODELS['content_and_comments'] = load_pickled_model(
'kohlschuetter_readability_weninger_comments_content_model.pkl.gz')
return _LOADED_MODELS['content_and_comments'].extract(html, encoding=encoding, as_blocks=as_blocks)
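# Example usage (sketch; the HTML string below is a stand-in for a real page):
#   html = "<html><body><p>Main article text.</p></body></html>"
#   content = extract_content(html)
#   comments = extract_comments(html)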
|
tests/integration/test_matrix_power.py | fduguet-nv/cunumeric | 304 | 12619767 | <gh_stars>100-1000
# Copyright 2022 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import pytest
from test_tools.generators import mk_0to1_array
import cunumeric as cn
from legate.core import LEGATE_MAX_DIM
# TODO: add negative exponents here, once they become supported
EXPONENTS = [0, 1, 3, 5]
@pytest.mark.parametrize("ndim", range(0, LEGATE_MAX_DIM - 2))
@pytest.mark.parametrize("exp", EXPONENTS)
def test_matrix_power(ndim, exp):
shape = (3,) * ndim + (2, 2)
np_a = mk_0to1_array(np, shape)
cn_a = mk_0to1_array(cn, shape)
np_res = np.linalg.matrix_power(np_a, exp)
cn_res = cn.linalg.matrix_power(cn_a, exp)
assert np.allclose(np_res, cn_res)
if __name__ == "__main__":
import sys
sys.exit(pytest.main(sys.argv))
|
mbuild/utils/jsutils.py | daico007/mbuild | 101 | 12619769 | """Utilities for communicating with Javascript (js) libraries.
These are the set of utility methods which are used to communicate with
underlying 'js' libraries by the various notebook visualization libraries used
by mBuild.
"""
from .io import import_
def overwrite_nglview_default(widget):
"""Change the default visualization in nglview.
This method takes in a nglview.NGLWidget and changes the default hover
behaviour of the widget to add the atom index when it is hovered over the
atom. It also overwrites the click signal from the stage to include extra
information(atom index) in the text display, whenever an atom or bond is
clicked.
Parameters
----------
widget: nglview.NGLWidget,
the ipython widget view.
Raises
------
TypeError: If widget is not of type nglview.NGLWidget
"""
nglview = import_("nglview")
if not isinstance(widget, nglview.NGLWidget):
raise TypeError(
"The argument widget can only be of type nglview.NGLWidget not "
"{}".format(type(widget))
)
tooltip_js = """
this.stage.mouseControls.add('hoverPick', (stage, pickingProxy) => {
let tooltip = this.stage.tooltip;
if(pickingProxy && pickingProxy.atom && !pickingProxy.bond){
let atom = pickingProxy.atom;
tooltip.innerText = "ATOM: " + atom.qualifiedName() + ", Index: " + atom.index;
}
});
"""
infotext_js = """
this.stage.signals.clicked.removeAll();
this.stage.signals.clicked.add((pickingProxy) => {
if(pickingProxy){
let pickingText = null;
this.model.set('picked', {});
this.touch();
let currentPick = {};
if(pickingProxy.atom){
currentPick.atom1 = pickingProxy.atom.toObject();
currentPick.atom1.name = pickingProxy.atom.qualifiedName();
pickingText = "Atom: " + currentPick.atom1.name + ", Index: "
+ pickingProxy.atom.index;
}
else if(pickingProxy.bond){
currentPick.bond = pickingProxy.bond.toObject();
currentPick.atom1 = pickingProxy.bond.atom1.toObject();
currentPick.atom1.name = pickingProxy.bond.atom1.qualifiedName();
currentPick.atom2 = pickingProxy.bond.atom2.toObject();
currentPick.atom2.name = pickingProxy.bond.atom2.qualifiedName();
pickingText = "Bond: " + currentPick.atom1.name +
`(${pickingProxy.bond.atom1.index})` +
" - " + currentPick.atom2.name +
`(${pickingProxy.bond.atom2.index})`;
}
if(pickingProxy.instance){
currentPick.instance = pickingProxy.instance;
}
var nComponents = this.stage.compList.length;
for(let i = 0; i < nComponents; i++){
let comp = this.stage.compList[i];
if(comp.uuid == pickingProxy.component.uuid){
currentPick.component = i;
}
}
this.model.set('picked', currentPick);
this.touch();
this.$pickingInfo.text(pickingText);
}
});
"""
widget._js(tooltip_js)
widget._js(infotext_js)
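# Example usage (sketch; assumes nglview is installed and that any NGLWidget is at
# hand, e.g. the bundled demo widget):
#   import nglview
#   widget = nglview.demo()
#   overwrite_nglview_default(widget)  # adds atom-index tooltips and click info
#   widget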
|
scripts/tests/macros_test.py | Duroktar/Wolf | 105 | 12619778 | from ..wolf import test as wolftest
snippet = r"""
inputs = [
[6.4, 2.8, 5.6, 2.2, 2], # ?
[5.0, 2.3, 3.3, 1.0, 1],
[4.9, 2.5, 4.5, 1.7, 2],
]
labels = [x[-1] for x in inputs] # ?
hat = labels # ?
"""
def test_macros(snapshot):
res = wolftest(snippet)
snapshot.assert_match(res)
|
app/config_test.py | modoupi/git-webhook | 1,617 | 12619804 | # -*- coding: utf-8 -*-
DEBUG = True
TESTING = True
SECRET_KEY = 'SECRET_KEY'
DATABASE_URI = 'mysql+pymysql://root:[email protected]/git_webhook'
CELERY_BROKER_URL = 'redis://:@127.0.0.1:6379/0'
CELERY_RESULT_BACKEND = 'redis://:@127.0.0.1:6379/0'
SOCKET_MESSAGE_QUEUE = 'redis://:@127.0.0.1:6379/0'
GITHUB_CLIENT_ID = '123'
GITHUB_CLIENT_SECRET = 'SECRET'
|
py_backwards/transformers/string_types.py | graingert/py-backwards | 338 | 12619820 | from typed_ast import ast3 as ast
from ..utils.tree import find
from ..types import TransformationResult
from .base import BaseTransformer
class StringTypesTransformer(BaseTransformer):
"""Replaces `str` with `unicode`.
"""
target = (2, 7)
@classmethod
def transform(cls, tree: ast.AST) -> TransformationResult:
tree_changed = False
for node in find(tree, ast.Name):
if node.id == 'str':
node.id = 'unicode'
tree_changed = True
return TransformationResult(tree, tree_changed, [])
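# Example (illustrative): when targeting Python 2.7 this transformer rewrites
#     isinstance(value, str)
# into
#     isinstance(value, unicode)
# by renaming every ast.Name node whose id is 'str'; the variable name is arbitrary.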
|
example/numpy-ops/weighted_logistic_regression.py | Vikas-kum/incubator-mxnet | 228 | 12619828 | <reponame>Vikas-kum/incubator-mxnet<filename>example/numpy-ops/weighted_logistic_regression.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import mxnet as mx
class WeightedLogisticRegression(mx.operator.CustomOp):
def __init__(self, pos_grad_scale, neg_grad_scale):
self.pos_grad_scale = float(pos_grad_scale)
self.neg_grad_scale = float(neg_grad_scale)
def forward(self, is_train, req, in_data, out_data, aux):
self.assign(out_data[0], req[0], mx.nd.divide(1, (1 + mx.nd.exp(- in_data[0]))))
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
in_grad[0][:] = ((out_data[0] - 1) * in_data[1] * self.pos_grad_scale
+ out_data[0] * (1 - in_data[1]) * self.neg_grad_scale) / out_data[0].shape[1]
@mx.operator.register("weighted_logistic_regression")
class WeightedLogisticRegressionProp(mx.operator.CustomOpProp):
def __init__(self, pos_grad_scale, neg_grad_scale):
self.pos_grad_scale = pos_grad_scale
self.neg_grad_scale = neg_grad_scale
super(WeightedLogisticRegressionProp, self).__init__(False)
def list_arguments(self):
return ['data', 'label']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
shape = in_shape[0]
return [shape, shape], [shape]
def create_operator(self, ctx, shapes, dtypes):
return WeightedLogisticRegression(self.pos_grad_scale, self.neg_grad_scale)
if __name__ == '__main__':
m, n = 2, 5
pos, neg = 1, 0.1
data = mx.sym.Variable('data')
wlr = mx.sym.Custom(data, pos_grad_scale=pos, neg_grad_scale=neg, name='wlr',
op_type='weighted_logistic_regression')
lr = mx.sym.LogisticRegressionOutput(data, name='lr')
# MXNET_CPU_WORKER_NTHREADS must be greater than 1 for custom op to work on CPU
context = mx.cpu()
# Uncomment this line to compute on GPU
# context=mx.gpu(0)
exe1 = wlr.simple_bind(ctx=context, data=(2 * m, n))
exe2 = lr.simple_bind(ctx=context, data=(2 * m, n))
exe1.arg_dict['data'][:] = np.ones([2 * m, n])
exe2.arg_dict['data'][:] = np.ones([2 * m, n])
exe1.arg_dict['wlr_label'][:] = np.vstack([np.ones([m, n]), np.zeros([m, n])])
exe2.arg_dict['lr_label'][:] = np.vstack([np.ones([m, n]), np.zeros([m, n])])
exe1.forward(is_train=True)
exe2.forward(is_train=True)
print('Weighted Logistic Regression output:')
print(exe1.outputs[0].asnumpy())
print('Logistic Regression output:')
print(exe2.outputs[0].asnumpy())
exe1.backward()
exe2.backward()
print('Weighted Logistic Regression gradients:')
print(exe1.grad_dict['data'].asnumpy())
print('Logistic Regression gradients:')
print(exe2.grad_dict['data'].asnumpy())
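    # Note (interpretation of the output above): backward() multiplies the gradient of
    # negative-label rows by neg_grad_scale (0.1 here) while positive-label rows keep
    # pos_grad_scale (1), so the custom op down-weights the negative class, whereas
    # LogisticRegressionOutput treats both classes equally.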
|
notebook/opencv_videocapture_camera.py | vhn0912/python-snippets | 174 | 12619829 | import cv2
cap_cam = cv2.VideoCapture(0)
print(type(cap_cam))
# <class 'cv2.VideoCapture'>
print(cap_cam.isOpened())
# True
cap_cam_wrong = cv2.VideoCapture(1)
print(type(cap_cam_wrong))
# <class 'cv2.VideoCapture'>
print(cap_cam_wrong.isOpened())
# False
cap_cam.release()
|
findsmallbaseball.py | leidix/images-to-osm | 487 | 12619840 | <filename>findsmallbaseball.py
import imagestoosm.config as cfg
import os
import QuadKey.quadkey as quadkey
import numpy as np
import shapely.geometry as geometry
from skimage import draw
from skimage import io
import csv
# load up the OSM features into hash of arrays of polygons, in pixels
for classDir in os.listdir(cfg.rootOsmDir) :
if ( classDir == 'baseball') :
classDirFull = os.path.join( cfg.rootOsmDir,classDir)
for fileName in os.listdir(classDirFull) :
fullPath = os.path.join( cfg.rootOsmDir,classDir,fileName)
with open(fullPath, "rt") as csvfile:
csveader = csv.reader(csvfile, delimiter='\t')
pts = []
for row in csveader:
latLot = (float(row[0]),float(row[1]))
pixel = quadkey.TileSystem.geo_to_pixel(latLot,cfg.tileZoom)
pts.append(pixel)
poly = geometry.Polygon(pts);
areaMeters = poly.area * 0.596 *0.596;
print("{}\t{}".format(fileName,areaMeters))
|
RecoRomanPot/RecoFP420/test/data/runEDTrack_cfg.py | ckamtsikis/cmssw | 852 | 12619843 | <gh_stars>100-1000
import FWCore.ParameterSet.Config as cms
process = cms.Process("TrackFP420Test")
process.load("FWCore.MessageService.MessageLogger_cfi")
process.load("Geometry.FP420CommonData.FP420GeometryXML_cfi")
process.load("RecoRomanPot.RecoFP420.FP420Track_cfi")
process.RandomNumberGeneratorService = cms.Service("RandomNumberGeneratorService",
moduleSeeds = cms.PSet(
g4SimHits = cms.untracked.uint32(11),
mix = cms.untracked.uint32(12345),
VtxSmeared = cms.untracked.uint32(98765432)
),
sourceSeed = cms.untracked.uint32(123456789)
)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(200)
)
process.source = cms.Source("PoolSource",
debugFlag = cms.untracked.bool(True),
debugVebosity = cms.untracked.uint32(100),
fileNames = cms.untracked.vstring('file:testCLevent.root')
)
process.o1 = cms.OutputModule("PoolOutputModule",
fileName = cms.untracked.string('testTRevent.root')
)
process.Timing = cms.Service("Timing")
process.Tracer = cms.Service("Tracer")
process.p1 = cms.Path(process.FP420Track)
process.outpath = cms.EndPath(process.o1)
process.schedule = cms.Schedule(process.p1,process.outpath)
process.MessageLogger.cerr.default.limit = 10
|
grove/button/button_gpio.py | Hansen0314/grove.py | 122 | 12619858 | <filename>grove/button/button_gpio.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# The MIT License (MIT)
#
# Grove Base Hat for the Raspberry Pi, used to connect grove sensors.
# Copyright (C) 2018 Seeed Technology Co.,Ltd.
#
'''
This is the code for GPIO Button event detection.
'''
from __future__ import division
import time
import threading
from grove.button import Button
from grove.gpio import GPIO
__all__ = ["ButtonTypedGpio"]
_CYCLE_PERIOD = 0.02 # 20 ms
_SINGLE_KEY_TM = 0.03 # 30 ms
_KEY_INTERVAL = 0.3 # 300 ms
_LONG_KEY_TM = 2.0 # 2s
class ButtonTypedGpio(Button):
'''
GPIO Button Class
    provides event checking ability to derived classes,
    and should not be used directly by the end user.
The checking events include:
- Button.EV_SINGLE_CLICK
- Button.EV_DOUBLE_CLICK
- Button.EV_LONG_PRESS
- Button.EV_LEVEL_CHANGED
Args:
pin(int) : GPIO pin number the button connected.
low_pressed(bool): optional, default True
            True if the button gpio level is low when pressed.
False if level high when pressed
'''
# all key states in FSM
KEY_STATE_IDLE = 0
KEY_STATE_DOWN = 1
KEY_STATE_ONESHOT = 2
KEY_STATE_LONG = 3
def __init__(self, pin, low_pressed = True):
super(ButtonTypedGpio, self).__init__(pin)
self.__low_press = low_pressed
self.__state = self.KEY_STATE_IDLE
self.__duration = 0.0
self.__distance = _KEY_INTERVAL
self.__thrd_exit = False
self.__thrd = None
self.__gpio = GPIO(pin, GPIO.IN)
self.__gpio.on_event = self.__gpio_event
if self.__thrd is None or not self.__thrd.is_alive():
self.__thrd = threading.Thread( \
target = ButtonTypedGpio.__thrd_chk_evt, \
args = (self,))
self.__thrd.setDaemon(True)
self.__thrd.start()
def __del__(self):
self.__thrd_exit = True
        while self.__thrd.is_alive():
            time.sleep(_CYCLE_PERIOD)
self.__thrd.join()
def is_pressed(self, index = 0):
'''
Get the button status if it's being pressed ?
Args:
index(int): optional, the arg `index` not be used.
Returns:
(bool): True if the button is being pressed.
False if not.
'''
v = self.__gpio.read()
return self.__low_press != bool(v)
# called by GPIO library
def __gpio_event(self, pin, value):
press = self.is_pressed()
tm = time.time()
self._send_event(self.EV_LEVEL_CHANGED, press, tm)
# key event FSM(finite state machine)
def __key_evt_fsm(self, dt):
r = 0
press = self.is_pressed()
self.__distance = self.__distance + dt
if self.__state == self.KEY_STATE_IDLE:
if press:
self.__duration = 0.0
self.__state = self.KEY_STATE_DOWN
elif self.__state == self.KEY_STATE_DOWN:
if press:
self.__duration = self.__duration + dt
if self.__duration >= _SINGLE_KEY_TM:
self.__state = self.KEY_STATE_ONESHOT
elif self.__state == self.KEY_STATE_ONESHOT:
if not press:
# print("distance {}".format(self.__distance))
if self.__distance >= _KEY_INTERVAL:
r = self.EV_SINGLE_CLICK
else:
r = self.EV_DOUBLE_CLICK
else:
self.__duration = self.__duration + dt
# print("duration {}".format(self.__duration))
if self.__duration >= _LONG_KEY_TM:
r = self.EV_LONG_PRESS
self.__state = self.KEY_STATE_LONG
elif self.__state == self.KEY_STATE_LONG:
if not press:
self.__distance = _KEY_INTERVAL
if not press:
self.__state = self.KEY_STATE_IDLE
if r == self.EV_DOUBLE_CLICK:
self.__distance = _KEY_INTERVAL
elif r == self.EV_SINGLE_CLICK:
self.__distance = 0.0
return r, press
# Thread to check events
def __thrd_chk_evt(self):
'''
# prevent dither
time.sleep(0.01)
v = self.__gpio.read()
if self.__low_press == bool(v):
return
'''
self.__last_time = time.time();
while not self.__thrd_exit:
# or self.__state != self.KEY_STATE_IDLE:
t = time.time()
dt, self.__last_time = t - self.__last_time, t
r, pressed = self.__key_evt_fsm(dt)
if r:
self._send_event(r, pressed, t)
time.sleep(_CYCLE_PERIOD)
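# Example usage (sketch; pin 12 is an assumption for a Grove Base Hat GPIO port):
#   btn = ButtonTypedGpio(12)
#   while True:
#       print("pressed" if btn.is_pressed() else "released")
#       time.sleep(0.5)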
|
pyNastran/gui/test/all_tests_gui.py | ACea15/pyNastran | 293 | 12619892 | <reponame>ACea15/pyNastran
"""with pyQt5/pySide2 and vtk"""
#import os
import sys
from pyNastran.gui.menus.test.test_about import *
# if 'XVFB' in os.environ or sys.platform == 'win32': # XVFB is for TravisCI and doesn't work
if sys.platform == 'win32' and len(sys.argv) == 1:
from pyNastran.gui.menus.test.test_groups import *
if __name__ == "__main__": # pragma: no cover
import unittest
unittest.main()
|
tests/errors/semantic/blocking/ex1.py | dina-fouad/pyccel | 206 | 12619929 | <gh_stars>100-1000
# pylint: disable=missing-function-docstring, missing-module-docstring/
# coding: utf-8
x = 1
e1 = x + a
e3 = f(x) + 1
# TODO e4 not working yet. we must get 2 errors
#e4 = f(x,y) + 1
|
zeus/networks/pytorch/heads/auxiliary_head.py | shaido987/vega | 240 | 12619942 | <reponame>shaido987/vega<gh_stars>100-1000
# -*- coding:utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""AuxiliaryHead for NAGO."""
import torch.nn as nn
from zeus.common import ClassType, ClassFactory
@ClassFactory.register(ClassType.NETWORK)
class AuxiliaryHead(nn.Module):
"""Auxiliary head."""
def __init__(self, C, num_classes, large_images):
"""Assuming input size 8x8 if large_images then the input will be 14x14."""
super(AuxiliaryHead, self).__init__()
k = 4
if large_images:
k = 7
self.features = nn.Sequential(
nn.ReLU(inplace=True),
nn.AvgPool2d(k, stride=k, padding=0),
nn.Conv2d(C, 128, 1, bias=False),
nn.BatchNorm2d(128),
nn.ReLU(inplace=True),
nn.Conv2d(128, 768, 2, bias=False),
nn.BatchNorm2d(768),
nn.ReLU(inplace=True)
)
self.classifier = nn.Linear(768, num_classes)
def forward(self, x):
"""Forward method."""
x = self.features(x)
x = self.classifier(x.view(x.size(0), -1))
return x
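# Example (sketch; the channel count, batch size and class count are assumptions):
#   import torch
#   head = AuxiliaryHead(C=256, num_classes=10, large_images=False)
#   logits = head(torch.randn(4, 256, 8, 8))   # -> shape (4, 10)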
|
refinery/bnpy/bnpy-dev/bnpy/allocmodel/mix/DPMixModel.py | csa0001/Refinery | 103 | 12619950 | <filename>refinery/bnpy/bnpy-dev/bnpy/allocmodel/mix/DPMixModel.py
'''
DPMixModel.py
Bayesian parametric mixture model with a unbounded number of components K
Attributes
-------
K : # of components
alpha0 : scalar concentration hyperparameter of Dirichlet process prior
qalpha0 : K-length vector, params for variational factor q(v)
qalpha1 : K-length vector, params for variational factor q(v)
q(v[k]) ~ Beta(qalpha1[k], qalpha0[k])
truncType : str type of truncation for the Dirichlet Process
'z' : truncate on the assignments [default]
or 'v' : truncate stick-breaking distribution
'''
import numpy as np
from bnpy.allocmodel import AllocModel
from bnpy.suffstats import SuffStatBag
from bnpy.util import NumericUtil
from bnpy.util import gammaln, digamma, EPS
class DPMixModel(AllocModel):
######################################################### Constructors
#########################################################
def __init__(self, inferType, priorDict=None):
if inferType == 'EM':
raise ValueError('EM not supported for DPMixModel')
self.inferType = inferType
if priorDict is None:
self.alpha0 = 1.0 # Uniform!
self.alpha1 = 1.0
self.truncType = 'z'
else:
self.set_prior(priorDict)
self.K = 0
def set_prior(self, PriorParamDict):
self.alpha1 = 1.0
self.alpha0 = PriorParamDict['alpha0']
self.truncType = PriorParamDict['truncType']
def set_helper_params( self ):
''' Set dependent attributes given primary global params.
For DP mixture, this means precomputing digammas.
'''
DENOM = digamma(self.qalpha0 + self.qalpha1)
self.ElogV = digamma(self.qalpha1) - DENOM
self.Elog1mV = digamma(self.qalpha0) - DENOM
if self.truncType == 'v':
self.qalpha1[-1] = 1
self.qalpha0[-1] = EPS # avoid digamma(0), which is way too HUGE
self.ElogV[-1] = 0 # log(1) => 0
self.Elog1mV[-1] = np.log(1e-40) # log(0) => -INF, never used
# Calculate expected mixture weights E[ log w_k ]
self.Elogw = self.ElogV.copy() #copy so we can do += without modifying ElogV
self.Elogw[1:] += self.Elog1mV[:-1].cumsum()
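    # For reference, the cumulative sum above implements the stick-breaking identity
    #   E[log w_k] = E[log V_k] + sum_{j<k} E[log(1 - V_j)]
    # so Elogw holds the expected log mixture weights under q(v).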
######################################################### Accessors
#########################################################
def get_keys_for_memoized_local_params(self):
''' Return list of string names of the LP fields
that this object needs to memoize across visits to a particular batch
'''
return list()
######################################################### Local Params
#########################################################
def calc_local_params(self, Data, LP, **kwargs):
''' Calculate local parameters for each data item and each component.
This is part of the E-step.
Args
-------
Data : bnpy data object with Data.nObs observations
LP : local param dict with fields
E_log_soft_ev : Data.nObs x K array
E_log_soft_ev[n,k] = log p(data obs n | comp k)
Returns
-------
LP : local param dict with fields
resp : Data.nObs x K array whose rows sum to one
resp[n,k] = posterior responsibility that comp. k has for data n
'''
lpr = LP['E_log_soft_ev']
lpr += self.Elogw
# Calculate exp in numerically stable manner (first subtract the max)
# perform this in-place so no new allocations occur
NumericUtil.inplaceExpAndNormalizeRows(lpr)
LP['resp'] = lpr
assert np.allclose(lpr.sum(axis=1), 1)
return LP
######################################################### Suff Stats
#########################################################
def get_global_suff_stats(self, Data, LP,
doPrecompEntropy=False,
doPrecompMergeEntropy=False, mPairIDs=None):
''' Calculate the sufficient statistics for global parameter updates
Only adds stats relevant for this allocModel.
Other stats are added by the obsModel.
Args
-------
Data : bnpy data object
LP : local param dict with fields
resp : Data.nObs x K array,
where resp[n,k] = posterior resp of comp k
doPrecompEntropy : boolean flag
indicates whether to precompute ELBO terms in advance
used for memoized learning algorithms (moVB)
doPrecompMergeEntropy : boolean flag
indicates whether to precompute ELBO terms in advance
for all possible merges of pairs of components
used for optional merge moves
Returns
-------
SS : SuffStats for K components, with field
N : vector of length-K,
effective number of observations assigned to each comp
'''
Nvec = np.sum(LP['resp'], axis=0)
SS = SuffStatBag(K=Nvec.size, D=Data.dim)
SS.setField('N', Nvec, dims=('K'))
if doPrecompEntropy:
ElogqZ_vec = self.E_logqZ(LP)
SS.setELBOTerm('ElogqZ', ElogqZ_vec, dims=('K'))
if doPrecompMergeEntropy:
# Hmerge : KxK matrix of entropies for all possible pair-wise merges
# for example, if we had only 3 components {0,1,2}
# Hmerge = [ 0 H(0,1) H(0,2)
# 0 0 H(1,2)
# 0 0 0 ]
# where H(i,j) is entropy if components i and j merged.
Hmerge = np.zeros((self.K, self.K))
for jj in range(self.K):
compIDs = np.arange(jj+1, self.K)
Rcombo = LP['resp'][:,jj][:,np.newaxis] + LP['resp'][:,compIDs]
Hmerge[jj,compIDs] = np.sum(Rcombo*np.log(Rcombo+EPS), axis=0)
SS.setMergeTerm('ElogqZ', Hmerge, dims=('K','K'))
return SS
######################################################### Global Params
#########################################################
def update_global_params_VB( self, SS, **kwargs ):
''' Updates global params (stick-breaking Beta params qalpha1, qalpha0)
for conventional VB learning algorithm.
New parameters have exactly the number of components specified by SS.
'''
self.K = SS.K
qalpha1 = self.alpha1 + SS.N
qalpha0 = self.alpha0 * np.ones(self.K)
qalpha0[:-1] += SS.N[::-1].cumsum()[::-1][1:]
self.qalpha1 = qalpha1
self.qalpha0 = qalpha0
self.set_helper_params()
def update_global_params_soVB( self, SS, rho, **kwargs ):
''' Update global params (stick-breaking Beta params qalpha1, qalpha0).
for stochastic online VB.
'''
assert self.K == SS.K
qalpha1 = self.alpha1 + SS.N
qalpha0 = self.alpha0 * np.ones( self.K )
qalpha0[:-1] += SS.N[::-1].cumsum()[::-1][1:]
self.qalpha1 = rho * qalpha1 + (1-rho) * self.qalpha1
self.qalpha0 = rho * qalpha0 + (1-rho) * self.qalpha0
self.set_helper_params()
def set_global_params(self, hmodel=None, K=None, qalpha1=None,
qalpha0=None, **kwargs):
''' Directly set global parameters qalpha0, qalpha1 to provided values
'''
if hmodel is not None:
self.K = hmodel.allocModel.K
self.qalpha1 = hmodel.allocModel.qalpha1
self.qalpha0 = hmodel.allocModel.qalpha0
self.set_helper_params()
return
if type(qalpha1) != np.ndarray or qalpha1.size != K or qalpha0.size != K:
raise ValueError("Bad DP Parameters")
self.K = K
self.qalpha1 = qalpha1
self.qalpha0 = qalpha0
self.set_helper_params()
######################################################### Evidence
#########################################################
def calc_evidence(self, Data, SS, LP=None ):
'''
'''
evV = self.E_logpV() - self.E_logqV()
if SS.hasELBOTerm('ElogqZ'):
evZq = np.sum(SS.getELBOTerm('ElogqZ'))
else:
evZq = np.sum(self.E_logqZ(LP))
if SS.hasAmpFactor():
evZ = self.E_logpZ(SS) - SS.ampF * evZq
else:
evZ = self.E_logpZ(SS) - evZq
return evZ + evV
def E_logpZ(self, SS):
return np.inner( SS.N, self.Elogw )
def E_logqZ(self, LP):
return np.sum(LP['resp'] * np.log(LP['resp']+EPS), axis=0)
def E_logpV( self ):
logNormC = gammaln(self.alpha0 + self.alpha1) \
- gammaln(self.alpha0) - gammaln(self.alpha1)
logBetaPDF = (self.alpha1-1)*self.ElogV + (self.alpha0-1)*self.Elog1mV
if self.truncType == 'z':
return self.K*logNormC + logBetaPDF.sum()
elif self.truncType == 'v':
return self.K*logNormC + logBetaPDF[:-1].sum()
def E_logqV( self ):
logNormC = gammaln(self.qalpha0 + self.qalpha1) \
- gammaln(self.qalpha0) - gammaln(self.qalpha1)
logBetaPDF = (self.qalpha1-1)*self.ElogV + (self.qalpha0-1)*self.Elog1mV
if self.truncType == 'z':
return logNormC.sum() + logBetaPDF.sum()
elif self.truncType == 'v':
# skip last entry because entropy of Beta(1,0) = 0
return logNormC[:-1].sum() + logBetaPDF[:-1].sum()
######################################################### IO Utils
######################################################### for humans
def get_info_string( self):
''' Returns one-line human-readable terse description of this object
'''
msgPattern = 'DP mixture with K=%d. Concentration alpha0= %.2f'
return msgPattern % (self.K, self.alpha0)
######################################################### IO Utils
######################################################### for machines
def to_dict(self):
return dict(qalpha1=self.qalpha1, qalpha0=self.qalpha0)
def from_dict(self, myDict):
self.inferType = myDict['inferType']
self.K = myDict['K']
self.qalpha1 = myDict['qalpha1']
self.qalpha0 = myDict['qalpha0']
        if self.qalpha1.ndim == 0:
            self.qalpha1 = self.qalpha1[np.newaxis]
if self.qalpha0.ndim == 0:
self.qalpha0 = self.qalpha0[np.newaxis]
self.set_helper_params()
def get_prior_dict(self):
return dict(alpha1=self.alpha1, alpha0=self.alpha0, K=self.K,
truncType=self.truncType)
|
clubManagement/admin.py | akshaya9/fosswebsite | 369 | 12620033 | # -*- coding: utf-8 -*-
# created by <NAME>, <EMAIL>
from __future__ import unicode_literals
from django.contrib import admin
from clubManagement.models import Attendance, Team, Responsibility, TeamMember, \
StatusUpdate
admin.site.register(Attendance)
admin.site.register(Team)
admin.site.register(Responsibility)
admin.site.register(TeamMember)
admin.site.register(StatusUpdate)
|
segment/analytics/client.py | wbbradley/analytics-python | 168 | 12620072 | <filename>segment/analytics/client.py<gh_stars>100-1000
from datetime import datetime
from uuid import uuid4
import logging
import numbers
import atexit
from dateutil.tz import tzutc
from six import string_types
from segment.analytics.utils import guess_timezone, clean
from segment.analytics.consumer import Consumer
from segment.analytics.request import post
from segment.analytics.version import VERSION
try:
import queue
except ImportError:
import Queue as queue
ID_TYPES = (numbers.Number, string_types)
class Client(object):
class DefaultConfig(object):
write_key = None
host = None
on_error = None
debug = False
send = True
sync_mode = False
max_queue_size = 10000
gzip = False
timeout = 15
max_retries = 10
proxies = None
thread = 1
upload_interval = 0.5
upload_size = 100
"""Create a new Segment client."""
log = logging.getLogger('segment')
def __init__(self,
write_key=DefaultConfig.write_key,
host=DefaultConfig.host,
debug=DefaultConfig.debug,
max_queue_size=DefaultConfig.max_queue_size,
send=DefaultConfig.send,
on_error=DefaultConfig.on_error,
gzip=DefaultConfig.gzip,
max_retries=DefaultConfig.max_retries,
sync_mode=DefaultConfig.sync_mode,
timeout=DefaultConfig.timeout,
proxies=DefaultConfig.proxies,
thread=DefaultConfig.thread,
upload_size=DefaultConfig.upload_size,
upload_interval=DefaultConfig.upload_interval,):
require('write_key', write_key, string_types)
self.queue = queue.Queue(max_queue_size)
self.write_key = write_key
self.on_error = on_error
self.debug = debug
self.send = send
self.sync_mode = sync_mode
self.host = host
self.gzip = gzip
self.timeout = timeout
self.proxies = proxies
if debug:
self.log.setLevel(logging.DEBUG)
if sync_mode:
self.consumers = None
else:
# On program exit, allow the consumer thread to exit cleanly.
# This prevents exceptions and a messy shutdown when the
# interpreter is destroyed before the daemon thread finishes
# execution. However, it is *not* the same as flushing the queue!
# To guarantee all messages have been delivered, you'll still need
# to call flush().
if send:
atexit.register(self.join)
            self.consumers = []
            for _ in range(thread):
consumer = Consumer(
self.queue, write_key, host=host, on_error=on_error,
upload_size=upload_size, upload_interval=upload_interval,
gzip=gzip, retries=max_retries, timeout=timeout,
proxies=proxies,
)
self.consumers.append(consumer)
# if we've disabled sending, just don't start the consumer
if send:
consumer.start()
def identify(self, user_id=None, traits=None, context=None, timestamp=None,
anonymous_id=None, integrations=None, message_id=None):
traits = traits or {}
context = context or {}
integrations = integrations or {}
require('user_id or anonymous_id', user_id or anonymous_id, ID_TYPES)
require('traits', traits, dict)
msg = {
'integrations': integrations,
'anonymousId': anonymous_id,
'timestamp': timestamp,
'context': context,
'type': 'identify',
'userId': user_id,
'traits': traits,
'messageId': message_id,
}
return self._enqueue(msg)
def track(self, user_id=None, event=None, properties=None, context=None,
timestamp=None, anonymous_id=None, integrations=None,
message_id=None):
properties = properties or {}
context = context or {}
integrations = integrations or {}
require('user_id or anonymous_id', user_id or anonymous_id, ID_TYPES)
require('properties', properties, dict)
require('event', event, string_types)
msg = {
'integrations': integrations,
'anonymousId': anonymous_id,
'properties': properties,
'timestamp': timestamp,
'context': context,
'userId': user_id,
'type': 'track',
'event': event,
'messageId': message_id,
}
return self._enqueue(msg)
def alias(self, previous_id=None, user_id=None, context=None,
timestamp=None, integrations=None, message_id=None):
context = context or {}
integrations = integrations or {}
require('previous_id', previous_id, ID_TYPES)
require('user_id', user_id, ID_TYPES)
msg = {
'integrations': integrations,
'previousId': previous_id,
'timestamp': timestamp,
'context': context,
'userId': user_id,
'type': 'alias',
'messageId': message_id,
}
return self._enqueue(msg)
def group(self, user_id=None, group_id=None, traits=None, context=None,
timestamp=None, anonymous_id=None, integrations=None,
message_id=None):
traits = traits or {}
context = context or {}
integrations = integrations or {}
require('user_id or anonymous_id', user_id or anonymous_id, ID_TYPES)
require('group_id', group_id, ID_TYPES)
require('traits', traits, dict)
msg = {
'integrations': integrations,
'anonymousId': anonymous_id,
'timestamp': timestamp,
'groupId': group_id,
'context': context,
'userId': user_id,
'traits': traits,
'type': 'group',
'messageId': message_id,
}
return self._enqueue(msg)

    def page(self, user_id=None, category=None, name=None, properties=None,
             context=None, timestamp=None, anonymous_id=None,
             integrations=None, message_id=None):
        properties = properties or {}
        context = context or {}
        integrations = integrations or {}
        require('user_id or anonymous_id', user_id or anonymous_id, ID_TYPES)
        require('properties', properties, dict)
        if name:
            require('name', name, string_types)
        if category:
            require('category', category, string_types)
        msg = {
            'integrations': integrations,
            'anonymousId': anonymous_id,
            'properties': properties,
            'timestamp': timestamp,
            'category': category,
            'context': context,
            'userId': user_id,
            'type': 'page',
            'name': name,
            'messageId': message_id,
        }
        return self._enqueue(msg)

    def screen(self, user_id=None, category=None, name=None, properties=None,
               context=None, timestamp=None, anonymous_id=None,
               integrations=None, message_id=None):
        properties = properties or {}
        context = context or {}
        integrations = integrations or {}
        require('user_id or anonymous_id', user_id or anonymous_id, ID_TYPES)
        require('properties', properties, dict)
        if name:
            require('name', name, string_types)
        if category:
            require('category', category, string_types)
        msg = {
            'integrations': integrations,
            'anonymousId': anonymous_id,
            'properties': properties,
            'timestamp': timestamp,
            'category': category,
            'context': context,
            'userId': user_id,
            'type': 'screen',
            'name': name,
            'messageId': message_id,
        }
        return self._enqueue(msg)

    def _enqueue(self, msg):
        """Push a new `msg` onto the queue, return `(success, msg)`"""
        timestamp = msg['timestamp']
        if timestamp is None:
            timestamp = datetime.utcnow().replace(tzinfo=tzutc())
        message_id = msg.get('messageId')
        if message_id is None:
            message_id = uuid4()

        require('integrations', msg['integrations'], dict)
        require('type', msg['type'], string_types)
        require('timestamp', timestamp, datetime)
        require('context', msg['context'], dict)

        # add common
        timestamp = guess_timezone(timestamp)
        msg['timestamp'] = timestamp.isoformat()
        msg['messageId'] = stringify_id(message_id)
        msg['context']['library'] = {
            'name': 'analytics-python',
            'version': VERSION
        }
        msg['userId'] = stringify_id(msg.get('userId', None))
        msg['anonymousId'] = stringify_id(msg.get('anonymousId', None))

        msg = clean(msg)
        self.log.debug('queueing: %s', msg)

        # if send is False, return msg as if it was successfully queued
        if not self.send:
            return True, msg

        if self.sync_mode:
            self.log.debug('enqueued with blocking %s.', msg['type'])
            post(self.write_key, self.host, gzip=self.gzip,
                 timeout=self.timeout, proxies=self.proxies, batch=[msg])
            return True, msg

        try:
            self.queue.put(msg, block=False)
            self.log.debug('enqueued %s.', msg['type'])
            return True, msg
        except queue.Full:
            self.log.warning('analytics-python queue is full')
            return False, msg

    def flush(self):
        """Forces a flush from the internal queue to the server"""
        queue = self.queue
        size = queue.qsize()
        queue.join()
        # Note that this message may not be precise, because of threading.
        self.log.debug('successfully flushed about %s items.', size)

    def join(self):
        """Ends the consumer thread once the queue is empty.
        Blocks execution until finished
        """
        if not self.consumers:
            # In sync_mode there are no consumer threads to wait for.
            return
        for consumer in self.consumers:
            consumer.pause()
            try:
                consumer.join()
            except RuntimeError:
                # consumer thread has not started
                pass

    def shutdown(self):
        """Flush all messages and cleanly shutdown the client"""
        self.flush()
        self.join()


def require(name, field, data_type):
    """Require that the named `field` has the right `data_type`"""
    if not isinstance(field, data_type):
        msg = '{0} must have {1}, got: {2}'.format(name, data_type, field)
        raise AssertionError(msg)


def stringify_id(val):
    if val is None:
        return None
    if isinstance(val, string_types):
        return val
    return str(val)
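

# --- Illustrative usage sketch (editor's addition, not part of the original
# module). A minimal, hedged example of driving the queueing client defined
# above; it assumes the enclosing class is exported as ``Client`` (as in
# analytics-python). The write key is a placeholder, and ``send=False`` keeps
# the example offline so no network calls are made.
if __name__ == '__main__':
    client = Client('PLACEHOLDER_WRITE_KEY', send=False)
    ok, msg = client.track(user_id='user-123', event='Signed Up',
                           properties={'plan': 'free'})
    # `ok` is False only when the internal queue is full.
    client.flush()     # block until queued messages have been processed
    client.shutdown()  # flush once more and stop any consumer threads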
|
pulsar/common/networking.py | N0nent1ty/pulsar | 326 | 12620081 |
import socket


class Client:

    def __init__(self, host, port, timeout, bsize):
        # network client configuration
        self.connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.connection.connect((host, port))
        self.connection.settimeout(timeout)
        self.bsize = bsize
        print("\n[*] Connected to server...")

    def send(self, msg):
        self.connection.send(msg)

    def recv(self):
        return self.connection.recv(self.bsize)

    def close(self):
        self.connection.close()

    def settimeout(self, timeout):
        self.connection.settimeout(timeout)


class Server:

    def __init__(self, host, port, timeout, bsize):
        backlog = 1
        self.bsize = bsize
        self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.s.bind((host, port))
        self.s.listen(backlog)

    def send(self, msg):
        self.connection.send(msg)
        # print("[*] Sent msg:\n{}".format(msg))

    def recv(self):
        msg = self.connection.recv(self.bsize)
        # print("[*] Received msg:\n{}".format(msg))
        return msg

    def close(self):
        self.connection.close()

    def accept(self):
        print("\n[ ] Waiting for client connection...")
        self.connection, self.address = self.s.accept()
        print("[*] Connected to client...")

    def settimeout(self, timeout):
        self.connection.settimeout(timeout)
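

# --- Illustrative usage sketch (editor's addition, not part of the original
# module). A minimal loopback round-trip between the two classes above; the
# port number is an arbitrary placeholder, and the echo side runs in a helper
# thread so the example is self-contained in one process.
if __name__ == '__main__':
    import threading

    server = Server('127.0.0.1', 9999, timeout=5.0, bsize=4096)

    def _echo_once():
        server.accept()              # blocks until the client connects
        server.send(server.recv())   # echo the payload straight back
        server.close()

    worker = threading.Thread(target=_echo_once)
    worker.start()

    client = Client('127.0.0.1', 9999, timeout=5.0, bsize=4096)
    client.send(b'ping')
    print(client.recv())             # expected output: b'ping'
    client.close()
    worker.join()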
|
emails/testsuite/message/helpers.py | MrTango/python-emails | 348 | 12620085 | <filename>emails/testsuite/message/helpers.py
# coding: utf-8
from __future__ import unicode_literals
import os
import emails
from emails.template import JinjaTemplate

TO_EMAIL = os.environ.get('SMTP_TEST_MAIL_TO') or '<EMAIL>'
FROM_EMAIL = os.environ.get('SMTP_TEST_MAIL_FROM') or '<EMAIL>'

ROOT = os.path.dirname(__file__)


def common_email_data(**kw):
    T = JinjaTemplate
    data = {'charset': 'utf-8',
            'subject': T('Olá {{name}}'),
            'mail_from': ('LÖVÅS HÅVET', FROM_EMAIL),
            'mail_to': ('Pestävä erillään', TO_EMAIL),
            'html': T('<h1>Olá {{name}}!</h1><p>O Lorem Ipsum é um texto modelo da indústria tipográfica e de impressão.'),
            'text': T('Olá, {{name}}!\nO Lorem Ipsum é um texto modelo da indústria tipográfica e de impressão.'),
            'headers': {'X-Mailer': 'python-emails'},
            'message_id': emails.MessageID(),
            'attachments': [
                {'data': 'Sample text', 'filename': 'κατάσχεση.txt'},
                {'data': open(os.path.join(ROOT, 'data/pushkin.jpg'), 'rb'), 'filename': 'Пушкин А.С.jpg'}
            ]}
    if kw:
        data.update(kw)
    return data
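

# --- Illustrative usage sketch (editor's addition, not part of the original
# helpers). A hedged guess at how these defaults are consumed by the tests:
# the dict is expanded into an emails.Message, with the attachments attached
# explicitly to keep the constructor call conservative. The commented-out
# send() uses placeholder SMTP settings, not a real relay.
if __name__ == '__main__':
    data = common_email_data(subject='Plain subject override')
    attachments = data.pop('attachments')
    message = emails.Message(**data)
    for att in attachments:
        message.attach(**att)
    # message.send(render={'name': 'World'},
    #              smtp={'host': 'localhost', 'port': 25, 'timeout': 5})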
|
src/SALib/util/results.py | zjzh/SALib | 573 | 12620092 | import pandas as pd # type: ignore
from SALib.plotting.bar import plot as barplot


class ResultDict(dict):
    '''Dictionary holding analysis results.

    Conversion methods (e.g. to Pandas DataFrames) to be attached as necessary
    by each implementing method.
    '''

    def __init__(self, *args, **kwargs):
        super(ResultDict, self).__init__(*args, **kwargs)

    def to_df(self):
        '''Convert dict structure into Pandas DataFrame.'''
        return pd.DataFrame({k: v for k, v in self.items() if k != 'names'},
                            index=self['names'])

    def plot(self, ax=None):
        '''Create bar chart of results.'''
        Si_df = self.to_df()

        if isinstance(Si_df, (list, tuple)):
            import matplotlib.pyplot as plt  # type: ignore

            if ax is None:
                fig, ax = plt.subplots(1, len(Si_df))

            for idx, f in enumerate(Si_df):
                barplot(f, ax=ax[idx])

            axes = ax
        else:
            axes = barplot(Si_df, ax=ax)

        return axes
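

# --- Illustrative usage sketch (editor's addition, not part of the original
# module). A minimal, hedged example of the DataFrame conversion; the keys
# and values below are invented for illustration and are not real SALib
# analysis output.
if __name__ == '__main__':
    res = ResultDict(names=['x1', 'x2', 'x3'],
                     S1=[0.31, 0.44, 0.25],
                     S1_conf=[0.05, 0.06, 0.04])
    print(res.to_df())
    # Approximate output:
    #       S1  S1_conf
    # x1  0.31     0.05
    # x2  0.44     0.06
    # x3  0.25     0.04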
|
tests/test_init.py | acivgin1/sphinx-js | 103 | 12620112 | import pytest
from sphinx.errors import SphinxError

from sphinx_js import root_or_fallback


def test_relative_path_root():
    """Make sure the computation of the root path for relative JS entity
    pathnames is right."""
    # Fall back to the only source path if not specified.
    assert root_or_fallback(None, ['a']) == 'a'
    with pytest.raises(SphinxError):
        root_or_fallback(None, ['a', 'b'])
    assert root_or_fallback('smoo', ['a']) == 'smoo'
|