repo_name (string, 5-92 chars) | path (string, 4-232 chars) | copies (string, 19 classes) | size (string, 4-7 chars) | content (string, 721-1.04M chars) | license (string, 15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
Iconik/eve-suite | src/model/static/chr/bloodline.py | 1 | 1330 |
from model.flyweight import Flyweight
from model.static.database import database
class Bloodline(Flyweight):
def __init__(self,bloodline_id):
#prevents reinitializing
if "_inited" in self.__dict__:
return
self._inited = None
#prevents reinitializing
self.bloodline_id = bloodline_id
cursor = database.get_cursor(
"select * from chrBloodlines where bloodlineID={};".format(
self.bloodline_id))
row = cursor.fetchone()
self.bloodline_name = row["bloodlineName"]
self.race_id = row["raceID"]
self.description = row["description"]
self.male_description = row["maleDescription"]
self.female_description = row["femaleDescription"]
self.ship_type_id = row["shipTypeID"]
self.corporation_id = row["corporationID"]
self.perception = row["perception"]
self.willpower = row["willpower"]
self.charisma = row["charisma"]
self.memory = row["memory"]
self.intelligence = row["intelligence"]
self.icon_id = row["iconID"]
self.short_description = row["shortDescription"]
self.short_male_description = row["shortMaleDescription"]
self.short_female_description = row["shortFemaleDescription"]
cursor.close()
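# Illustrative usage sketch (not part of the original file). It assumes the
# static database has already been opened so that database.get_cursor() can
# serve queries; the bloodline id below is made up for demonstration.
if __name__ == "__main__":
    bloodline = Bloodline(5)   # first construction loads and caches the row
    again = Bloodline(5)       # the Flyweight base is expected to reuse the cached state
    print(bloodline.bloodline_name, bloodline.race_id, bloodline.ship_type_id)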
| gpl-3.0 | 4,880,972,858,588,442,000 | 35.944444 | 71 | 0.627068 | false |
aspyatkin/assetoolz | assetoolz/__init__.py | 1 | 3894 |
from __future__ import absolute_import
import argparse
import os
from .detour import detour_directory
from .cache import Cache
from .db import entry_point
from .appconf import AppConfHelper
from .assets import AssetCollection
import yaml
from .utils import load_file
from .i18n import I18nHelper
from .resource import ResourceSet
class AssetSettings:
def __init__(self, data):
self._data = data
@property
def source(self):
return self._data['source']
@property
def target(self):
return self._data['target']
@property
def languages(self):
return None
class LocalizedAssetSettings(AssetSettings):
@property
def languages(self):
return self._data['languages']
class Settings:
def __init__(self, conf_file, verbose, force):
self._data = yaml.load(load_file(conf_file))
self._html = LocalizedAssetSettings(self._data['html'])
self._images = AssetSettings(self._data['images'])
self._fonts = AssetSettings(self._data['fonts'])
self._scripts = AssetSettings(self._data['scripts'])
self._stylesheets = AssetSettings(self._data['stylesheets'])
self._i18n_helper = I18nHelper(self._data['i18n'])
self._resources = ResourceSet(self._data['resource'])
self._verbose = verbose
self._force = force
@property
def verbose(self):
return self._verbose
@property
def cdn_path(self):
return self._data['cdn']['path']
@property
def cdn_url(self):
return self._data['cdn']['url']
@property
def force(self):
return self._force
@property
def minify(self):
return self._data['minify']
@property
def yuicompressor_file(self):
return self._data['yuicompressor_file']
@property
def htmlcompressor_file(self):
return self._data['htmlcompressor_file']
@property
def coffee_bin(self):
return self._data['coffee_bin']
@property
def appconf(self):
return self._data['config']
@property
def i18n(self):
return self._data['i18n']
@property
def cache_path(self):
return self._data['cache']
@property
def partials(self):
return os.path.join(self.cache_path, "partials")
@property
def html(self):
return self._html
@property
def images(self):
return self._images
@property
def fonts(self):
return self._fonts
@property
def scripts(self):
return self._scripts
@property
def stylesheets(self):
return self._stylesheets
@property
def i18n_helper(self):
return self._i18n_helper
@property
def resources(self):
return self._resources
@entry_point
def compile(settings):
file_list = []
update_file_list = lambda x: file_list.append(x)
detour_directory(settings.html.source, update_file_list)
detour_directory(settings.scripts.source, update_file_list)
detour_directory(settings.stylesheets.source, update_file_list)
detour_directory(settings.images.source, update_file_list)
detour_directory(settings.fonts.source, update_file_list)
tool_cache = Cache()
tool_cache.check()
assets = AssetCollection(file_list, settings)
assets.pick_dependencies()
assets.build()
def main():
parser = argparse.ArgumentParser(description="assetoolz")
parser.add_argument('config', metavar='config', type=str)
parser.add_argument('-v', '--verbose', action='store_true')
parser.add_argument('-f', '--force', action='store_true')
args = parser.parse_args()
config = args.config
verbose = args.verbose
force = args.force
settings = Settings(config, verbose, force)
AppConfHelper().initialize(settings.appconf)
compile(settings)
if __name__ == '__main__':
main()
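# Illustrative configuration sketch (not part of the original file), inferred
# from the Settings accessors above. The keys match what Settings reads, but
# every path and value shown here is an assumption, not a documented default:
#
#   html:        {source: src/html, target: build/html, languages: [en, ru]}
#   images:      {source: src/images, target: build/images}
#   fonts:       {source: src/fonts, target: build/fonts}
#   scripts:     {source: src/scripts, target: build/scripts}
#   stylesheets: {source: src/stylesheets, target: build/stylesheets}
#   cdn:         {path: build/cdn, url: //cdn.example.com}
#   minify: true
#   yuicompressor_file: tools/yuicompressor.jar
#   htmlcompressor_file: tools/htmlcompressor.jar
#   coffee_bin: /usr/local/bin/coffee
#   config: {}      # values exposed through AppConfHelper
#   i18n: {}        # passed to I18nHelper
#   resource: {}    # passed to ResourceSet
#   cache: .assetoolz-cache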
| mit | 1,401,215,245,384,512,300 | 22.6 | 68 | 0.64227 | false |
danshick/httpdns | client/dns_passthrough.py | 1 | 2178 |
import socket
import requests
class DNSQuery:
def __init__(self, data):
self.data=data
self.domain=''
type = (data[2] >> 3) & 15 # Opcode bits
if type == 0: # Standard query
ind=12
len=data[ind]
while len != 0:
self.domain+=data[ind+1:ind+len+1].decode('UTF-8')+'.'
ind+=len+1
len=data[ind]
def response(self, auth_token):
packet= b''
if not self.domain:
return packet
if(self.domain[-5:] == ".lan."):
self.domain = self.domain[:-5]
#print(self.domain);
s = requests.Session();
ip = s.post('http://209.141.61.214/api/dns', data='{"domain":"'+ self.domain +'"}', headers={'Authorization':auth_token})
if(ip.status_code != 200):
#print(ip.text);
return packet
ip = ip.text
#print(ip)
packet+=self.data[:2] + b'\x81\x80'
packet+=self.data[4:6] + self.data[4:6] + b'\x00\x00\x00\x00' # Questions and Answers Counts
packet+=self.data[12:] # Original Domain Name Question
packet+= b'\xc0\x0c' # Pointer to domain name
packet+= b'\x00\x01\x00\x01\x00\x00\x00\x3c\x00\x04' # Response type, ttl and resource data length -> 4 bytes
packet+= bytes(list(map(lambda x: int(x), ip.split('.')))) # 4bytes of IP
return packet
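# Byte-layout sketch of the answer assembled above (not part of the original
# file). For a query for example.lan resolving to 10.0.0.1 (both made up):
#   id (2 bytes, copied from the query), then flags 0x81 0x80 (response, recursion available)
#   QDCOUNT copied twice so ANCOUNT == QDCOUNT, then 4 zero bytes for NSCOUNT/ARCOUNT
#   the original question section copied verbatim from offset 12
#   0xc0 0x0c            -> compression pointer back to the queried name
#   0x00 0x01 0x00 0x01  -> TYPE A, CLASS IN
#   0x00 0x00 0x00 0x3c  -> TTL of 60 seconds
#   0x00 0x04            -> RDLENGTH 4
#   0x0a 0x00 0x00 0x01  -> the four address bytes produced from ip.split('.')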
if __name__ == '__main__':
uname = input("Enter your username: ");
passwd = input("Enter your password: ");
s = requests.Session();
auth_token = s.post('http://209.141.61.214/api/login', data='{"username":"'+ uname +'", "password":"'+ passwd +'"}').text
ip='192.168.1.1'
print('pymindfakeDNS:: dom.query. 60 IN A ', ip)
udps = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
udps.bind(('',53))
try:
while 1:
try:
data, addr = udps.recvfrom(1024)
p=DNSQuery(data)
res = p.response(auth_token)
udps.sendto(res, addr)
#print('Response: ', p.domain, ' -> ', ip)
except Exception as e:
#print("Some UDP error occured.");
pass;
except KeyboardInterrupt:
print('Finished!')
udps.close()
| gpl-2.0 | 4,777,273,893,117,795,000 | 32.507692 | 125 | 0.550046 | false |
erdc-cm/air-water-vv | 2d/numericalTanks/nonlinearWaves/nonlinear_waves.py | 1 | 13481 |
"""
Nonlinear waves
"""
from proteus import Domain, Context
from proteus.mprans import SpatialTools as st
from proteus import WaveTools as wt
import math
import numpy as np
opts=Context.Options([
# predefined test cases
("water_level", 0.4, "Water level from y=0"),
# tank
("tank_dim", (7.8, 0.7), "Dimensions of the operational domain of tank (l x h)"),
("generation", True, "Generate waves at the left boundary (True/False)"),
("absorption", True, "Absorb waves at the right boundary (True/False)"),
("tank_sponge", (3.9,7.8), "Length of relaxation zones zones in m (left, right)"),
("free_slip", True, "Should tank walls have free slip conditions "
"(otherwise, no slip conditions will be applied)."),
# gravity
("g", [0, -9.81, 0], "Gravity vector in m/s^2"),
# waves
("waves", True, "Generate waves (True/False)"),
("wave_period", 2., "Period of the waves in s"),
("wave_height", 0.15, "Height of the waves in s"),
("wave_dir", (1.,0.,0.), "Direction of the waves (from left boundary)"),
("wave_wavelength",3.9, "Wave length in m"),
("wave_type", 'Fenton', "type of wavregular waves"),
("Bcoeff", np.array([0.13540388,0.02480804,0.00426381,0.00055395,0.00002809,-0.00000926,-0.00000291,-0.00000030]), "Bcoeffs taken from Fenton (1988)"),
("Ycoeff", np.array([0.10563897,0.03899903,0.01306615,0.00457401,0.00172175,0.00070315,0.00033483,0.00024142]), "Ycoeffs, taken from Fenton (1988)"),
("fast", True, "switch for fast cosh calculations in WaveTools"),
# gauges
#("gauge_output", True, "Places Gauges in tank (5 per wavelength)"),
("point_gauge_output", True, "Produce point gauge output"),
("column_gauge_output", True, "Produce column gauge output"),
("gauge_dx", 0.25, "Horizontal spacing of point gauges/column gauges in m"),
# mesh refinement
("refinement", False, "Gradual refinement"),
("he", 0.04, "Set characteristic element size in m"),
("he_max", 10, "Set maximum characteristic element size in m"),
("he_max_water", 10, "Set maximum characteristic in water phase in m"),
("refinement_freesurface", 0.1,"Set area of constant refinement around free surface (+/- value) in m"),
("refinement_caisson", 0.,"Set area of constant refinement (Box) around potential structure (+/- value) in m"),
("refinement_grading", np.sqrt(1.1*4./np.sqrt(3.))/np.sqrt(1.*4./np.sqrt(3)), "Grading of refinement/coarsening (default: 10% volume)"),
# numerical options
("gen_mesh", True, "True: generate new mesh every time. False: do not generate mesh if file exists"),
("use_gmsh", False, "True: use Gmsh. False: use Triangle/Tetgen"),
("movingDomain", False, "True/False"),
("T", 0.1, "Simulation time in s"),
("dt_init", 0.001, "Initial time step in s"),
("dt_fixed", None, "Fixed (maximum) time step"),
("timeIntegration", "backwardEuler", "Time integration scheme (backwardEuler/VBDF)"),
("cfl", 0.5 , "Target cfl"),
("nsave", 5, "Number of time steps to save per second"),
("useRANS", 0, "RANS model"),
])
# ----- CONTEXT ------ #
# waves
omega = 1.
if opts.waves is True:
period = opts.wave_period
omega = 2*np.pi/opts.wave_period
height = opts.wave_height
mwl = depth = opts.water_level
direction = opts.wave_dir
wave = wt.MonochromaticWaves(period=period, waveHeight=height, mwl=mwl, depth=depth,
g=np.array(opts.g), waveDir=direction,
wavelength=opts.wave_wavelength,
waveType=opts.wave_type,
Ycoeff=np.array(opts.Ycoeff),
Bcoeff=np.array(opts.Bcoeff),
Nf=len(opts.Bcoeff),
fast=opts.fast)
wavelength = wave.wavelength
# tank options
waterLevel = opts.water_level
tank_dim = opts.tank_dim
tank_sponge = opts.tank_sponge
# ----- DOMAIN ----- #
domain = Domain.PlanarStraightLineGraphDomain()
# refinement
smoothing = opts.he*3.
# ----- TANK ------ #
tank = st.Tank2D(domain, tank_dim)
# ----- GENERATION / ABSORPTION LAYERS ----- #
tank.setSponge(x_n=tank_sponge[0], x_p=tank_sponge[1])
dragAlpha = 5.*omega/1e-6
if opts.generation:
tank.setGenerationZones(x_n=True, waves=wave, dragAlpha=dragAlpha, smoothing = smoothing)
if opts.absorption:
tank.setAbsorptionZones(x_p=True, dragAlpha = dragAlpha)
# ----- BOUNDARY CONDITIONS ----- #
# Waves
tank.BC['x-'].setUnsteadyTwoPhaseVelocityInlet(wave, smoothing=smoothing, vert_axis=1)
# open top
tank.BC['y+'].setAtmosphere()
if opts.free_slip:
tank.BC['y-'].setFreeSlip()
tank.BC['x+'].setFreeSlip()
if not opts.generation:
tank.BC['x-'].setFreeSlip()
else: # no slip
tank.BC['y-'].setNoSlip()
tank.BC['x+'].setNoSlip()
# sponge
tank.BC['sponge'].setNonMaterial()
for bc in tank.BC_list:
bc.setFixedNodes()
# ----- GAUGES ----- #
column_gauge_locations = []
point_gauge_locations = []
if opts.point_gauge_output or opts.column_gauge_output:
gauge_y = waterLevel - 0.5 * depth
number_of_gauges = tank_dim[0] / opts.gauge_dx + 1
for gauge_x in np.linspace(0, tank_dim[0], int(number_of_gauges)):
point_gauge_locations.append((gauge_x, gauge_y, 0), )
column_gauge_locations.append(((gauge_x, 0., 0.),
(gauge_x, tank_dim[1], 0.)))
if opts.point_gauge_output:
tank.attachPointGauges('twp',
gauges=((('p',), point_gauge_locations),),
fileName='pressure_gaugeArray.csv')
if opts.column_gauge_output:
tank.attachLineIntegralGauges('vof',
gauges=((('vof',), column_gauge_locations),),
fileName='column_gauges.csv')
# ----- ASSEMBLE DOMAIN ----- #
domain.MeshOptions.use_gmsh = opts.use_gmsh
domain.MeshOptions.genMesh = opts.gen_mesh
domain.MeshOptions.he = opts.he
domain.use_gmsh = opts.use_gmsh
st.assembleDomain(domain)
# ----- REFINEMENT OPTIONS ----- #
import MeshRefinement as mr
#domain.MeshOptions = mr.MeshOptions(domain)
tank.MeshOptions = mr.MeshOptions(tank)
if opts.refinement:
grading = opts.refinement_grading
he2 = opts.he
def mesh_grading(start, he, grading):
return '{0}*{2}^(1+log((-1/{2}*(abs({1})-{0})+abs({1}))/{0})/log({2}))'.format(he, start, grading)
he_max = opts.he_max
# he_fs = he2
ecH = 3.
if opts.refinement_freesurface > 0:
box = opts.refinement_freesurface
else:
box = ecH*he2
tank.MeshOptions.refineBox(he2, he_max, -tank_sponge[0], tank_dim[0]+tank_sponge[1], waterLevel-box, waterLevel+box)
tank.MeshOptions.setRefinementFunction(mesh_grading(start='y-{0}'.format(waterLevel-box), he=he2, grading=grading))
tank.MeshOptions.setRefinementFunction(mesh_grading(start='y-{0}'.format(waterLevel+box), he=he2, grading=grading))
domain.MeshOptions.LcMax = he_max #coarse grid
if opts.use_gmsh is True and opts.refinement is True:
domain.MeshOptions.he = he_max #coarse grid
else:
domain.MeshOptions.he = he2 #coarse grid
domain.MeshOptions.LcMax = he2 #coarse grid
tank.MeshOptions.refineBox(opts.he_max_water, he_max, -tank_sponge[0], tank_dim[0]+tank_sponge[1], 0., waterLevel)
else:
domain.MeshOptions.LcMax = opts.he
mr._assembleRefinementOptions(domain)
from proteus import Comm
comm = Comm.get()
if domain.use_gmsh is True:
mr.writeGeo(domain, 'mesh', append=False)
##########################################
# Numerical Options and other parameters #
##########################################
rho_0=998.2
nu_0 =1.004e-6
rho_1=1.205
nu_1 =1.500e-5
sigma_01=0.0
g = [0., -9.81]
from math import *
from proteus import MeshTools, AuxiliaryVariables
import numpy
import proteus.MeshTools
from proteus import Domain
from proteus.Profiling import logEvent
from proteus.default_n import *
from proteus.ctransportCoefficients import smoothedHeaviside
from proteus.ctransportCoefficients import smoothedHeaviside_integral
#----------------------------------------------------
# Boundary conditions and other flags
#----------------------------------------------------
movingDomain=opts.movingDomain
checkMass=False
applyCorrection=True
applyRedistancing=True
freezeLevelSet=True
#----------------------------------------------------
# Time stepping and velocity
#----------------------------------------------------
weak_bc_penalty_constant = 10.0/nu_0#Re
dt_init = opts.dt_init
T = opts.T
nDTout = int(opts.T*opts.nsave)
timeIntegration = opts.timeIntegration
if nDTout > 0:
dt_out= (T-dt_init)/nDTout
else:
dt_out = 0
runCFL = opts.cfl
dt_fixed = opts.dt_fixed
#----------------------------------------------------
# Discretization -- input options
useOldPETSc=False
useSuperlu = not True
spaceOrder = 1
useHex = False
useRBLES = 0.0
useMetrics = 1.0
useVF = 1.0
useOnlyVF = False
useRANS = opts.useRANS # 0 -- None
# 1 -- K-Epsilon
# 2 -- K-Omega, 1998
# 3 -- K-Omega, 1988
# Input checks
if spaceOrder not in [1,2]:
print "INVALID: spaceOrder" + spaceOrder
sys.exit()
if useRBLES not in [0.0, 1.0]:
print "INVALID: useRBLES" + useRBLES
sys.exit()
if useMetrics not in [0.0, 1.0]:
print "INVALID: useMetrics"
sys.exit()
# Discretization
nd = 2
if spaceOrder == 1:
hFactor=1.0
if useHex:
basis=C0_AffineLinearOnCubeWithNodalBasis
elementQuadrature = CubeGaussQuadrature(nd,3)
elementBoundaryQuadrature = CubeGaussQuadrature(nd-1,3)
else:
basis=C0_AffineLinearOnSimplexWithNodalBasis
elementQuadrature = SimplexGaussQuadrature(nd,3)
elementBoundaryQuadrature = SimplexGaussQuadrature(nd-1,3)
#elementBoundaryQuadrature = SimplexLobattoQuadrature(nd-1,1)
elif spaceOrder == 2:
hFactor=0.5
if useHex:
basis=C0_AffineLagrangeOnCubeWithNodalBasis
elementQuadrature = CubeGaussQuadrature(nd,4)
elementBoundaryQuadrature = CubeGaussQuadrature(nd-1,4)
else:
basis=C0_AffineQuadraticOnSimplexWithNodalBasis
elementQuadrature = SimplexGaussQuadrature(nd,4)
elementBoundaryQuadrature = SimplexGaussQuadrature(nd-1,4)
# Numerical parameters
sc = 0.5 # default: 0.5. Test: 0.25
sc_beta = 1.5 # default: 1.5. Test: 1.
epsFact_consrv_diffusion = 1. # default: 1.0. Test: 0.1
ns_forceStrongDirichlet = False
backgroundDiffusionFactor=0.01
if useMetrics:
ns_shockCapturingFactor = sc
ns_lag_shockCapturing = True
ns_lag_subgridError = True
ls_shockCapturingFactor = sc
ls_lag_shockCapturing = True
ls_sc_uref = 1.0
ls_sc_beta = sc_beta
vof_shockCapturingFactor = sc
vof_lag_shockCapturing = True
vof_sc_uref = 1.0
vof_sc_beta = sc_beta
rd_shockCapturingFactor =sc
rd_lag_shockCapturing = False
epsFact_density = 3.
epsFact_viscosity = epsFact_curvature = epsFact_vof = epsFact_consrv_heaviside = epsFact_consrv_dirac = epsFact_density
epsFact_redistance = 0.33
epsFact_consrv_diffusion = epsFact_consrv_diffusion
redist_Newton = True#False
kappa_shockCapturingFactor = sc
kappa_lag_shockCapturing = True
kappa_sc_uref = 1.0
kappa_sc_beta = sc_beta
dissipation_shockCapturingFactor = sc
dissipation_lag_shockCapturing = True
dissipation_sc_uref = 1.0
dissipation_sc_beta = sc_beta
else:
ns_shockCapturingFactor = 0.9
ns_lag_shockCapturing = True
ns_lag_subgridError = True
ls_shockCapturingFactor = 0.9
ls_lag_shockCapturing = True
ls_sc_uref = 1.0
ls_sc_beta = 1.0
vof_shockCapturingFactor = 0.9
vof_lag_shockCapturing = True
vof_sc_uref = 1.0
vof_sc_beta = 1.0
rd_shockCapturingFactor = 0.9
rd_lag_shockCapturing = False
epsFact_density = 1.5
epsFact_viscosity = epsFact_curvature = epsFact_vof = epsFact_consrv_heaviside = epsFact_consrv_dirac = epsFact_density
epsFact_redistance = 0.33
epsFact_consrv_diffusion = 10.0
redist_Newton = False#True
kappa_shockCapturingFactor = 0.9
kappa_lag_shockCapturing = True#False
kappa_sc_uref = 1.0
kappa_sc_beta = 1.0
dissipation_shockCapturingFactor = 0.9
dissipation_lag_shockCapturing = True#False
dissipation_sc_uref = 1.0
dissipation_sc_beta = 1.0
ns_nl_atol_res = max(1.0e-6,0.001*domain.MeshOptions.he**2)
vof_nl_atol_res = max(1.0e-6,0.001*domain.MeshOptions.he**2)
ls_nl_atol_res = max(1.0e-6,0.001*domain.MeshOptions.he**2)
mcorr_nl_atol_res = max(1.0e-6,0.0001*domain.MeshOptions.he**2)
rd_nl_atol_res = max(1.0e-6,0.01*domain.MeshOptions.he)
kappa_nl_atol_res = max(1.0e-6,0.001*domain.MeshOptions.he**2)
dissipation_nl_atol_res = max(1.0e-6,0.001*domain.MeshOptions.he**2)
mesh_nl_atol_res = max(1.0e-6,0.001*domain.MeshOptions.he**2)
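# Worked example (not part of the original file): with the default he = 0.04 m,
# he**2 = 1.6e-3, so the velocity/VOF/level-set tolerances above evaluate to
# max(1.0e-6, 0.001*1.6e-3) = 1.6e-6, while rd_nl_atol_res = max(1.0e-6, 0.01*0.04) = 4.0e-4
# and mcorr_nl_atol_res = max(1.0e-6, 0.0001*1.6e-3) = 1.0e-6.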
#turbulence
ns_closure=0 #1-classic smagorinsky, 2-dynamic smagorinsky, 3 -- k-epsilon, 4 -- k-omega
if useRANS == 1:
ns_closure = 3
elif useRANS >= 2:
ns_closure = 4
def twpflowPressure_init(x, t):
p_L = 0.0
phi_L = tank_dim[nd-1] - waterLevel
phi = x[nd-1] - waterLevel
return p_L -g[nd-1]*(rho_0*(phi_L - phi)+(rho_1 -rho_0)*(smoothedHeaviside_integral(epsFact_consrv_heaviside*opts.he,phi_L)
-smoothedHeaviside_integral(epsFact_consrv_heaviside*opts.he,phi)))
| mit | 2,326,082,894,898,492,400 | 34.476316 | 155 | 0.640012 | false |
personalrobotics/or_ompl | scripts/wrap_planners.py | 1 | 5651 |
#!/usr/bin/env python
# Copyright (c) 2014, Carnegie Mellon University
# All rights reserved.
#
# Authors: Michael Koval <[email protected]>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import print_function
import argparse, yaml, os.path, sys
factory_frontmatter = """\
#include <map>
#include <string>
#include <boost/assign/list_of.hpp>
{includes:s}
#include <or_ompl/PlannerRegistry.h>
namespace or_ompl {{
namespace registry {{
struct BasePlannerFactory {{
virtual ~BasePlannerFactory() {{ }}
virtual ompl::base::Planner *create(ompl::base::SpaceInformationPtr space) = 0;
}};
/*
* Planner Factories
*/
"""
factory_template = """\
struct {name:s}Factory : public virtual BasePlannerFactory {{
virtual ompl::base::Planner *create(ompl::base::SpaceInformationPtr space)
{{
return new {qualified_name:s}(space);
}}
}};
"""
registry_frontmatter = """
/*
* Planner Registry
*/
typedef std::map<std::string, BasePlannerFactory *> PlannerRegistry;
// The dynamic_cast is necessary to work around a type inference bug when
// using map_list_of on a polymorphic type.
static PlannerRegistry registry = boost::assign::map_list_of
"""
registry_entry = ' ("{name:s}", dynamic_cast<BasePlannerFactory *>(new {name:s}Factory))'
registry_backmatter = """\
;
std::vector<std::string> get_planner_names()
{
std::vector<std::string> names;
names.reserve(registry.size());
PlannerRegistry::const_iterator it;
for (it = registry.begin(); it != registry.end(); ++it) {
names.push_back(it->first);
}
return names;
}
ompl::base::Planner *create(std::string const &name,
ompl::base::SpaceInformationPtr space)
{
PlannerRegistry::const_iterator const it = registry.find(name);
if (it != registry.end()) {
return it->second->create(space);
} else {
throw std::runtime_error("Unknown planner '" + name + "'.");
}
}
} // namespace registry
} // namespace or_ompl
"""
def parse_version(version):
return tuple(int(x) for x in version.split('.'))
def print_colored(colorcode, s):
if sys.stdout.isatty():
print('\033[{}m{}\033[0m'.format(colorcode, s))
else:
print(s)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--include-dirs', type=str,
help='OMPL include directories')
parser.add_argument('--planners-yaml', type=str, required=True,
help='input filename for planner list')
parser.add_argument('--generated-cpp', type=str, required=True,
help='output filename for generated code')
args = parser.parse_args()
include_dirs = args.include_dirs.split(os.path.pathsep)
# Filter planners by version number.
with open(args.planners_yaml) as fin:
planners = yaml.load(fin)
supported_planners = []
print_colored(94, 'Configuring or_ompl planner registry ...')
for planner in planners:
for include_dir in include_dirs:
header_path = os.path.join(include_dir, planner['header'])
if os.path.exists(header_path):
supported_planners.append(planner)
print_colored(92, ' planner {} found'.format(planner['name']))
break
else:
print_colored(91, ' planner {} not found'.format(planner['name']))
planners = supported_planners
with open(args.generated_cpp,'w') as fout:
# Include the necessary OMPL
headers = [ planner['header'] for planner in planners ]
includes = [ '#include <{:s}>'.format(path) for path in headers ]
fout.write(factory_frontmatter.format(includes='\n'.join(includes)))
# Generate the factory class implementations.
names = [ planner['name'] for planner in planners ]
registry_entries = []
for qualified_name in names:
_, _, name = qualified_name.rpartition('::')
args = { 'name': name,
'qualified_name': qualified_name }
fout.write(factory_template.format(**args))
registry_entries.append(registry_entry.format(**args))
# Generate the registry of factory classes.
fout.write(registry_frontmatter)
fout.write('\n'.join(registry_entries))
fout.write(registry_backmatter)
if __name__ == '__main__':
main()
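# Illustrative planners.yaml sketch (not part of the original script), inferred
# from the 'name' and 'header' fields read in main() above. The entries below
# are assumptions for demonstration, not a verified planner list:
#
#   - name: ompl::geometric::RRTConnect
#     header: ompl/geometric/planners/rrt/RRTConnect.h
#   - name: ompl::geometric::PRM
#     header: ompl/geometric/planners/prm/PRM.h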
| bsd-2-clause | -1,007,516,185,716,829,200 | 32.838323 | 92 | 0.662538 | false |
sebastic/QGIS | python/plugins/processing/algs/qgis/Merge.py | 1 | 4761 |
# -*- coding: utf-8 -*-
"""
***************************************************************************
Merge.py
---------------------
Date : May 2010
Copyright : (C) 2010 by Michael Minn
Email : pyqgis at michaelminn dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Michael Minn'
__date__ = 'May 2010'
__copyright__ = '(C) 2010, Michael Minn'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from PyQt4.QtCore import QVariant
from qgis.core import QgsFields, QgsVectorLayer
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.GeoAlgorithmExecutionException import GeoAlgorithmExecutionException
from processing.core.parameters import ParameterMultipleInput
from processing.core.outputs import OutputVector
from processing.tools import dataobjects, vector
class Merge(GeoAlgorithm):
LAYERS = 'LAYERS'
OUTPUT = 'OUTPUT'
def defineCharacteristics(self):
self.name, self.i18n_name = self.trAlgorithm('Merge vector layers')
self.group, self.i18n_group = self.trAlgorithm('Vector general tools')
self.addParameter(ParameterMultipleInput(self.LAYERS,
self.tr('Layers to merge'), datatype=ParameterMultipleInput.TYPE_VECTOR_ANY))
self.addOutput(OutputVector(self.OUTPUT, self.tr('Merged')))
def processAlgorithm(self, progress):
inLayers = self.getParameterValue(self.LAYERS)
paths = inLayers.split(';')
layers = []
fields = QgsFields()
totalFeatureCount = 0
for x in xrange(0, len(paths)):
layer = QgsVectorLayer(paths[x], unicode(x), 'ogr')
if (len(layers) > 0):
if (layer.dataProvider().geometryType() != layers[0].dataProvider().geometryType()):
raise GeoAlgorithmExecutionException(
self.tr('All layers must have same geometry type!'))
layers.append(layer)
totalFeatureCount += layer.featureCount()
for sindex, sfield in enumerate(layer.dataProvider().fields()):
found = None
for dfield in fields:
if (dfield.name().upper() == sfield.name().upper()):
found = dfield
if (dfield.type() != sfield.type()):
raise GeoAlgorithmExecutionException(
self.tr('{} field in layer {} has different '
'data type than in other layers.'))
if not found:
fields.append(sfield)
total = 100.0 / totalFeatureCount
writer = self.getOutputFromName(self.OUTPUT).getVectorWriter(
fields.toList(), layers[0].dataProvider().geometryType(),
layers[0].crs())
featureCount = 0
for layer in layers:
for feature in layer.dataProvider().getFeatures():
sattributes = feature.attributes()
dattributes = []
for dindex, dfield in enumerate(fields):
if (dfield.type() == QVariant.Int):
dattribute = 0
elif (dfield.type() == QVariant.Double):
dattribute = 0.0
else:
dattribute = ''
for sindex, sfield in enumerate(layer.dataProvider().fields()):
if (sfield.name().upper() == dfield.name().upper()):
if (sfield.type() != dfield.type()):
raise GeoAlgorithmExecutionException(
self.tr('Attribute type mismatch'))
dattribute = sattributes[sindex]
break
dattributes.append(dattribute)
feature.setAttributes(dattributes)
writer.addFeature(feature)
featureCount += 1
progress.setPercentage(int(featureCount * total))
del writer
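# Worked example of the field-unification logic above (not part of the original
# file; the layer and field names are made up). Given layer A with fields
# (name: String, pop: Int) and layer B with fields (NAME: String, area: Double),
# the merged field list becomes (name, pop, area): header names are matched
# case-insensitively, so NAME maps onto name, and any field missing from a
# source layer is written with a type default (0 for Int, 0.0 for Double,
# '' otherwise).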
| gpl-2.0 | -1,091,023,555,943,208,200 | 39.692308 | 126 | 0.508297 | false |
qvazzler/Flexget | flexget/options.py | 1 | 21583 |
from __future__ import unicode_literals, division, absolute_import
from builtins import * # pylint: disable=unused-import, redefined-builtin
import copy
import random
import string
import sys
from argparse import ArgumentParser as ArgParser
from argparse import (_VersionAction, Action, ArgumentError, Namespace, PARSER, REMAINDER, SUPPRESS,
_SubParsersAction)
import flexget
from flexget.entry import Entry
from flexget.event import fire_event
from flexget.logger import console
from flexget.utils import requests
_UNSET = object()
core_parser = None
def unicode_argv():
"""Like sys.argv, but decodes all arguments."""
args = []
for arg in sys.argv:
if isinstance(arg, bytes):
arg = arg.decode(sys.getfilesystemencoding())
args.append(arg)
return args
def get_parser(command=None):
global core_parser
if not core_parser:
core_parser = CoreArgumentParser()
# Add all plugin options to the parser
fire_event('options.register')
if command:
return core_parser.get_subparser(command)
return core_parser
def register_command(command, callback, **kwargs):
"""
Register a callback function to be executed when flexget is launched with the given `command`.
:param command: The command being defined.
:param callback: Callback function executed when this command is invoked from the CLI. Should take manager instance
and parsed argparse namespace as parameters.
:param kwargs: Other keyword arguments will be passed to the :class:`arparse.ArgumentParser` constructor
:returns: An :class:`argparse.ArgumentParser` instance ready to be configured with the options for this command.
"""
return get_parser().add_subparser(command, parent_defaults={'cli_command_callback': callback}, **kwargs)
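# Illustrative sketch (not part of the original module): registering a trivial
# CLI command through register_command(). The command name, callback and
# argument below are made up for demonstration.
def _hello_command(manager, options):
    console('hello from %s' % options.name)
# hello_parser = register_command('hello', _hello_command, help='print a greeting')
# hello_parser.add_argument('name')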
def required_length(nmin, nmax):
"""Generates a custom Action to validate an arbitrary range of arguments."""
class RequiredLength(Action):
def __call__(self, parser, args, values, option_string=None):
if not nmin <= len(values) <= nmax:
raise ArgumentError(self, 'requires between %s and %s arguments' % (nmin, nmax))
setattr(args, self.dest, values)
return RequiredLength
class VersionAction(_VersionAction):
"""Action to print the current version. Also checks latest release revision."""
def __call__(self, parser, namespace, values, option_string=None):
# Print the version number
console('%s' % self.version)
# Check for latest version from server
try:
page = requests.get('http://download.flexget.com/latestversion')
except requests.RequestException:
console('Error getting latest version number from download.flexget.com')
else:
ver = page.text.strip()
if self.version == ver:
console('You are on the latest release.')
else:
console('Latest release: %s' % ver)
parser.exit()
class DebugAction(Action):
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, True)
namespace.loglevel = 'debug'
class DebugTraceAction(Action):
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, True)
namespace.debug = True
namespace.log_level = 'trace'
class CronAction(Action):
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, True)
# Only set loglevel if it has not already explicitly been set
if not hasattr(namespace, 'loglevel'):
namespace.loglevel = 'info'
# This makes the old --inject form forwards compatible
class InjectAction(Action):
def __call__(self, parser, namespace, values, option_string=None):
kwargs = {'title': values.pop(0)}
if values:
kwargs['url'] = values.pop(0)
else:
kwargs['url'] = 'http://localhost/inject/%s' % ''.join(random.sample(string.letters + string.digits, 30))
if 'force' in [v.lower() for v in values]:
kwargs['immortal'] = True
entry = Entry(**kwargs)
if 'accept' in [v.lower() for v in values]:
entry.accept(reason='accepted by --inject')
existing = getattr(namespace, self.dest, None) or []
setattr(namespace, self.dest, existing + [entry])
class ParseExtrasAction(Action):
"""This action will take extra arguments, and parser them with a different parser."""
def __init__(self, option_strings, parser, help=None, metavar=None, dest=None, required=False):
if metavar is None:
metavar = '<%s arguments>' % parser.prog
if help is None:
help = 'arguments for the `%s` command are allowed here' % parser.prog
self._parser = parser
super(ParseExtrasAction, self).__init__(option_strings=option_strings, dest=SUPPRESS, help=help,
metavar=metavar, nargs=REMAINDER, required=required)
def __call__(self, parser, namespace, values, option_string=None):
namespace, extras = self._parser.parse_known_args(values, namespace)
if extras:
parser.error('unrecognized arguments: %s' % ' '.join(extras))
class ScopedNamespace(Namespace):
def __init__(self, **kwargs):
super(ScopedNamespace, self).__init__(**kwargs)
self.__parent__ = None
def __getattr__(self, key):
if '.' in key:
scope, key = key.split('.', 1)
return getattr(getattr(self, scope), key)
if self.__parent__:
return getattr(self.__parent__, key)
raise AttributeError("'%s' object has no attribute '%s'" % (type(self).__name__, key))
def __setattr__(self, key, value):
if '.' in key:
scope, key = key.split('.', 1)
if not hasattr(self, scope):
setattr(self, scope, type(self)())
sub_ns = getattr(self, scope, None)
return object.__setattr__(sub_ns, key, value)
# Let child namespaces keep track of us
if key != '__parent__' and isinstance(value, ScopedNamespace):
value.__parent__ = self
return object.__setattr__(self, key, value)
def __iter__(self):
return (i for i in self.__dict__.items() if i[0] != '__parent__')
def __copy__(self):
new = self.__class__()
new.__dict__.update(self.__dict__)
# Make copies of any nested namespaces
for key, value in self:
if isinstance(value, ScopedNamespace):
setattr(new, key, copy.copy(value))
return new
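# Illustrative sketch (not part of the original module): how ScopedNamespace
# resolves dotted names. The attribute names below are made up.
def _scoped_namespace_demo():
    ns = ScopedNamespace()
    setattr(ns, 'web.port', 3539)            # creates the nested 'web' namespace
    assert ns.web.port == 3539
    assert getattr(ns, 'web.port') == 3539   # dotted lookup walks into the child
    ns.loglevel = 'info'
    assert ns.web.loglevel == 'info'         # children fall back to their parent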
class NestedSubparserAction(_SubParsersAction):
def __init__(self, *args, **kwargs):
self.nested_namespaces = kwargs.pop('nested_namespaces', False)
self.parent_defaults = {}
super(NestedSubparserAction, self).__init__(*args, **kwargs)
def add_parser(self, name, parent_defaults=None, **kwargs):
if parent_defaults:
self.parent_defaults[name] = parent_defaults
return super(NestedSubparserAction, self).add_parser(name, **kwargs)
def __call__(self, parser, namespace, values, option_string=None):
parser_name = values[0]
if parser_name in self.parent_defaults:
for dest in self.parent_defaults[parser_name]:
if not hasattr(namespace, dest):
setattr(namespace, dest, self.parent_defaults[parser_name][dest])
if self.nested_namespaces:
subnamespace = ScopedNamespace()
super(NestedSubparserAction, self).__call__(parser, subnamespace, values, option_string)
# If dest is set, it should be set on the parent namespace, not subnamespace
if self.dest is not SUPPRESS:
setattr(namespace, self.dest, parser_name)
delattr(subnamespace, self.dest)
setattr(namespace, parser_name, subnamespace)
else:
super(NestedSubparserAction, self).__call__(parser, namespace, values, option_string)
class ParserError(Exception):
def __init__(self, message, parser):
self.message = message
self.parser = parser
def __unicode__(self):
return self.message
def __repr__(self):
return 'ParserError(%s, %s)' % (self.message, self.parser)
class ArgumentParser(ArgParser):
"""
Mimics the default :class:`argparse.ArgumentParser` class, with a few distinctions, mostly to ease subparser usage:
- If `add_subparsers` is called with the `nested_namespaces` kwarg, all subcommand options will be stored in a
nested namespace based on the command name for the subparser
- Adds the `add_subparser` method. After `add_subparsers` has been called, the `add_subparser` method can be used
instead of the `add_parser` method of the object returned by the `add_subparsers` call.
- `add_subparser` takes the `parent_defaults` argument, which will set/change the defaults for the parent
parser when that subparser is selected.
- The `get_subparser` method will get the :class:`ArgumentParser` instance for an existing subparser on this parser
- For any arguments defined both in this parser and one of its subparsers, the selected subparser default will
override the main one.
- Adds the `set_post_defaults` method. This works like the normal argparse `set_defaults` method, but all actions
and subparsers will be run before any of these defaults are set.
- Command shortening: If the command for a subparser is abbreviated unambiguously, it will still be accepted.
- The add_argument `nargs` keyword argument supports a range of arguments, e.g. `"2-4"`
- If the `raise_errors` keyword argument to `parse_args` is True, a `ParserError` will be raised instead of sys.exit
- If the `file` argument is given to `parse_args`, output will be printed there instead of sys.stdout or stderr
"""
file = None # This is created as a class attribute so that we can set it for parser and all subparsers at once
def __init__(self, **kwargs):
"""
:param nested_namespace_name: When used as a subparser, options from this parser will be stored nested under
this attribute name in the root parser's namespace
"""
# Do this early, so even option processing stuff is caught
if '--bugreport' in unicode_argv():
self._debug_tb_callback()
self.subparsers = None
self.raise_errors = None
ArgParser.__init__(self, **kwargs)
# Overwrite _SubparserAction with our custom one
self.register('action', 'parsers', NestedSubparserAction)
self.post_defaults = {}
if kwargs.get('parents'):
for parent in kwargs['parents']:
if hasattr(parent, 'post_defaults'):
self.set_post_defaults(**parent.post_defaults)
def add_argument(self, *args, **kwargs):
if isinstance(kwargs.get('nargs'), str) and '-' in kwargs['nargs']:
# Handle a custom range of arguments
min, max = kwargs['nargs'].split('-')
min, max = int(min), int(max)
kwargs['action'] = required_length(min, max)
# Make the usage string a bit better depending on whether the first argument is optional
if min == 0:
kwargs['nargs'] = '*'
else:
kwargs['nargs'] = '+'
return super(ArgumentParser, self).add_argument(*args, **kwargs)
def _print_message(self, message, file=None):
"""If a file argument was passed to `parse_args` make sure output goes there."""
if self.file:
file = self.file
super(ArgumentParser, self)._print_message(message, file)
def set_post_defaults(self, **kwargs):
"""Like set_defaults method, but these defaults will be defined after parsing instead of before."""
self.post_defaults.update(kwargs)
# if these defaults match any existing arguments, suppress
# the previous default so that it can be filled after parsing
for action in self._actions:
if action.dest in kwargs:
action.default = SUPPRESS
def error(self, msg):
raise ParserError(msg, self)
def parse_args(self, args=None, namespace=None, raise_errors=False, file=None):
"""
:param raise_errors: If this is true, errors will be raised as `ParserError`s instead of calling sys.exit
"""
ArgumentParser.file = file
try:
return super(ArgumentParser, self).parse_args(args, namespace)
except ParserError as e:
if raise_errors:
raise
super(ArgumentParser, e.parser).error(e.message)
finally:
ArgumentParser.file = None
def parse_known_args(self, args=None, namespace=None):
if args is None:
# Decode all arguments to unicode before parsing
args = unicode_argv()[1:]
if namespace is None:
namespace = ScopedNamespace()
namespace, args = super(ArgumentParser, self).parse_known_args(args, namespace)
# add any post defaults that aren't present
for dest in self.post_defaults:
if not hasattr(namespace, dest):
setattr(namespace, dest, self.post_defaults[dest])
return namespace, args
def add_subparsers(self, **kwargs):
"""
:param nested_namespaces: If True, options from subparsers will appear in nested namespace under the subparser
name.
"""
# Set the parser class so subparsers don't end up being an instance of a subclass, like CoreArgumentParser
kwargs.setdefault('parser_class', ArgumentParser)
self.subparsers = super(ArgumentParser, self).add_subparsers(**kwargs)
return self.subparsers
def add_subparser(self, name, **kwargs):
"""
Adds a parser for a new subcommand and returns it.
:param name: Name of the subcommand
:param parent_defaults: Default argument values which should be supplied to the parent parser if this subparser
is selected.
"""
if not self.subparsers:
raise TypeError('This parser does not have subparsers')
result = self.subparsers.add_parser(name, **kwargs)
return result
def get_subparser(self, name, default=_UNSET):
if not self.subparsers:
raise TypeError('This parser does not have subparsers')
p = self.subparsers.choices.get(name, default)
if p is _UNSET:
raise ValueError('%s is not an existing subparser name' % name)
return p
def _get_values(self, action, arg_strings):
"""Complete the full name for partial subcommands"""
if action.nargs == PARSER and self.subparsers:
subcommand = arg_strings[0]
if subcommand not in self.subparsers.choices:
matches = [x for x in self.subparsers.choices if x.startswith(subcommand)]
if len(matches) == 1:
arg_strings[0] = matches[0]
return super(ArgumentParser, self)._get_values(action, arg_strings)
def _debug_tb_callback(self, *dummy):
import cgitb
cgitb.enable(format="text")
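# Illustrative sketch (not part of the original module): the nargs range syntax
# accepted by the add_argument override above. The option name is made up.
def _nargs_range_demo():
    parser = ArgumentParser(prog='demo')
    parser.add_argument('--values', nargs='1-3')   # accepts one to three values
    namespace = parser.parse_args(['--values', 'a', 'b'])
    assert namespace.values == ['a', 'b']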
# This will hold just the arguments directly for Manager.
manager_parser = ArgumentParser(add_help=False)
manager_parser.add_argument('-V', '--version', action=VersionAction, version=flexget.__version__,
help='Print FlexGet version and exit.')
manager_parser.add_argument('--test', action='store_true', dest='test', default=0,
help='Verbose what would happen on normal execution.')
manager_parser.add_argument('-c', dest='config', default='config.yml',
help='Specify configuration file. Default: %(default)s')
manager_parser.add_argument('--logfile', '-l', default='flexget.log',
help='Specify a custom logfile name/location. '
'Default: %(default)s in the config directory.')
manager_parser.add_argument('--loglevel', '-L', metavar='LEVEL',
help='Set the verbosity of the logger. Levels: %(choices)s',
choices=['none', 'critical', 'error', 'warning', 'info', 'verbose', 'debug', 'trace'])
manager_parser.set_post_defaults(loglevel='verbose')
# This option is already handled above.
manager_parser.add_argument('--bugreport', action='store_true', dest='debug_tb',
help='Use this option to create a detailed bug report, '
'note that the output might contain PRIVATE data, so edit that out')
manager_parser.add_argument('--profile', metavar='OUTFILE', nargs='?', const='flexget.profile',
help='Use the python profiler for this run to debug performance issues.')
manager_parser.add_argument('--debug', action=DebugAction, nargs=0, help=SUPPRESS)
manager_parser.add_argument('--debug-trace', action=DebugTraceAction, nargs=0, help=SUPPRESS)
manager_parser.add_argument('--debug-sql', action='store_true', default=False, help=SUPPRESS)
manager_parser.add_argument('--experimental', action='store_true', default=False, help=SUPPRESS)
manager_parser.add_argument('--ipc-port', type=int, help=SUPPRESS)
manager_parser.add_argument('--cron', action=CronAction, default=False, nargs=0,
help='use when executing FlexGet non-interactively: allows background '
'maintenance to run, disables stdout and stderr output, reduces logging level')
class CoreArgumentParser(ArgumentParser):
"""
The core argument parser, contains the manager arguments, command parsers, and plugin arguments.
Warning: Only gets plugin arguments if instantiated after plugins have been loaded.
"""
def __init__(self, **kwargs):
kwargs.setdefault('parents', [manager_parser])
kwargs.setdefault('prog', 'flexget')
super(CoreArgumentParser, self).__init__(**kwargs)
self.add_subparsers(title='commands', metavar='<command>', dest='cli_command', nested_namespaces=True)
# The parser for the execute command
exec_parser = self.add_subparser('execute', help='execute tasks now')
exec_parser.add_argument('--tasks', nargs='+', metavar='TASK',
help='run only specified task(s), optionally using glob patterns ("tv-*"). '
'matching is case-insensitive')
exec_parser.add_argument('--learn', action='store_true', dest='learn', default=False,
help='matches are not downloaded but will be skipped in the future')
exec_parser.add_argument('--profile', action='store_true', default=False, help=SUPPRESS)
exec_parser.add_argument('--disable-phases', nargs='*', help=SUPPRESS)
exec_parser.add_argument('--inject', nargs='+', action=InjectAction, help=SUPPRESS)
# Plugins should respect these flags where appropriate
exec_parser.add_argument('--retry', action='store_true', dest='retry', default=False, help=SUPPRESS)
exec_parser.add_argument('--no-cache', action='store_true', dest='nocache', default=False,
help='disable caches. works only in plugins that have explicit support')
daemonize_help = SUPPRESS
if not sys.platform.startswith('win'):
daemonize_help = 'causes process to daemonize after starting'
# The parser for the daemon command
daemon_parser = self.add_subparser('daemon', parent_defaults={'loglevel': 'info'},
help='run continuously, executing tasks according to schedules defined '
'in config')
daemon_parser.add_subparsers(title='actions', metavar='<action>', dest='action')
start_parser = daemon_parser.add_subparser('start', help='start the daemon')
start_parser.add_argument('-d', '--daemonize', action='store_true', help=daemonize_help)
stop_parser = daemon_parser.add_subparser('stop', help='shutdown the running daemon')
stop_parser.add_argument('--wait', action='store_true',
help='wait for all queued tasks to finish before stopping daemon')
daemon_parser.add_subparser('status', help='check if a daemon is running')
daemon_parser.add_subparser('reload', help='causes a running daemon to reload the config from disk')
def add_subparsers(self, **kwargs):
# The subparsers should not be CoreArgumentParsers
kwargs.setdefault('parser_class', ArgumentParser)
return super(CoreArgumentParser, self).add_subparsers(**kwargs)
def parse_args(self, *args, **kwargs):
result = super(CoreArgumentParser, self).parse_args(*args, **kwargs)
# Make sure we always have execute parser settings even when other commands called
if not result.cli_command == 'execute':
exec_options = get_parser('execute').parse_args([])
if hasattr(result, 'execute'):
exec_options.__dict__.update(result.execute.__dict__)
result.execute = exec_options
return result
| mit | -7,175,028,045,693,146,000 | 45.817787 | 120 | 0.633647 | false |
google-research/tapas | tapas/utils/pruning_utils.py | 1 | 22339 |
# coding=utf-8
# Copyright 2019 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Defines the methods to us for columns selection."""
import collections
import enum
import queue
from typing import Dict, Tuple, Text, Set, Optional, List, Iterable
import apache_beam as beam
import dataclasses
from nltk.stem.porter import PorterStemmer
from tapas.protos import interaction_pb2
from tapas.protos import table_selection_pb2
from tapas.utils import text_utils
from tapas.utils import tf_example_utils
_NS = "main"
_Coordinates = tf_example_utils.TokenCoordinates
def get_column_texts(
table,
column_index,
):
"""Iterates through the text in the column including the header."""
yield table.columns[column_index].text
for row in table.rows:
yield row.cells[column_index].text
def _get_column_cost(
tokenizer,
table,
column_index,
):
r"""Computes length of the serialized column."""
num_tokens = 0
for text in get_column_texts(table, column_index):
num_tokens += len(tokenizer.tokenize(text))
return num_tokens
def _get_question_cost(
tokenizer,
question,
):
r"""Computes length of the serialized question (w/ special token offset)."""
tokens = tokenizer.tokenize(
text_utils.normalize_for_match(question.original_text))
return tokenizer.question_encoding_cost(tokens)
def get_cleaned_seq_tokens(str_tokens):
"""Transform a string to a cleaned list of tokens.
Args:
str_tokens: the string to tokenize
Returns:
A list of tokens
"""
stemmer = PorterStemmer()
tokens = text_utils.tokenize_text(text_utils.format_text(str_tokens))
return [stemmer.stem(token) for token in tokens]
def from_selected_columns_to_selected_tokens(
interaction,
selected_columns,
tokenizer):
r"""Gets all the tokens' coordinates of the selected columns."""
selected_tokens = set()
def _add_cell_tokens(column_index, row_index, cell_text, selected_tokens):
cell_tokens = tokenizer.tokenize(cell_text)
for token_index in range(len(cell_tokens)):
selected_tokens.add(
_Coordinates(
column_index=column_index,
row_index=row_index,
token_index=token_index))
for column_index in selected_columns:
_add_cell_tokens(column_index, 0,
interaction.table.columns[column_index].text,
selected_tokens)
for row_index, row in enumerate(interaction.table.rows):
_add_cell_tokens(column_index, row_index + 1,
row.cells[column_index].text, selected_tokens)
return selected_tokens
def _get_question_column_similarity(column_tokens,
question_tokens,
epsilon = 0.0):
r"""Gives the scored using Jaccard coefficient.
(for more details see https://en.wikipedia.org/wiki/Jaccard_index)
Formula: score = |{token \in C | token \in Q}| / |{token\in C}|.
Where C contains all the tokens extracted from the column i's strings
and Q the question's string.
Args:
column_tokens: expected set of tokens from one column
question_tokens: expected set of tokens from one question
epsilon: expected value >= 0.0. In some cases the expected returned values >
0.0 (non zero). The epsilon must be set to a value > 0.0.
Returns:
The Jaccard coefficient
"""
nominator = column_tokens & question_tokens
denominator = column_tokens | question_tokens
if not denominator and not epsilon:
return 0.0
return (len(nominator) + epsilon) / (len(denominator) + epsilon)
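# Worked example (not part of the original module): with
# column_tokens = {'rank', 'name', 'year'} and question_tokens = {'year', 'rank'},
# the intersection has 2 tokens and the union has 3, so the score is 2/3 ~= 0.67.
# For disjoint sets and epsilon = 0.0 the function returns 0.0.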
class SelectionType(enum.Enum):
COLUMN = 1
CELL = 2
@dataclasses.dataclass
class TokenScoreDebugInfo:
score: float = 0.0
nb_tokens: int = 0
@dataclasses.dataclass(frozen=True)
class TokenSelection:
selected_tokens: Set[_Coordinates]
debug: Optional[table_selection_pb2.TableSelection.DebugInfo] = None
class TokenSelector:
"""Base class for column selection."""
def __init__(self, vocab_file, use_previous_questions):
self._use_previous_questions = use_previous_questions
self._tokenizer = tf_example_utils.TapasTokenizer(vocab_file)
def prepare(self):
pass
def select_tokens(
self,
interaction,
question,
):
raise NotImplementedError()
def annotated_interaction(
self,
interaction,
):
"""Selects columns using select_columns_fn for each question in the current interaction."""
new_interaction = interaction_pb2.Interaction()
new_interaction.CopyFrom(interaction)
questions = []
for q in new_interaction.questions:
text = q.text
if self._use_previous_questions:
questions.append(q.original_text)
q.text = " ".join(questions)
else:
q.text = q.original_text
token_selection = self.select_tokens(interaction, q)
q.text = text
selected_tokens = token_selection.selected_tokens
table_ext = table_selection_pb2.TableSelection.table_selection_ext
question_tokens = q.Extensions[table_ext].selected_tokens
del question_tokens[:]
for token in selected_tokens:
new_token = question_tokens.add()
new_token.column_index = token.column_index
new_token.row_index = token.row_index
new_token.token_index = token.token_index
if token_selection.debug:
q.Extensions[table_ext].debug.CopyFrom(token_selection.debug)
return new_interaction
class MaxTokenSelector(TokenSelector):
"""Selects columns until a certain number of tokens is met."""
def __init__(self, vocab_file, max_nb_tokens,
selection_level, use_previous_answer,
use_previous_questions):
super().__init__(
vocab_file=vocab_file, use_previous_questions=use_previous_questions)
self._max_nb_tokens = max_nb_tokens
self._use_previous_answer = use_previous_answer
# Extracts columns or cells depending on the selection_level.
if selection_level == SelectionType.CELL:
self.select_tokens = self._select_cells
elif selection_level == SelectionType.COLUMN:
self.select_tokens = self._select_columns
else:
raise ValueError(
f"The selection level is not implemented {selection_level}")
def _get_column_tokens(self, interaction,
column_index):
column_tokens = []
for row in interaction.table.rows:
column_tokens.extend(get_cleaned_seq_tokens(row.cells[column_index].text))
column_tokens.extend(
get_cleaned_seq_tokens(interaction.table.columns[column_index].text))
return column_tokens
def _get_columns_tokens(
self, interaction):
return {
column_index: self._get_column_tokens(interaction, column_index)
for column_index in range(len(interaction.table.columns))
}
def _get_columns_similarity_scores(
self, interaction,
question_tokens):
columns_tokens = self._get_columns_tokens(interaction)
columns_score = {
column_index:
_get_question_column_similarity(set(column_tokens), question_tokens)
for column_index, column_tokens in columns_tokens.items()
}
return columns_score
def _get_headers_tokens(
self, interaction):
return {
column_index: get_cleaned_seq_tokens(column.text)
for column_index, column in enumerate(interaction.table.columns)
}
def _get_headers_similarity_scores(
self, interaction,
question_tokens):
headers_tokens = self._get_headers_tokens(interaction)
return {
column_index:
_get_question_column_similarity(set(header_token), question_tokens)
for column_index, header_token in headers_tokens.items()
}
def _get_cells(
self, interaction
):
r"""Extracts cells tokens.
Args:
interaction: Contains the table cells.
Returns:
Dictionary where the keys are the (column index, row index) coordinates of the
cell. The value is the list of tokens of a cell.
"""
cells = {}
for column_index in range(len(interaction.table.columns)):
cells[(column_index, 0)] = get_cleaned_seq_tokens(
interaction.table.columns[column_index].text)
for row_index, row in enumerate(interaction.table.rows):
cells[(column_index, row_index + 1)] = get_cleaned_seq_tokens(
row.cells[column_index].text)
return cells
def _get_row_tokens(self, interaction,
row_index):
row_tokens = []
for column_index in range(len(interaction.table.columns)):
cell_tokens = get_cleaned_seq_tokens(
interaction.table.rows[row_index].cells[column_index].text)
row_tokens.extend(cell_tokens)
return row_tokens
def _get_header_tokens(
self, interaction):
row_tokens = []
for column in interaction.table.columns:
cell_tokens = get_cleaned_seq_tokens(column.text)
row_tokens.extend(cell_tokens)
return row_tokens
def _get_row_similarity_scores(
self, interaction,
question_tokens):
r"""Computes the rows scores.
Args:
interaction: Contains the table cells.
question_tokens: List of the question tokens.
Returns:
Dictionary where the keys are the rows' indexes.
Row index = 0 is the header.
"""
header_tokens = self._get_header_tokens(interaction)
row_scores = {
0: _get_question_column_similarity(set(header_tokens), question_tokens)
}
for row_index in range(len(interaction.table.rows)):
row_tokens = self._get_row_tokens(interaction, row_index)
row_scores[row_index + 1] = _get_question_column_similarity(
set(row_tokens), question_tokens)
return row_scores
def _update_priority_queue_from_previous_answer(
self,
selection_type,
priority_queue,
interaction,
question,
):
r"""gives high priority to a answer columns from the previous question.
Args:
selection_type: Specify the selection type: column or a cell.
priority_queue: expected values: Tuple (-score, index of scored column) or
Tuple (-score, index of scored column, index of row , index of token)
interaction: contains the table cells.
question: contains the original text of the question.
Returns:
A modified priority queue.
"""
if not self._use_previous_answer:
return priority_queue
if len(interaction.questions) > 1:
index = 0
for index, q in enumerate(interaction.questions):
if q.id == question.id:
break
if index > 0:
answer_coords = interaction.questions[index -
1].answer.answer_coordinates
if selection_type == SelectionType.COLUMN:
answer_coords = set([c.column_index for c in answer_coords])
elif selection_type == SelectionType.CELL:
answer_coords = [(c.row_index, c.column_index) for c in answer_coords]
else:
raise NotImplementedError()
new_priority_queue = queue.PriorityQueue()
# The priority queue prioritize the minimum scores:
# max_priority_score = - max_score
max_priority_score = -len(interaction.table.columns)
while not priority_queue.empty():
element = priority_queue.get()
if selection_type == SelectionType.COLUMN:
_, index_look_up = element
new_element = (max_priority_score, index_look_up)
elif selection_type == SelectionType.CELL:
_, column_index, row_index, token_index = element
index_look_up = (row_index, column_index)
new_element = (max_priority_score, column_index, row_index,
token_index)
else:
raise NotImplementedError()
if index_look_up in answer_coords:
new_priority_queue.put(new_element)
else:
new_priority_queue.put(element)
return new_priority_queue
return priority_queue
def _get_columns_from_priority_queue(
self,
columns_queue,
interaction,
question,
):
r"""Selects tokenss with higher score up to max_nb_tokens.
Args:
columns_queue: expected values: Tuple (-score, index of scored column)
interaction: contains the table cells.
question: contains the original text of the question.
Returns:
A TokenSelection covering the tokens of the selected columns.
"""
columns_queue = self._update_priority_queue_from_previous_answer(
SelectionType.COLUMN, columns_queue, interaction, question)
selected_columns = set()
num_tokens = _get_question_cost(self._tokenizer, question)
table = interaction.table
num_columns_skipped = 0
debug = table_selection_pb2.TableSelection.DebugInfo()
while not columns_queue.empty():
score, column_index = columns_queue.get()
column_cost = _get_column_cost(self._tokenizer, table, column_index)
column_debug = debug.columns.add()
column_debug.index = column_index
column_debug.score = -float(score)
# Selects columns without exceeding maximum number of tokens.
if num_tokens + column_cost > self._max_nb_tokens:
num_columns_skipped += 1
column_debug.is_selected = False
continue
column_debug.is_selected = True
if num_columns_skipped > 0:
beam.metrics.Metrics.counter(_NS, "Squeezing in another column").inc()
num_tokens += column_cost
selected_columns.add(column_index)
if not selected_columns:
beam.metrics.Metrics.counter(_NS, "No column selected").inc()
if num_columns_skipped == 0:
beam.metrics.Metrics.counter(_NS, "All columns selected").inc()
else:
beam.metrics.Metrics.counter(_NS, "Columns removed").\
inc(num_columns_skipped)
# Check if an answer column wasn't selected.
for answer_coordinate in question.answer.answer_coordinates:
answer_column_index = answer_coordinate.column_index
if answer_column_index < 0:
continue
if answer_column_index not in selected_columns:
beam.metrics.Metrics.counter(_NS, "Answer columns removed").inc()
if not question.answer.HasField("float_value"):
beam.metrics.Metrics.counter(_NS, "Unambiguous columns removed").inc()
break
selected_tokens = from_selected_columns_to_selected_tokens(
interaction, selected_columns, self._tokenizer)
return TokenSelection(selected_tokens, debug)
def _get_tokens_from_priority_queue(
self,
tokens_queue,
interaction,
question,
):
r"""Selects tokenss with higher score up to max_nb_tokens.
Args:
tokens_queue: expected values: Tuple (-score, column_index, row_index,
token_index)
interaction: contains the table cells.
question: contains the original text of the question.
Returns:
A TokenSelection with the coordinates of the selected tokens.
"""
tokens_queue = self._update_priority_queue_from_previous_answer(
SelectionType.CELL, tokens_queue, interaction, question)
table = interaction.table
selected_tokens = set()
num_tokens = _get_question_cost(self._tokenizer, question)
debug = table_selection_pb2.TableSelection.DebugInfo()
debug_column = collections.defaultdict(TokenScoreDebugInfo)
cell_score = collections.defaultdict(lambda: 0.0)
if num_tokens > self._max_nb_tokens:
beam.metrics.Metrics.counter(_NS, "No column selected").inc()
return TokenSelection(selected_tokens)
while not tokens_queue.empty() and num_tokens < self._max_nb_tokens:
num_tokens += 1
score, column_index, row_index, token_index = tokens_queue.get()
selected_tokens.add(_Coordinates(column_index, row_index, token_index))
debug_column[column_index].score -= float(score)
debug_column[column_index].nb_tokens += 1
previous_score = cell_score[(column_index, row_index)]
cell_score[(column_index, row_index)] = max(-float(score), previous_score)
# Add debug for column selection.
num_columns_skipped = 0
num_tokens_skipped = 0
num_cells_skipped = 0
for column_index in range(len(table.columns)):
column_debug = debug.columns.add()
column_debug.index = column_index
if column_index in debug_column.keys():
debug_info = debug_column[column_index]
column_debug.score = debug_info.score / debug_info.nb_tokens
column_debug.is_selected = True
column_cost = _get_column_cost(self._tokenizer, table, column_index)
num_tokens_skipped += column_cost - debug_info.nb_tokens
num_rows_selected = len(
set(coord.row_index
for coord in selected_tokens
if coord.column_index == column_index))
num_cells_skipped += len(table.rows) + 1 - num_rows_selected
else:
column_debug.score = 0.0
column_debug.is_selected = False
num_columns_skipped += 1
if num_tokens_skipped == 0:
beam.metrics.Metrics.counter(_NS, "All tokens are selected").inc()
else:
beam.metrics.Metrics.counter(_NS,
"Tokens removed").inc(num_tokens_skipped)
if num_cells_skipped == 0:
beam.metrics.Metrics.counter(
_NS, "Selected at least one token of every cell").inc()
else:
beam.metrics.Metrics.counter(
_NS, "Cells removed (no token selected)").inc(num_cells_skipped)
if num_columns_skipped == 0:
beam.metrics.Metrics.counter(
_NS, "Selected at least one token of every column").inc()
else:
beam.metrics.Metrics.counter(
_NS, "Columns removed (no token selected)").inc(num_columns_skipped)
# Check if an answer column wasn't selected.
scored_zero = 0
for answer_coordinate in question.answer.answer_coordinates:
answer_column_index = answer_coordinate.column_index
answer_row_index = answer_coordinate.row_index
if answer_column_index < 0:
continue
if (answer_column_index, answer_row_index) in cell_score.keys():
found_score = cell_score[(answer_column_index, answer_row_index)]
if found_score == 0:
scored_zero += 1
else:
beam.metrics.Metrics.counter(_NS, "Answer cells removed").inc()
if not question.answer.HasField("float_value"):
beam.metrics.Metrics.counter(_NS, "Unambiguous cells removed").inc()
break
if scored_zero > 0:
beam.metrics.Metrics.counter(_NS,
"Answer cell is scored 0").inc(scored_zero)
return TokenSelection(selected_tokens, debug)
def _select_columns(
self,
interaction,
question,
):
raise NotImplementedError()
def _select_cells(
self,
interaction,
question,
):
raise NotImplementedError()
class HeuristicExactMatchTokenSelector(MaxTokenSelector):
r"""Extracts columns that contain tokens'strings match a subset of the question's string."""
def _select_columns(
self,
interaction,
question,
):
r"""Extracts columns that contain tokens'strings match a subset of the question's string.
Args:
interaction: contains the cells.
question: contains the original text of the question.
Returns:
The set of selected columns' indexes.
"""
question_tokens = set(get_cleaned_seq_tokens(question.text))
columns_queue = queue.PriorityQueue()
for i in range(len(interaction.table.columns)):
column_tokens = self._get_column_tokens(interaction, i)
score = _get_question_column_similarity(
set(column_tokens), question_tokens)
columns_queue.put((-score, i))
return self._get_columns_from_priority_queue(columns_queue, interaction,
question)
def _select_cells(
self,
interaction,
question,
):
r"""Extracts cells that contain tokens'strings match a subset of the question's string.
The final score used for the priority queue is cell_score + column_score +
row_score.
Args:
interaction: contains the cells.
question: contains the original text of the question.
Returns:
The set of selected tokens' indexes.
"""
question_tokens = set(get_cleaned_seq_tokens(question.text))
rows_scores = self._get_row_similarity_scores(interaction, question_tokens)
columns_scores = self._get_columns_similarity_scores(
interaction, question_tokens)
cells = self._get_cells(interaction)
tokens_queue = queue.PriorityQueue()
for indexes, cell_tokens in cells.items():
column_index, row_index = indexes
row_score = rows_scores[row_index]
column_score = columns_scores[column_index]
cell_score = _get_question_column_similarity(
set(cell_tokens), question_tokens)
score = column_score + row_score + cell_score
for token_index in range(len(cell_tokens)):
tokens_queue.put((-score, column_index, row_index, token_index))
return self._get_tokens_from_priority_queue(tokens_queue, interaction,
question)
class SelectAllTokensFn(TokenSelector):
"""Extracts all the columns."""
def select_tokens(self, interaction,
question):
r"""Extracts all the columns.
Args:
interaction: gives the number of columns.
question: not used.
Returns:
The set of all columns' indexes.
"""
del question # Unused
selected_columns = set(range(len(interaction.table.columns)))
selected_tokens = from_selected_columns_to_selected_tokens(
interaction, selected_columns, self._tokenizer)
return TokenSelection(selected_tokens)
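# --- Editor's note: illustrative sketch, not part of the original module. ---
# The selectors above turn queue.PriorityQueue (a min-queue) into a max-queue
# by pushing negated scores. A minimal, self-contained demonstration of that
# pattern, independent of interactions, tokenizers and token budgets:
if __name__ == "__main__":
  import queue as _demo_queue
  _q = _demo_queue.PriorityQueue()
  for _column_index, _score in enumerate([0.2, 0.9, 0.5]):
    # Negate the score so the highest-scoring column is popped first.
    _q.put((-_score, _column_index))
  _neg_score, _best_column = _q.get()
  print("best column %d with score %.1f" % (_best_column, -_neg_score))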
| apache-2.0 | 5,498,816,398,818,340,000 | 33.053354 | 95 | 0.660683 | false |
emkailu/PAT3DEM | bin/p3locres2bf.py | 1 | 1509 | #!/usr/bin/env python
import os
import sys
import argparse
from EMAN2 import *
import pat3dem.pdb as p3p
import pat3dem.main as p3m
def main():
progname = os.path.basename(sys.argv[0])
usage = progname + """ [options] <localres.mrc>
Get local resolution from localres.mrc and transfer to bfactors in pdb.
"""
args_def = {'apix':1.25, 'pdb':''}
parser = argparse.ArgumentParser()
parser.add_argument("mrc", nargs='*', help="specify localres.mrc to be processed")
parser.add_argument("-a", "--apix", type=float, help="specify apix, by default {}".format(args_def['apix']))
parser.add_argument("-p", "--pdb", type=str, help="specify the pdb, by default {}".format(args_def['pdb']))
args = parser.parse_args()
if len(sys.argv) == 1:
print "usage: " + usage
print "Please run '" + progname + " -h' for detailed options."
sys.exit(1)
# get default values
for i in args_def:
if args.__dict__[i] == None:
args.__dict__[i] = args_def[i]
# read the pdb
with open(args.pdb) as p:
lines = p.readlines()
ATM = []
for i in lines:
if i[:4] == 'ATOM' or i[:6] == 'HETATM':
ATM += [i]
basename = os.path.basename(os.path.splitext(args.pdb)[0])
a = args.apix
with open(basename + '_locres2bf.pdb', 'w') as w_pdb:
d = EMData(args.mrc[0])
for i in ATM:
(x, y, z) = p3p.get_coord(i)
x, y, z = int(x/a), int(y/a), int(z/a)
res = d.get_value_at(x, y, z)
i = list(i)
i[60:66] = '{:6.2f}'.format(res)
w_pdb.write(''.join(i))
if __name__ == '__main__':
main()
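# Editor's note -- illustrative invocation only; the file names and apix value
# below are hypothetical examples, not taken from the original script:
#   p3locres2bf.py locres.mrc -a 1.25 -p model.pdb
# This samples the local-resolution map at each atom position and writes
# model_locres2bf.pdb with the B-factor column (chars 60-66) replaced by the
# local resolution values.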
| mit | 3,064,638,372,130,385,000 | 28.588235 | 109 | 0.614314 | false |
gavruskin/microinteractions | data_preprocess_6_reps_with_negatives.py | 1 | 9983 | import pandas as pd
data = pd.read_csv("development_times_exp1and2.csv", sep="\t")
# Add all parameters (Taylor coefficients) as new columns initialised to 0:
for i in range(data.shape[0]):
for j in range(10, 42):
data.set_value(i, j, 0)
data.rename(columns={10: "a", 11: "a1", 12: "a2", 13: "a3", 14: "a4", 15: "a5",
16: "b12", 17: "b13", 18: "b14", 19: "b15", 20: "b23", 21: "b24",
22: "b25", 23: "b34", 24: "b35", 25: "b45", 26: "c123", 27: "c124",
28: "c125", 29: "c134", 30: "c135", 31: "c145", 32: "c234", 33: "c235",
34: "c245", 35: "c345", 36: "d1234", 37: "d1235", 38: "d1245",
39: "d1345", 40: "d2345", 41: "e12345"}, inplace=True)
# Change coefficients corresponding to present effects to 1:
for index, row in data.iterrows():
species = row["LP"] + row["LB"] + row["AP"] + row["AT"] + row["AO"]
if species == "YNNNN":
data.set_value(index, "a", 1)
data.set_value(index, "a1", 1)
if species == "NYNNN":
data.set_value(index, "a", 1)
data.set_value(index, "a2", 1)
if species == "NNYNN":
data.set_value(index, "a", 1)
data.set_value(index, "a3", 1)
if species == "NNNYN":
data.set_value(index, "a", 1)
data.set_value(index, "a4", 1)
if species == "NNNNY":
data.set_value(index, "a", 1)
data.set_value(index, "a5", 1)
if species == "YYNNN":
data.set_value(index, "a", 1)
data.set_value(index, "a1", 1)
data.set_value(index, "a2", 1)
data.set_value(index, "b12", -1)
if species == "YNYNN":
data.set_value(index, "a", 1)
data.set_value(index, "a1", 1)
data.set_value(index, "a3", 1)
data.set_value(index, "b13", -1)
if species == "YNNYN":
data.set_value(index, "a", 1)
data.set_value(index, "a1", 1)
data.set_value(index, "a4", 1)
data.set_value(index, "b14", -1)
if species == "YNNNY":
data.set_value(index, "a", 1)
data.set_value(index, "a1", 1)
data.set_value(index, "a5", 1)
data.set_value(index, "b15", -1)
if species == "NYYNN":
data.set_value(index, "a", 1)
data.set_value(index, "a2", 1)
data.set_value(index, "a3", 1)
data.set_value(index, "b23", -1)
if species == "NYNYN":
data.set_value(index, "a", 1)
data.set_value(index, "a2", 1)
data.set_value(index, "a4", 1)
data.set_value(index, "b24", -1)
if species == "NYNNY":
data.set_value(index, "a", 1)
data.set_value(index, "a2", 1)
data.set_value(index, "a5", 1)
data.set_value(index, "b25", -1)
if species == "NNYYN":
data.set_value(index, "a", 1)
data.set_value(index, "a3", 1)
data.set_value(index, "a4", 1)
data.set_value(index, "b34", -1)
if species == "NNYNY":
data.set_value(index, "a", 1)
data.set_value(index, "a3", 1)
data.set_value(index, "a5", 1)
data.set_value(index, "b35", -1)
if species == "NNNYY":
data.set_value(index, "a", 1)
data.set_value(index, "a4", 1)
data.set_value(index, "a5", 1)
data.set_value(index, "b45", -1)
if species == "YYYNN":
data.set_value(index, "a", 1)
data.set_value(index, "a1", 1)
data.set_value(index, "a2", 1)
data.set_value(index, "a3", 1)
data.set_value(index, "b12", -1)
data.set_value(index, "b13", -1)
data.set_value(index, "b23", -1)
data.set_value(index, "c123", 1)
if species == "YYNYN":
data.set_value(index, "a", 1)
data.set_value(index, "a1", 1)
data.set_value(index, "a2", 1)
data.set_value(index, "a4", 1)
data.set_value(index, "b12", -1)
data.set_value(index, "b14", -1)
data.set_value(index, "b24", -1)
data.set_value(index, "c124", 1)
if species == "YYNNY":
data.set_value(index, "a", 1)
data.set_value(index, "a1", 1)
data.set_value(index, "a2", 1)
data.set_value(index, "a5", 1)
data.set_value(index, "b12", -1)
data.set_value(index, "b15", -1)
data.set_value(index, "b25", -1)
data.set_value(index, "c125", 1)
if species == "NYYYN":
data.set_value(index, "a", 1)
data.set_value(index, "a2", 1)
data.set_value(index, "a3", 1)
data.set_value(index, "a4", 1)
data.set_value(index, "b23", -1)
data.set_value(index, "b24", -1)
data.set_value(index, "b34", -1)
data.set_value(index, "c234", 1)
if species == "NNYYY":
data.set_value(index, "a", 1)
data.set_value(index, "a3", 1)
data.set_value(index, "a4", 1)
data.set_value(index, "a5", 1)
data.set_value(index, "b34", -1)
data.set_value(index, "b35", -1)
data.set_value(index, "b45", -1)
data.set_value(index, "c345", 1)
if species == "YNYYN":
data.set_value(index, "a", 1)
data.set_value(index, "a1", 1)
data.set_value(index, "a3", 1)
data.set_value(index, "a4", 1)
data.set_value(index, "b13", -1)
data.set_value(index, "b14", -1)
data.set_value(index, "b34", -1)
data.set_value(index, "c134", 1)
if species == "YNYNY":
data.set_value(index, "a", 1)
data.set_value(index, "a1", 1)
data.set_value(index, "a3", 1)
data.set_value(index, "a5", 1)
data.set_value(index, "b13", -1)
data.set_value(index, "b15", -1)
data.set_value(index, "b35", -1)
data.set_value(index, "c135", 1)
if species == "YNNYY":
data.set_value(index, "a", 1)
data.set_value(index, "a1", 1)
data.set_value(index, "a4", 1)
data.set_value(index, "a5", 1)
data.set_value(index, "b14", -1)
data.set_value(index, "b15", -1)
data.set_value(index, "b45", -1)
data.set_value(index, "c145", 1)
if species == "NYNYY":
data.set_value(index, "a", 1)
data.set_value(index, "a2", 1)
data.set_value(index, "a4", 1)
data.set_value(index, "a5", 1)
data.set_value(index, "b24", -1)
data.set_value(index, "b25", -1)
data.set_value(index, "b45", -1)
data.set_value(index, "c245", 1)
if species == "NYYNY":
data.set_value(index, "a", 1)
data.set_value(index, "a2", 1)
data.set_value(index, "a3", 1)
data.set_value(index, "a5", 1)
data.set_value(index, "b23", -1)
data.set_value(index, "b25", -1)
data.set_value(index, "b35", -1)
data.set_value(index, "c235", 1)
if species == "YYYYN":
data.set_value(index, "a", 1)
data.set_value(index, "a1", 1)
data.set_value(index, "a2", 1)
data.set_value(index, "a3", 1)
data.set_value(index, "a4", 1)
data.set_value(index, "b12", -1)
data.set_value(index, "b13", -1)
data.set_value(index, "b14", -1)
data.set_value(index, "b23", -1)
data.set_value(index, "b24", -1)
data.set_value(index, "b34", -1)
data.set_value(index, "c123", 1)
data.set_value(index, "c124", 1)
data.set_value(index, "c134", 1)
data.set_value(index, "c234", 1)
data.set_value(index, "d1234", -1)
if species == "YYYNY":
data.set_value(index, "a", 1)
data.set_value(index, "a1", 1)
data.set_value(index, "a2", 1)
data.set_value(index, "a3", 1)
data.set_value(index, "a5", 1)
data.set_value(index, "b12", -1)
data.set_value(index, "b13", -1)
data.set_value(index, "b15", -1)
data.set_value(index, "b23", -1)
data.set_value(index, "b25", -1)
data.set_value(index, "b35", -1)
data.set_value(index, "c123", 1)
data.set_value(index, "c125", 1)
data.set_value(index, "c135", 1)
data.set_value(index, "c235", 1)
data.set_value(index, "d1235", -1)
if species == "YYNYY":
data.set_value(index, "a", 1)
data.set_value(index, "a1", 1)
data.set_value(index, "a2", 1)
data.set_value(index, "a4", 1)
data.set_value(index, "a5", 1)
data.set_value(index, "b12", -1)
data.set_value(index, "b14", -1)
data.set_value(index, "b15", -1)
data.set_value(index, "b24", -1)
data.set_value(index, "b25", -1)
data.set_value(index, "b45", -1)
data.set_value(index, "c124", 1)
data.set_value(index, "c125", 1)
data.set_value(index, "c145", 1)
data.set_value(index, "c245", 1)
data.set_value(index, "d1245", -1)
if species == "YNYYY":
data.set_value(index, "a", 1)
data.set_value(index, "a1", 1)
data.set_value(index, "a3", 1)
data.set_value(index, "a4", 1)
data.set_value(index, "a5", 1)
data.set_value(index, "b13", -1)
data.set_value(index, "b14", -1)
data.set_value(index, "b15", -1)
data.set_value(index, "b34", -1)
data.set_value(index, "b35", -1)
data.set_value(index, "b45", -1)
data.set_value(index, "c134", 1)
data.set_value(index, "c135", 1)
data.set_value(index, "c145", 1)
data.set_value(index, "c345", 1)
data.set_value(index, "d1345", -1)
if species == "NYYYY":
data.set_value(index, "a", 1)
data.set_value(index, "a2", 1)
data.set_value(index, "a3", 1)
data.set_value(index, "a4", 1)
data.set_value(index, "a5", 1)
data.set_value(index, "b23", -1)
data.set_value(index, "b24", -1)
data.set_value(index, "b25", -1)
data.set_value(index, "b34", -1)
data.set_value(index, "b35", -1)
data.set_value(index, "b45", -1)
data.set_value(index, "c234", 1)
data.set_value(index, "c235", 1)
data.set_value(index, "c245", 1)
data.set_value(index, "c345", 1)
data.set_value(index, "d2345", -1)
if species == "YYYYY":
data.set_value(index, "a", 1)
data.set_value(index, "a1", 1)
data.set_value(index, "a2", 1)
data.set_value(index, "a3", 1)
data.set_value(index, "a4", 1)
data.set_value(index, "a5", 1)
data.set_value(index, "b12", -1)
data.set_value(index, "b13", -1)
data.set_value(index, "b14", -1)
data.set_value(index, "b15", -1)
data.set_value(index, "b23", -1)
data.set_value(index, "b24", -1)
data.set_value(index, "b25", -1)
data.set_value(index, "b34", -1)
data.set_value(index, "b35", -1)
data.set_value(index, "b45", -1)
data.set_value(index, "c123", 1)
data.set_value(index, "c124", 1)
data.set_value(index, "c125", 1)
data.set_value(index, "c134", 1)
data.set_value(index, "c135", 1)
data.set_value(index, "c145", 1)
data.set_value(index, "c234", 1)
data.set_value(index, "c235", 1)
data.set_value(index, "c245", 1)
data.set_value(index, "c345", 1)
data.set_value(index, "d1234", -1)
data.set_value(index, "d1235", -1)
data.set_value(index, "d1245", -1)
data.set_value(index, "d1345", -1)
data.set_value(index, "d2345", -1)
data.set_value(index, "e12345", 1)
if species == "NNNNN":
data.set_value(index, "a", 1)
data.to_csv("fitness_summary_6_replicates_parameters.csv", sep="\t")
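# Editor's illustrative sanity check (not part of the original script; it only
# uses columns defined above). For the full five-species community "YYYYY" the
# highest-order interaction coefficient e12345 should equal 1 in every row.
full_community = data[(data["LP"] == "Y") & (data["LB"] == "Y") &
                      (data["AP"] == "Y") & (data["AT"] == "Y") &
                      (data["AO"] == "Y")]
check = bool((full_community["e12345"] == 1).all())
print("e12345 set for all YYYYY rows: " + str(check))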
| mit | 9,091,952,240,143,484,000 | 32.840678 | 79 | 0.601222 | false |
WIPACrepo/iceprod | tests/core/exe_json_test.py | 1 | 16736 | """
Test script for core exe_json
"""
from __future__ import absolute_import, division, print_function
from tests.util import unittest_reporter, glob_tests, cmp_dict
import logging
logger = logging.getLogger('exe')
import os
import sys
import time
import shutil
import tempfile
import random
import string
import subprocess
from functools import partial
try:
import cPickle as pickle
except:
import pickle
import unittest
from unittest.mock import patch
from tornado.testing import AsyncTestCase
from iceprod.core import to_log,constants
import iceprod.core.dataclasses
import iceprod.core.functions
import iceprod.core.exe
import iceprod.core.exe_json
from iceprod.core.jsonUtil import json_encode,json_decode,json_compressor
class exe_json_test(AsyncTestCase):
def setUp(self):
super(exe_json_test,self).setUp()
curdir = os.getcwd()
self.test_dir = tempfile.mkdtemp(dir=curdir)
os.symlink(os.path.join(curdir, 'iceprod'),
os.path.join(self.test_dir, 'iceprod'))
os.chdir(self.test_dir)
def cleanup():
os.chdir(curdir)
shutil.rmtree(self.test_dir)
self.addCleanup(cleanup)
# set offline mode
self.config = iceprod.core.exe.Config()
self.config.config['options']['offline'] = True
@patch('iceprod.core.exe_json.RestClient')
@unittest_reporter
def test_01_ServerComms(self, Client):
"""Test ServerComms"""
address = 'http://test'
passkey = 'ksdf8n4'
Client.return_value.request.return_value = 'e'
iceprod.core.exe_json.ServerComms(address, passkey, config=self.config)
self.assertTrue(Client.called)
logger.info('%r',Client.call_args[1])
self.assertEqual({'address':address,'token':passkey}, Client.call_args[1])
Client.reset_mock()
kwargs = {'ssl_cert':'cert','ssl_key':'key','cacert':'ca'}
iceprod.core.exe_json.ServerComms(address, passkey,
config=self.config, **kwargs)
self.assertTrue(Client.called)
expected = {'address':address}
expected.update(kwargs)
self.assertTrue(cmp_dict(expected, Client.call_args[1]))
@patch('iceprod.core.exe_json.RestClient')
@unittest_reporter
async def test_10_download_task(self, Client):
"""Test download_task"""
task = {'dataset':10}
c = Client.return_value
async def req(method, path, args=None):
if path.startswith('/task_actions/process'):
return {'task':'foo', 'task_id':'foo', 'job_id':'bar', 'dataset_id':'bar', 'task_index':1}
elif path.startswith('/config'):
return {'dataset_id':'bar'}
elif path.startswith('/jobs'):
return {'job_index':1}
elif path.startswith('/datasets'):
return {'dataset':1, 'jobs_submitted':10, 'tasks_submitted':1, 'debug':True}
c.request.side_effect = req
rpc = iceprod.core.exe_json.ServerComms('a', 'p', config=self.config)
ret = (await rpc.download_task('gridspec'))[0]
self.assertIn('options', ret)
self.assertIn('task', ret['options'])
self.assertEqual(ret['options']['task'], 1)
self.assertIn('jobs_submitted', ret['options'])
self.assertEqual(ret['options']['jobs_submitted'], 10)
@patch('iceprod.core.exe_json.RestClient')
@unittest_reporter
async def test_12_task_files(self, Client):
"""Test task_files"""
c = Client.return_value
async def req(*args,**kwargs):
d = iceprod.core.dataclasses.Data()
d['remote'] = 'foo'
return {'files': [d]}
c.request.side_effect = req
self.config.config['options']['dataset_id'] = 'd'
self.config.config['options']['task_id'] = 't'
rpc = iceprod.core.exe_json.ServerComms('a', 'p', config=self.config)
files = await rpc.task_files('d','t')
self.assertTrue(c.request.called)
logger.info(c.request.call_args[0])
logger.info('files: %r', files)
self.assertEqual(len(files), 1)
self.assertEqual(files[0]['remote'], 'foo')
async def req(*args,**kwargs):
return {'files': [{'movement':'blah'}]}
c.request.side_effect = req
with self.assertRaises(Exception):
await rpc.task_files('d','t')
@patch('iceprod.core.exe_json.RestClient')
@unittest_reporter
async def test_15_processing(self, Client):
"""Test processing"""
c = Client.return_value
async def req(*args,**kwargs):
return {}
c.request.side_effect = req
self.config.config['options']['task_id'] = 'task'
rpc = iceprod.core.exe_json.ServerComms('a', 'p', config=self.config)
await rpc.processing('task')
self.assertTrue(c.request.called)
logger.info(c.request.call_args[0])
self.assertTrue({'status'}.issubset(
c.request.call_args[0][-1]))
@patch('iceprod.core.exe_json.RestClient')
@unittest_reporter
async def test_20_finish_task(self, Client):
"""Test finish_task"""
c = Client.return_value
async def req(*args,**kwargs):
return {}
c.request.side_effect = req
self.config.config['options']['task_id'] = 'task'
stats = {'test':True}
rpc = iceprod.core.exe_json.ServerComms('a', 'p', config=self.config)
await rpc.finish_task('task', stats=stats)
self.assertEqual(c.request.call_count, 2)
logger.info(c.request.call_args_list[0][0])
self.assertIn('task_stats', c.request.call_args_list[0][0][-1])
@patch('iceprod.core.exe_json.RestClient')
@unittest_reporter
async def test_30_still_running(self, Client):
"""Test still_running"""
c = Client.return_value
self.config.config['options']['task_id'] = 'task'
async def req(*args,**kwargs):
return {'status':'processing'}
c.request.side_effect = req
rpc = iceprod.core.exe_json.ServerComms('a', 'p', config=self.config)
await rpc.still_running('task')
self.assertTrue(c.request.called)
async def req(*args,**kwargs):
return {'status':'reset'}
c.request.side_effect = req
with self.assertRaises(Exception):
await rpc.still_running('task')
c.request.side_effect = Exception('request error')
with self.assertRaises(Exception):
await rpc.still_running('task')
@patch('iceprod.core.exe_json.RestClient')
@unittest_reporter
async def test_40_task_error(self, Client):
"""Test task_error"""
c = Client.return_value
self.config.config['options']['task_id'] = 'task'
async def req(*args,**kwargs):
return {}
c.request.side_effect = req
rpc = iceprod.core.exe_json.ServerComms('a', 'p', config=self.config)
await rpc.task_error('task')
self.assertEqual(c.request.call_count, 2)
logger.info(c.request.call_args_list[0][0])
self.assertIn('task_stats', c.request.call_args_list[0][0][-1])
self.assertIn('error_summary', c.request.call_args_list[0][0][-1])
c.request.reset_mock()
data = ''.join(random.choice(string.ascii_letters) for _ in range(10000))
await rpc.task_error('task', reason=data, start_time=time.time()-200)
self.assertEqual(c.request.call_count, 2)
logger.info(c.request.call_args_list[0][0])
self.assertIn('task_stats', c.request.call_args_list[0][0][-1])
self.assertIn('error_summary', c.request.call_args_list[0][0][-1])
self.assertEqual(data, c.request.call_args_list[0][0][-1]['error_summary'])
self.assertGreaterEqual(c.request.call_args_list[0][0][-1]['time_used'], 200)
c.request.side_effect = Exception('request error')
with self.assertRaises(Exception):
await rpc.task_error('task')
@patch('iceprod.core.exe_json.RestClient')
@unittest_reporter
async def test_41_task_kill(self, Client):
"""Test task_kill"""
c = Client.return_value
task_id = 'task'
async def req(*args,**kwargs):
return {}
c.request.side_effect = req
rpc = iceprod.core.exe_json.ServerComms('a', 'p', config=None)
await rpc.task_kill(task_id)
self.assertEqual(c.request.call_count, 5)
logger.info(c.request.call_args_list[0][0])
self.assertIn('error_summary', c.request.call_args_list[0][0][-1])
c.request.reset_mock()
resources = {'cpu': 1, 'memory': 3.4, 'disk': 0.2}
await rpc.task_kill(task_id, resources=resources)
self.assertEqual(c.request.call_count, 5)
logger.info(c.request.call_args_list[0][0])
self.assertIn('error_summary', c.request.call_args_list[0][0][-1])
self.assertIn('resources', c.request.call_args_list[0][0][-1])
self.assertEqual(c.request.call_args_list[0][0][-1]['resources'], resources)
c.request.reset_mock()
resources = {'time': 34.2}
reason = 'testing'
await rpc.task_kill(task_id, resources=resources, reason=reason)
self.assertEqual(c.request.call_count, 5)
logger.info(c.request.call_args_list[0][0])
self.assertIn('error_summary', c.request.call_args_list[0][0][-1])
self.assertEqual(c.request.call_args_list[0][0][-1]['error_summary'], reason)
self.assertIn('resources', c.request.call_args_list[0][0][-1])
self.assertEqual(c.request.call_args_list[0][0][-1]['resources'], resources)
c.request.side_effect = Exception('request error')
with self.assertRaises(Exception):
await rpc.task_kill(task_id)
@patch('iceprod.core.exe_json.RestClient')
@unittest_reporter
async def test_42_task_kill_sync(self, Client):
"""Test task_kill"""
c = Client.return_value
task_id = 'task'
rpc = iceprod.core.exe_json.ServerComms('a', 'p', config=None)
rpc.task_kill_sync(task_id)
self.assertEqual(c.request_seq.call_count, 5)
logger.info(c.request_seq.call_args_list[0][0])
self.assertIn('error_summary', c.request_seq.call_args_list[0][0][-1])
c.request_seq.reset_mock()
resources = {'cpu': 1, 'memory': 3.4, 'disk': 0.2}
rpc.task_kill_sync(task_id, resources=resources)
self.assertEqual(c.request_seq.call_count, 5)
logger.info(c.request_seq.call_args_list[0][0])
self.assertIn('error_summary', c.request_seq.call_args_list[0][0][-1])
self.assertIn('resources', c.request_seq.call_args_list[0][0][-1])
self.assertEqual(c.request_seq.call_args_list[0][0][-1]['resources'], resources)
c.request_seq.reset_mock()
resources = {'time': 34.2}
reason = 'testing'
rpc.task_kill_sync(task_id, resources=resources, reason=reason)
self.assertEqual(c.request_seq.call_count, 5)
logger.info(c.request_seq.call_args_list[0][0])
self.assertIn('error_summary', c.request_seq.call_args_list[0][0][-1])
self.assertEqual(c.request_seq.call_args_list[0][0][-1]['error_summary'], reason)
self.assertIn('resources', c.request_seq.call_args_list[0][0][-1])
self.assertEqual(c.request_seq.call_args_list[0][0][-1]['resources'], resources)
c.request_seq.side_effect = Exception('request error')
with self.assertRaises(Exception):
rpc.task_kill_sync(task_id)
@patch('iceprod.core.exe_json.RestClient')
@unittest_reporter
async def test_50_uploadLogging(self, Client):
"""Test uploading logfiles"""
c = Client.return_value
self.config.config['options']['task_id'] = 'task'
async def req(*args,**kwargs):
return {}
c.request.side_effect = req
data = ''.join([str(random.randint(0,10000)) for _ in range(100)])
filename = os.path.join(self.test_dir,str(random.randint(0,10000)))
with open(filename,'w') as f:
f.write(data)
name = 'testing'
rpc = iceprod.core.exe_json.ServerComms('a', 'p', config=self.config)
await rpc._upload_logfile(name, filename)
self.assertTrue(c.request.called)
logger.info(c.request.call_args_list[0][0])
self.assertIn('name', c.request.call_args_list[0][0][-1])
self.assertEqual(c.request.call_args_list[0][0][-1]['name'], name)
self.assertIn('data', c.request.call_args_list[0][0][-1])
self.assertEqual(c.request.call_args_list[0][0][-1]['data'],
data)
for f in constants.keys():
if f in ('stderr','stdout','stdlog'):
with open(constants[f],'w') as f:
f.write(''.join([str(random.randint(0,10000))
for _ in range(100)]))
c.request.reset_mock()
await rpc.uploadLog()
self.assertTrue(c.request.called)
logger.info(c.request.call_args_list[0][0])
self.assertIn('name', c.request.call_args_list[0][0][-1])
self.assertEqual(c.request.call_args_list[0][0][-1]['name'], 'stdlog')
self.assertIn('data', c.request.call_args_list[0][0][-1])
self.assertEqual(c.request.call_args_list[0][0][-1]['data'],
open(constants['stdlog']).read())
c.request.reset_mock()
await rpc.uploadErr()
self.assertTrue(c.request.called)
logger.info(c.request.call_args_list[0][0])
self.assertIn('name', c.request.call_args_list[0][0][-1])
self.assertEqual(c.request.call_args_list[0][0][-1]['name'], 'stderr')
self.assertIn('data', c.request.call_args_list[0][0][-1])
self.assertEqual(c.request.call_args_list[0][0][-1]['data'],
open(constants['stderr']).read())
c.request.reset_mock()
await rpc.uploadOut()
self.assertTrue(c.request.called)
logger.info(c.request.call_args_list[0][0])
self.assertIn('name', c.request.call_args_list[0][0][-1])
self.assertEqual(c.request.call_args_list[0][0][-1]['name'], 'stdout')
self.assertIn('data', c.request.call_args_list[0][0][-1])
self.assertEqual(c.request.call_args_list[0][0][-1]['data'],
open(constants['stdout']).read())
c.request.side_effect = Exception('request error')
with self.assertRaises(Exception):
await rpc._upload_logfile(name, filename)
with self.assertRaises(Exception):
await rpc.uploadLog()
with self.assertRaises(Exception):
await rpc.uploadErr()
with self.assertRaises(Exception):
await rpc.uploadOut()
@patch('iceprod.core.exe_json.RestClient')
@unittest_reporter
async def test_60_update_pilot(self, Client):
"""Test update_pilot"""
c = Client.return_value
pilot_id = 'pilot'
args = {'a': 1, 'b': 2}
async def req(*args,**kwargs):
return {}
c.request.side_effect = req
rpc = iceprod.core.exe_json.ServerComms('a', 'p', config=None)
await rpc.update_pilot(pilot_id, **args)
self.assertTrue(c.request.called)
logger.info(c.request.call_args[0])
self.assertTrue({'a','b'}.issubset(c.request.call_args[0][-1]))
c.request.side_effect = Exception('request error')
with self.assertRaises(Exception):
await rpc.update_pilot(pilot_id, **args)
@patch('iceprod.core.exe_json.RestClient')
@unittest_reporter
def test_61_update_pilot_sync(self, Client):
"""Test update_pilot"""
c = Client.return_value
pilot_id = 'pilot'
args = {'a': 1, 'b': 2}
rpc = iceprod.core.exe_json.ServerComms('a', 'p', config=None)
rpc.update_pilot_sync(pilot_id, **args)
self.assertTrue(c.request_seq.called)
logger.info(c.request_seq.call_args[0])
self.assertTrue({'a','b'}.issubset(c.request_seq.call_args[0][-1]))
c.request_seq.side_effect = Exception('request error')
with self.assertRaises(Exception):
rpc.update_pilot_sync(pilot_id, **args)
def load_tests(loader, tests, pattern):
suite = unittest.TestSuite()
alltests = glob_tests(loader.getTestCaseNames(exe_json_test))
suite.addTests(loader.loadTestsFromNames(alltests,exe_json_test))
return suite
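# Editor's note -- an illustrative way to run just this module with the
# standard-library runner (assumes it is invoked from the repository root so
# the load_tests hook above is honoured):
#   python -m unittest tests.core.exe_json_test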
| mit | 8,129,633,243,372,042,000 | 37.036364 | 106 | 0.59859 | false |
Jumpscale/jumpscale_core8 | lib/JumpScale/tools/cuisine/systemservices/CuisineOpenvSwitch.py | 1 | 4701 |
from JumpScale import j
app = j.tools.cuisine._getBaseAppClass()
# TODO: *1 implement & test & document (test on packet.net)
# make sure you use the trick we used in jumpscale/jumpscale_core8/lib/JumpScale/tools/cuisine/systemservices/CuisineFW.py
# : setRuleset... here we use a python script to make sure we set & set back to original if it doesn't work
# can we use j.sal.openvswitch & i saw there is already an .executor in,
# is it usable everywhere?
# please spec properly
class CuisineOpenvSwitch(app):
"""
usage:
```
c=j.tools.cuisine.get("ovh4")
c.systemservices.openvswitch.install()
```
"""
def __init__(self, executor, cuisine):
self._cuisine = cuisine
self._executor = executor
self.__controller = None
self._apt_packages = ['openssl', 'openvswitch-switch', 'openvswitch-common']
# initialise the registry used by vnicCreate below so that it does not
# fail with an AttributeError on first use
self.interfaces = {}
@property
def _controller(self):
if not self.__controller:
self.__controller = j.sal.kvm.KVMController(
executor=self._cuisine._executor)
return self.__controller
# def prepare(self):
# self.install()
# # check openvswitch properly configured
def isInstalled(self):
try:
self._cuisine.core.run("ovs-vsctl show")
return True
except Exception as e:
return False
def install(self):
if self.isInstalled():
return
if self._cuisine.core.isUbuntu:
self._cuisine.package.multiInstall(self._apt_packages)
else:
raise RuntimeError("only support ubuntu")
# do checks if openvswitch installed & configured properly if not
# install
def uninstall(self):
if not self.isInstalled():
return
for package in self._apt_packages:
self._cuisine.core.package.remove(package)
def networkCreate(self, network_name, bridge_name=None, interfaces=None, ovs=True, start=True):
"""
Create a network interface using libvirt and open v switch.
@network_name str: name of the network
@bridge_name str: name of the main bridge created to connect to th host
@interfaces [str]: names of the interfaces to be added to the bridge
"""
network = j.sal.kvm.Network(
self._controller, network_name, bridge_name, interfaces, ovs=ovs)
network.create()
if start:
network.start()
def networkDelete(self, bridge_name):
"""
Delete network and bridge related to it.
@bridge_name
"""
network = j.sal.kvm.Network(self._controller, bridge_name)
return network.destroy()
def networkList(self):
"""
List bridges created by openvswitch that are available on the machine.
"""
_, out, _ = self._cuisine.core.run("ovs-vsctl list-br")
return out.splitlines()
def networkListInterfaces(self, name):
"""
List ports available on bridge specified.
"""
_, out, _ = self._cuisine.core.run("ovs-vsctl list-ports %s" % name)
return out.splitlines()
def vnicCreate(self, name, bridge):
"""
Create an interface and relevant ports connected to a certain bridge or network.
@name str: name of the interface and port that will be creates
@bridge str: name of bridge to add the interface to
@qos int: limit the allowed rate to be used by interface
"""
# TODO: *1 spec
# is a name relevant???, how do we identify a vnic
interface = j.sal.kvm.Interface(self._controller, name, bridge)
self.interfaces[name] = interface
interface.create()
def vnicDelete(self, name, bridge):
"""
Delete interface and port related to certain machine.
@bridge str: name of bridge
@name str: name of port and interface to be deleted
"""
interface = j.sal.kvm.Interface(self._controller, name, bridge)
return interface.destroy()
def vnicQOS(self, name, bridge, qos, burst=None):
"""
Limit the throughput into an interface as a form of QoS.
@interface str: name of interface to limit rate on
@qos int: rate to be limited to in Kb
@burst int: maximum allowed burst that can be reached in Kb/s
"""
# TODO: *1 spec what is relevant for a vnic from QOS perspective, what can we do
# goal is we can do this at runtime
interface = j.sal.kvm.Interface(self._controller, name, bridge)
interface.qos(qos, burst)
def vnicBond(self, parameter_list):
raise NotImplementedError("in development")
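# Editor's illustrative usage sketch (hypothetical names; only install() is
# shown in the class docstring above, the rest follows the method signatures):
# c = j.tools.cuisine.get("ovh4")
# ovs = c.systemservices.openvswitch
# ovs.install()
# ovs.networkCreate("backplane", bridge_name="br-back", interfaces=["eth1"])
# ovs.vnicCreate("vm1-nic0", "br-back")
# ovs.vnicQOS("vm1-nic0", "br-back", qos=10000, burst=1000)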
| apache-2.0 | -7,045,752,899,892,516,000 | 31.42069 | 122 | 0.617315 | false |
libresoft/CVSAnaly-ALERT-branch | pycvsanaly2/CVSParser.py | 1 | 8925 | # Copyright (C) 2007 LibreSoft
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Authors :
# Alvaro Navarro <[email protected]>
# Gregorio Robles <[email protected]>
# Carlos Garcia Campos <[email protected]>
import re
import datetime
from Parser import Parser
from ContentHandler import ContentHandler
from Repository import Commit, Action, Person
class CVSParser (Parser):
CONTENT_ORDER = ContentHandler.ORDER_FILE
patterns = {}
patterns['file'] = re.compile ("^RCS file: (.*)$")
patterns['revision'] = re.compile ("^revision ([\d\.]*)$")
patterns['info'] = \
re.compile ("^date: (\d\d\d\d)[/-](\d\d)[/-](\d\d) (\d\d):(\d\d):(\d\d)(.*); author: (.*); state: ([^;]*);( lines: \+(\d+) -(\d+);?)?")
patterns['branches'] = re.compile ("^branches: ([\d\.]*);$")
patterns['branch'] = re.compile ("^[ \b\t]+(.*): (([0-9]+\.)+)0\.([0-9]+)$")
patterns['tag'] = re.compile ("^[ \b\t]+(.*): (([0-9]+\.)+([0-9]+))$")
patterns['rev-separator'] = re.compile ("^[-]+$")
patterns['file-separator'] = re.compile ("^[=]+$")
def __init__ (self):
Parser.__init__ (self)
self.root_path = ""
self.lines = {}
# Parser context
self.file = None
self.file_added_on_branch = None
self.commit = None
self.branches = None
self.tags = None
self.rev_separator = None
self.file_separator = None
def set_repository (self, repo, uri):
Parser.set_repository (self, repo, uri)
uri = repo.get_uri ()
s = uri.rfind (':')
if s >= 0:
self.root_path = uri[s + 1:]
else:
self.root_path = uri
def _handle_commit (self):
if self.commit is not None:
# Remove trailing \n from commit message
self.commit.message = self.commit.message[:-1]
self.handler.commit (self.commit)
self.commit = None
def flush (self):
self._handle_commit ()
if self.file is not None:
self.handler.file (self.file)
self.file_added_on_branch = None
self.file = None
def get_added_removed_lines (self):
return self.lines
def _parse_line (self, line):
if not line:
if self.commit is None:
return
if self.rev_separator is not None:
self.rev_separator += '\n'
elif self.file_separator is not None:
self.file_separator += '\n'
elif self.commit.message is not None:
self.commit.message += '\n'
return
# Revision Separator
if self.patterns['rev-separator'].match (line):
# Ignore separators so that we don't
# include it in the commit message
if self.rev_separator is None:
self.rev_separator = line
else:
self.rev_separator += line + '\n'
return
# File Separator
if self.patterns['file-separator'].match (line):
# Ignore separators so that we don't
# include it in the commit message
if self.file_separator is None:
self.file_separator = line
else:
self.file_separator += line + '\n'
return
# File
match = self.patterns['file'].match (line)
if match:
self.flush ()
path = match.group (1)
path = path[len (self.root_path):]
path = path[:path.rfind (',')]
self.file = path
self.branches = {}
self.tags = {}
self.commit = None
self.file_separator = None
return
# Branch
match = self.patterns['branch'].match (line)
if match:
self.branches[match.group (2) + match.group (4)] = match.group (1)
return
# Tag (Keep this always after Branch pattern)
match = self.patterns['tag'].match (line)
if match:
revision = match.group (2)
# We are ignoring 1.1.1.1 revisions,
# so in case there's a tag pointing to that
# revision we have to redirect it to 1.1 revision
if revision == '1.1.1.1':
revision = '1.1'
self.tags.setdefault (revision, []).append (match.group (1))
return
# Revision
match = self.patterns['revision'].match (line)
if match and self.rev_separator is not None:
self._handle_commit ()
revision = match.group (1)
commit = Commit ()
# composed rev: revision + | + file path
# to make sure revision is unique
commit.composed_rev = True
commit.revision = "%s|%s" % (revision, self.file)
commit.tags = self.tags.get (revision, None)
self.commit = commit
self.rev_separator = None
return
# Commit info (date, author, etc.)
match = self.patterns['info'].match (line)
if match and self.commit is not None:
commit = self.commit
revision = commit.revision.split ('|')[0]
if revision == '1.1.1.1':
self.commit = None
return
commit.committer = Person ()
commit.committer.name = match.group (8)
self.handler.committer (commit.committer)
commit.date = datetime.datetime (int (match.group (1)), int (match.group (2)), int (match.group (3)),
int (match.group (4)), int (match.group (5)), int (match.group (6)))
if match.group (10) is not None:
self.lines[commit.revision] = (int (match.group (11)), int (match.group (12)))
else:
self.lines[commit.revision] = (0, 0)
action = Action ()
act = match.group (9)
if act == 'dead':
action.type = 'D'
self.file = self.file.replace ('/Attic', '')
commit.revision = commit.revision.replace ('/Attic', '')
elif revision == '1.1':
action.type = 'A'
else:
action.type = 'M'
action.f1 = self.file
# Branch
try:
last_dot = revision.rfind ('.')
prefix = revision[:last_dot]
branch = self.branches[prefix]
if self.file_added_on_branch and \
self.file_added_on_branch == prefix and \
revision[last_dot + 1:] == '1':
action.type = 'A'
self.file_added_on_branch = None
except KeyError:
branch = 'trunk'
commit.branch = branch
commit.actions.append (action)
return
# Branches
match = self.patterns['branches'].match (line)
if match:
if self.commit is None:
return
action = self.commit.actions[0]
revision = self.commit.revision.split ('|')[0]
if action.type == 'D' and revision == '1.1':
# File added on a branch
self.file_added_on_branch = match.group (1)
# Discard this commit
self.commit = None
return
# Message.
if self.commit is not None:
if self.rev_separator is not None:
# Previous separator was probably a
# false positive
self.commit.message += self.rev_separator + '\n'
self.rev_separator = None
if self.file_separator is not None:
# Previous separator was probably a
# false positive
self.commit.message += self.file_separator + '\n'
self.file_separator = None
self.commit.message += line + '\n'
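# Editor's note -- an illustrative cvs log fragment (made up) of the kind the
# patterns above are written to match:
#   RCS file: /cvsroot/project/src/foo.c,v
#   revision 1.4
#   date: 2007/05/12 10:31:55 +0000; author: jsmith; state: Exp; lines: +12 -3
# 'file' captures the path, 'revision' the revision number, and 'info' yields
# the date components, author, state and added/removed line counts.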
| gpl-2.0 | 3,152,438,898,054,378,500 | 32.679245 | 146 | 0.510252 | false |
MTASZTAKI/ApertusVR | plugins/languageAPI/jsAPI/3rdParty/nodejs/10.1.0/source/tools/genv8constants.py | 1 | 2791 | #!/usr/bin/env python
#
# genv8constants.py output_file libv8_base.a
#
# Emits v8dbg constants stored in libv8_base.a in a format suitable for the V8
# ustack helper.
#
import re
import subprocess
import sys
import errno
if len(sys.argv) != 3:
print "usage: objsym.py outfile libv8_base.a"
sys.exit(2);
outfile = file(sys.argv[1], 'w');
try:
pipe = subprocess.Popen([ 'objdump', '-z', '-D', sys.argv[2] ],
bufsize=-1, stdout=subprocess.PIPE).stdout;
except OSError, e:
if e.errno == errno.ENOENT:
print '''
Node.js compile error: could not find objdump
Check that GNU binutils are installed and included in PATH
'''
else:
print 'problem running objdump: ', e.strerror
sys.exit()
pattern = re.compile('([0-9a-fA-F]{8}|[0-9a-fA-F]{16}) <(.*)>:');
v8dbg = re.compile('^v8dbg.*$')
numpattern = re.compile('^[0-9a-fA-F]{2} $');
octets = 4
outfile.write("""
/*
* File automatically generated by genv8constants. Do not edit.
*
 * The following offsets are dynamically extracted from libv8_base.a. See src/v8ustack.d
* for details on how these values are used.
*/
#ifndef V8_CONSTANTS_H
#define V8_CONSTANTS_H
""");
curr_sym = None;
curr_val = 0;
curr_octet = 0;
def out_reset():
global curr_sym, curr_val, curr_octet
curr_sym = None;
curr_val = 0;
curr_octet = 0;
def out_define():
global curr_sym, curr_val, curr_octet, outfile, octets
if curr_sym != None:
wrapped_val = curr_val & 0xffffffff;
if curr_val & 0x80000000 != 0:
wrapped_val = 0x100000000 - wrapped_val;
outfile.write("#define %s -0x%x\n" % (curr_sym.upper(), wrapped_val));
else:
outfile.write("#define %s 0x%x\n" % (curr_sym.upper(), wrapped_val));
out_reset();
for line in pipe:
if curr_sym != None:
#
# This bit of code has nasty knowledge of the objdump text output
# format, but this is the most obvious robust approach. We could almost
# rely on looking at numbered fields, but some instructions look very
# much like hex numbers (e.g., "adc"), and we don't want to risk picking
# those up by mistake, so we look at character-based columns instead.
#
for i in range (0, 3):
# 6-character margin, 2-characters + 1 space for each field
idx = 6 + i * 3;
octetstr = line[idx:idx+3]
if curr_octet > octets:
break;
if not numpattern.match(octetstr):
break;
curr_val += int('0x%s' % octetstr, 16) << (curr_octet * 8);
curr_octet += 1;
match = pattern.match(line)
if match == None:
continue;
# Print previous symbol
out_define();
v8match = v8dbg.match(match.group(2));
if v8match != None:
out_reset();
curr_sym = match.group(2);
# Print last symbol
out_define();
outfile.write("""
#endif /* V8_CONSTANTS_H */
""");
| mit | 8,205,209,713,110,499,000 | 23.482456 | 79 | 0.63454 | false |
mibitzi/pwm | pwm/windows.py | 1 | 12841 | # Copyright (c) 2013 Michael Bitzi
# Licensed under the MIT license http://opensource.org/licenses/MIT
from contextlib import contextmanager
from functools import wraps
import struct
from pwm.ffi.xcb import xcb
from pwm.config import config
import pwm.atom
import pwm.events
import pwm.workspaces
import pwm.color
import pwm.rules
managed = {}
focused = None
MANAGED_EVENT_MASK = (xcb.EVENT_MASK_ENTER_WINDOW |
xcb.EVENT_MASK_FOCUS_CHANGE |
xcb.EVENT_MASK_PROPERTY_CHANGE)
class Info:
def __init__(self):
# Some UnmapNotifyEvents, like those generated when switching
# workspaces, have to be ignored. This indicates how many future
# UnmapNotifyEvents have to be ignored.
self.ignore_unmaps = 0
self.floating = False
self.fullscreen = False
self.urgent = False
self.workspace = None
self.geometry = None
def create(x, y, width, height, mask=None):
"""Create a new window and return its id."""
wid = xcb.core.generate_id()
if not mask:
mask = xcb.mask([(xcb.CW_BACK_PIXEL, xcb.screen.black_pixel),
(xcb.CW_EVENT_MASK, xcb.EVENT_MASK_EXPOSURE)])
xcb.core.create_window(
xcb.screen.root_depth,
wid,
xcb.screen.root,
x, y,
width,
height,
0, # border
xcb.WINDOW_CLASS_INPUT_OUTPUT,
xcb.screen.root_visual,
*mask)
return wid
def destroy(wid):
"""Destroy the window."""
xcb.core.destroy_window(wid)
def manage(wid, only_if_mapped=False):
if wid in managed:
return
attr = xcb.core.get_window_attributes(wid).reply()
if only_if_mapped and attr.map_state != xcb.MAP_STATE_VIEWABLE:
return
# Don't manage windows with the override_redirect flag.
if attr.override_redirect:
return
info = Info()
managed[wid] = info
info.floating = should_float(wid)
update_geometry(wid, force=True)
state = get_property(wid, "_NET_WM_STATE")
if state and pwm.atom.get("_NET_WM_STATE_FULLSCREEN") in state:
info.fullscreen = True
change_attributes(wid, [(xcb.CW_EVENT_MASK, MANAGED_EVENT_MASK)])
pwm.workspaces.current().add_window(wid)
info.workspace = pwm.workspaces.current()
focus(wid)
def unmanage(wid):
if wid not in managed:
return
ws = managed[wid].workspace
ws.remove_window(wid)
del managed[wid]
if focused == wid:
focus(pwm.workspaces.current().top_focus_priority())
def manage_existing():
"""Go through all existing windows and manage them."""
# Get the tree of windows whose parent is the root window (= all)
reply = xcb.core.query_tree(xcb.screen.root).reply()
children = xcb.query_tree_children(reply)
for i in range(xcb.query_tree_children_length(reply)):
manage(children[i], True)
def should_float(wid):
"""Try to determine if a window should be placed on the floating layer."""
if pwm.rules.floating(wid):
return True
# Check the _NET_WM_WINDOW_TYPE property to determine the type of this
# window.
# See the specification for more info:
# http://standards.freedesktop.org/wm-spec/wm-spec-latest.html
wintype = get_property(wid, "_NET_WM_WINDOW_TYPE")
if not wintype:
return False
for wt in wintype:
if (wt == pwm.atom.get("_NET_WM_WINDOW_TYPE_DIALOG") or
wt == pwm.atom.get("_NET_WM_WINDOW_TYPE_UTILITY") or
wt == pwm.atom.get("_NET_WM_WINDOW_TYPE_TOOLBAR") or
wt == pwm.atom.get("_NET_WM_WINDOW_TYPE_SPLASH")):
return True
return False
def update_geometry(wid, force=False):
# We only want the geometry if this window is floating
if force or managed[wid].floating:
managed[wid].geometry = get_geometry(wid)
def is_mapped(wid):
"""Return True if the window is mapped, otherwise False."""
attr = xcb.core.get_window_attributes(wid).reply()
return attr.map_state == xcb.MAP_STATE_VIEWABLE
def change_attributes(wid, masks):
"""Set attributes for the given window."""
xcb.core.change_window_attributes(wid, *xcb.mask(masks))
def get_name(wid):
"""Get the window name."""
name = get_property(wid, "_NET_WM_NAME")
if not name:
name = get_property(wid, xcb.ATOM_WM_NAME)
return name or ""
def get_property(wid, atom):
"""Get a property of this window."""
if isinstance(atom, str):
atom = pwm.atom.get(atom)
reply = xcb.core.get_property(False, wid, atom,
xcb.GET_PROPERTY_TYPE_ANY, 0,
2 ** 32 - 1).reply()
# We want to turn the value into something useful.
# In particular, if the format of the reply is 8, then assume that it is a
# string. Moreover, it could be a list of null terminated strings.
# Otherwise, the format must be a list of integers.
value = xcb.get_property_value(reply)
if reply.format == 8:
value = xcb.ffi.cast("char*", value)
#if 0 in value[:-1]:
# ret = []
# s = []
# for o in value:
# if o == 0:
# ret.append(''.join(s))
# s = []
# else:
# s.append(chr(o))
#else:
ret = xcb.ffi.string(value, reply.value_len).decode("UTF-8")
return ret
elif reply.format in (16, 32):
value = xcb.ffi.cast("uint%d_t*" % reply.format, value)
return [value[i] for i in range(reply.value_len)]
return None
def set_property(wid, atom, value, proptype=None):
fmt = 0
data = []
datalen = 0
if not isinstance(value, (list, tuple)):
value = [value]
if isinstance(value[0], str):
fmt = 8
data = b"\x00".join(val.encode("UTF-8") for val in value)
datalen = len(data)
if not proptype:
proptype = pwm.atom.get("UTF8_STRING")
elif isinstance(value[0], int):
fmt = 32
datalen = len(value)
data = struct.pack("{}I".format(datalen), *value)
if not proptype:
# We just assume it's an atom, but it could also be something else.
proptype = xcb.ATOM_ATOM
if isinstance(atom, str):
atom = pwm.atom.get(atom)
xcb.core.change_property(xcb.PROP_MODE_REPLACE, wid, atom, proptype, fmt,
datalen, data)
def configure(wid, **kwargs):
"""Configure the window and set the given variables in relation to the
workspace.
Arguments can be: x, y, width, height, stackmode
If absolute=True then the window will be configured in absolute coordinates
and not in relation to the workspace.
"""
workspace = pwm.workspaces.current()
values = []
abs_ = 0 if kwargs.get("absolute", False) else 1
border = (kwargs["borderwidth"] if "borderwidth" in kwargs
else config.window.border)
values.append((xcb.CONFIG_WINDOW_BORDER_WIDTH, border))
# We need to cast x and y in order to have correct handling for negative
# values.
if "x" in kwargs:
values.append(
(xcb.CONFIG_WINDOW_X,
xcb.ffi.cast("uint32_t", int(workspace.x*abs_ + kwargs["x"]))))
if "y" in kwargs:
values.append(
(xcb.CONFIG_WINDOW_Y,
xcb.ffi.cast("uint32_t", int(workspace.y*abs_ + kwargs["y"]))))
if "width" in kwargs:
values.append((xcb.CONFIG_WINDOW_WIDTH,
max(0, int(kwargs["width"] - 2*border))))
if "height" in kwargs:
values.append((xcb.CONFIG_WINDOW_HEIGHT,
max(0, int(kwargs["height"] - 2*border))))
if "stackmode" in kwargs:
values.append((xcb.CONFIG_WINDOW_STACK_MODE, kwargs["stackmode"]))
if "sibling" in kwargs:
values.append((xcb.CONFIG_WINDOW_SIBLING, kwargs["sibling"]))
xcb.core.configure_window(wid, *xcb.mask(values))
if wid in managed and "noupdate" not in kwargs:
if ("x" in kwargs or "y" in kwargs or
"width" in kwargs or "height" in kwargs):
update_geometry(wid)
def get_geometry(wid, absolute=False):
"""Get geometry information for the given window.
Return a tuple(x, y, width, height).
"""
geo = xcb.core.get_geometry(wid).reply()
if not absolute:
ws = pwm.workspaces.current()
geo.x -= ws.x
geo.y -= ws.y
# Because borders are not included in width/height and because we
# subtracted them when configuring we have to add them again.
geo.width += 2*geo.border_width
geo.height += 2*geo.border_width
return (geo.x, geo.y, geo.width, geo.height)
def preferred_geometry(wid, workspace=None):
"""Return the preferd geometry for this window."""
if not workspace:
workspace = pwm.workspaces.current()
# We will use the last known geometry.
_, _, width, height = managed[wid].geometry
# There should be some minimum size.
width = max(10, width)
height = max(10, height)
# Just center the window.
x = (workspace.width - width) / 2
y = (workspace.height - height) / 2
return x, y, width, height
def create_client_message(wid, atom, *data):
vals = [
xcb.CLIENT_MESSAGE,
32, # Format
0, # Sequence
wid,
atom
]
# Every X11 event is 32 bytes long, of which 20 bytes (5 ints) are data.
# We need to fill up the bytes which *data did not use with zeros.
for i in range(5):
vals.append(data[i] if i < len(data) else 0)
return struct.pack("BBHII5I", *vals)
def kill(wid):
"""Kill the window with wid."""
# Check if the window supports WM_DELETE_WINDOW, otherwise kill it
# the hard way.
atom = pwm.atom.get("WM_DELETE_WINDOW")
if atom in get_property(wid, "WM_PROTOCOLS"):
event = create_client_message(
wid,
pwm.atom.get("WM_PROTOCOLS"),
atom,
xcb.CURRENT_TIME)
xcb.core.send_event(False, wid, xcb.EVENT_MASK_NO_EVENT, event)
else:
xcb.core.kill_client(wid)
def focus(wid):
"""Focus the window with the given wid.
events.focus_changed will be fired with the new focused window as
parameter. If no window was focused or the window was not found
the event will be fired with None as parameter.
"""
global focused
win = wid if wid in managed else None
if focused == win:
return
if focused:
_handle_focus(focused, False)
focused = win
if focused:
_handle_focus(focused, True)
managed[wid].workspace.handle_focus(wid)
pwm.events.focus_changed(win)
def _handle_focus(wid, focused):
"""Set border color and input focus according to focus."""
border = None
if focused:
border = pwm.color.get_pixel(config.window.focused)
else:
border = pwm.color.get_pixel(config.window.unfocused)
change_attributes(wid, [(xcb.CW_BORDER_PIXEL, border)])
if focused:
xcb.core.set_input_focus(xcb.INPUT_FOCUS_POINTER_ROOT,
wid,
xcb.TIME_CURRENT_TIME)
# Focused floating windows should always be at the top.
if managed[wid].floating:
configure(wid, stackmode=xcb.STACK_MODE_ABOVE)
# Focusing a window should remove its urgency flag
if managed[wid].urgent:
managed[wid].urgent = False
def toggle_urgent(wid):
urgent = not managed[wid].urgent
if urgent:
border = pwm.color.get_pixel(config.window.urgent)
elif wid == focused:
border = pwm.color.get_pixel(config.window.focused)
else:
border = pwm.color.get_pixel(config.window.unfocused)
change_attributes(wid, [(xcb.CW_BORDER_PIXEL, border)])
managed[wid].urgent = urgent
if urgent:
pwm.events.window_urgent_set(wid)
@contextmanager
def no_enter_notify_event():
"""Prevent all managed windows from sending EnterNotifyEvent."""
eventmask = MANAGED_EVENT_MASK
eventmask &= ~xcb.EVENT_MASK_ENTER_WINDOW
for wid in managed:
change_attributes(wid, [(xcb.CW_EVENT_MASK, eventmask)])
yield
for wid in managed:
change_attributes(wid, [(xcb.CW_EVENT_MASK, MANAGED_EVENT_MASK)])
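# Usage sketch (hypothetical caller): suppress focus-follows-mouse side effects
# while rearranging windows:
#   with no_enter_notify_event():
#       configure(wid, x=0, y=0)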
def only_if_focused(func):
"""A decorator to call the function only if there is a focused window.
The function will receive 2 additional parameters, the focused window and
its workspace.
"""
@wraps(func)
def wrapper(*args, **kwargs):
focused = pwm.windows.focused
if focused:
func(focused, managed[focused].workspace, *args, **kwargs)
return wrapper
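# Usage sketch (hypothetical command): act only when a window has focus, e.g.
#   @only_if_focused
#   def close_focused(wid, workspace):
#       kill(wid)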
| mit | 3,560,447,319,417,299,500 | 26.321277 | 79 | 0.609454 | false |
agrif/django-cannen | cannen/management/commands/runcannenftp.py | 1 | 7841 | # This file is part of Cannen, a collaborative music player.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.core.management.base import BaseCommand, CommandError
from django.core.files.uploadedfile import SimpleUploadedFile
from django.contrib.auth import authenticate
from django.utils import autoreload
from pyftpdlib import ftpserver
from optparse import make_option
import errno
import StringIO
import os
import logging
import cannen
from cannen.models import UserSong, User, add_song_and_file
# helper to add to queue when file is closed
class FTPUploadedFile(StringIO.StringIO):
def __init__(self, user, name):
StringIO.StringIO.__init__(self)
self.ftp_user = user
self.ftp_name = name
# to appease pyftpdlib, because StringIO does not have this attr
# this shows up in the Handler as the file name
self.name = self
def close(self):
pass # it will be closed by the handler!
# constructor for not permitted errors
def NotPermittedError(io=False):
if io:
return IOError(errno.EPERM, "Operation not permitted.")
else:
return OSError(errno.EPERM, "Operation not permitted.")
class CannenFilesystem(ftpserver.AbstractedFS):
def __init__(self, root, cmd_channel):
super(CannenFilesystem, self).__init__('/', cmd_channel)
self.user = User.objects.get(username=cmd_channel.username)
# all paths are the same
def ftp2fs(self, ftppath):
return ftppath
def fs2ftp(self, fspath):
return fspath
def validpath(self, path):
return True
# helper to return a UserSong model from an FTP path
def get_model(self, path):
if len(path) <= 1:
return None
songs = UserSong.objects.exclude(file=None).filter(owner__id=self.user.id, url__endswith=path[1:])
try:
return songs[0]
except IndexError:
return None
# the big one, oooooopeeeeeen fiiile!
def open(self, filename, mode):
# for some reason filename is *relative!!*
# so no cutting off the /
if '+' in mode or 'a' in mode:
# we don't do read/write or append here. begone!
raise NotPermittedError(True)
if 'r' in mode:
model = self.get_model(filename)
if not model:
raise NotPermittedError(True)
model.file.file.open(mode)
return model.file.file
if 'w' in mode:
return FTPUploadedFile(self.user, filename)
raise NotPermittedError(True)
# a bunch of useless things from os.*
def mkdir(self, path):
raise NotPermittedError()
def rmdir(self, path):
raise NotPermittedError()
def rename(self, src, dest):
raise NotPermittedError()
def chmod(self, path, mode):
raise NotPermittedError()
def readlink(self, path):
raise NotPermittedError()
# implementations of os.*
def listdir(self, path):
if path != '/':
raise NotPermittedError()
files = UserSong.objects.exclude(file=None).filter(owner__id=self.user.id)
return [f.url.rsplit('/', 1)[1].encode("UTF-8") for f in files]
def chdir(self, path):
if path != '/':
raise NotPermittedError()
def remove(self, path):
model = self.get_model(path)
if model:
model.delete()
else:
raise OSError(errno.ENOENT, "No such file or directory.")
def stat(self, path):
model = self.get_model(path)
if model:
size = model.file.file.size
return os.stat_result((0664, 0, 0, 0, 0, 0, size, 0, 0, 0))
else:
raise OSError(errno.ENOENT, "No such file or directory.")
def lstat(self, path):
return self.stat(path)
# needed so that stat() isn't dumb
def get_user_by_uid(self, uid):
return self.cmd_channel.username
def get_group_by_gid(self, gid):
return self.cmd_channel.username
# replacements for os.path.*
def isfile(self, path):
if len(path) >= 1:
if path[1:] in self.listdir('/'):
return True
return False
def islink(self, path):
return False
def isdir(self, path):
return path == "/"
def getsize(self, path):
return 0
def getmtime(self, path):
return 0
def realpath(self, path):
return path
def lexists(self, path):
        return True
# temp maker, not used here (though maybe it should be?)
def mkstemp(self, suffix="", prefix="", dir=None, mode='wb'):
raise NotPermittedError()
class CannenAuthorizer(ftpserver.DummyAuthorizer):
def validate_authentication(self, username, password):
if not authenticate(username=username, password=password):
return False
try:
# add auth perms
# e - change directory
# l - list files
# r - retrieve files
# d - delete files
# w - store files
self.add_user(username, 'notthepassword', '.', perm='elrdw')
except (ftpserver.AuthorizerError, ValueError):
pass
return True
class CannenHandler(ftpserver.FTPHandler):
authorizer = CannenAuthorizer()
abstracted_fs = CannenFilesystem
banner = "cannen {0} mass uploader".format(cannen.__version__)
def on_file_received(self, file):
# add the model!
data = file.getvalue()
uploaded_file = SimpleUploadedFile(file.ftp_name, data, content_type=None)
song, _ = add_song_and_file(file.ftp_user, uploaded_file)
StringIO.StringIO.close(file)
def on_incomplete_file_received(self, file):
# do nothing!
StringIO.StringIO.close(file)
def main(host, port):
# create the server
handler = CannenHandler
server = ftpserver.FTPServer((host, port), handler)
server.max_cons = 256
server.max_cons_per_ip = 2
server.serve_forever()
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--noreload', action='store_false', dest='use_reloader', default=True, help='Tells Django to NOT use the auto-reloader.'),
)
args = '[optional port number, or ipaddr:port]'
help = 'runs the cannen background ftp upload server'
def handle(self, *args, **options):
if len(args) > 1:
raise CommandError("Usage is runcannenftp {0}.".format(self.args))
if len(args) == 0:
host = ''
port = 8021
else:
arg = args[0]
try:
if ':' in arg:
host, port = arg.rsplit(':', 1)
port = int(port)
else:
host = ''
port = int(arg)
except ValueError:
raise CommandError("\"{0}\" is not a valid port number or address:port pair.".format(args[0]))
use_reloader = options.get('use_reloader')
if use_reloader:
autoreload.main(main, (host, port))
else:
main(host, port)
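# Usage sketch (assumed invocation as a Django management command):
#   python manage.py runcannenftp 0.0.0.0:8021
#   python manage.py runcannenftp --noreload 8021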
| gpl-3.0 | 8,713,329,083,339,235,000 | 33.091304 | 142 | 0.60579 | false |
WilJoey/tn_ckan | ckan/tests/lib/test_email_notifications.py | 1 | 2338 | '''Tests for the ckan.lib.email_notifications module.
Note that email_notifications is used by an action function, so most of the
tests for the module are done by testing the action function in
ckan.test.functional.api. This test module contains some additional unit tests.
'''
import datetime
import nose.tools
import ckan.lib.email_notifications as email_notifications
import ckan.logic as logic
def test_string_to_time_delta():
assert email_notifications.string_to_timedelta('1 day') == (
datetime.timedelta(days=1))
assert email_notifications.string_to_timedelta('1 day') == (
datetime.timedelta(days=1))
assert email_notifications.string_to_timedelta('2 days') == (
datetime.timedelta(days=2))
assert email_notifications.string_to_timedelta('2\tdays') == (
datetime.timedelta(days=2))
assert email_notifications.string_to_timedelta('14 days') == (
datetime.timedelta(days=14))
assert email_notifications.string_to_timedelta('4:35:00') == (
datetime.timedelta(hours=4, minutes=35, seconds=00))
assert email_notifications.string_to_timedelta('4:35:12.087465') == (
datetime.timedelta(hours=4, minutes=35, seconds=12,
milliseconds=87, microseconds=465))
assert email_notifications.string_to_timedelta('1 day, 3:23:34') == (
datetime.timedelta(days=1, hours=3, minutes=23, seconds=34))
assert email_notifications.string_to_timedelta('1 day, 3:23:34') == (
datetime.timedelta(days=1, hours=3, minutes=23, seconds=34))
assert email_notifications.string_to_timedelta('7 days, 3:23:34') == (
datetime.timedelta(days=7, hours=3, minutes=23, seconds=34))
assert email_notifications.string_to_timedelta('7 days,\t3:23:34') == (
datetime.timedelta(days=7, hours=3, minutes=23, seconds=34))
assert email_notifications.string_to_timedelta(
'7 days, 3:23:34.087465') == datetime.timedelta(days=7, hours=3,
minutes=23, seconds=34, milliseconds=87, microseconds=465)
assert email_notifications.string_to_timedelta('.123456') == (
datetime.timedelta(milliseconds=123, microseconds=456))
nose.tools.assert_raises(logic.ValidationError,
email_notifications.string_to_timedelta, 'foobar')
| mit | 2,694,704,394,452,680,000 | 49.826087 | 79 | 0.680924 | false |
aurigadl/EnvReactAsk | server/apiMarcas/marca_test.py | 1 | 8010 | import unittest
import requests
import json
import random
import string
class TestMarcaRest(unittest.TestCase):
def setUp(self):
self.domain = 'mi.co'
self.URL = 'http://localhost:5000/'
self.test = 'test@' + self.domain
self.password = '1234Abcd'
self.admon = 'admon@' + self.domain
self.passAdmin = 'Abcd1234'
# Validate user access with the role of "candidate" and
# create a new marca
def test_setNewMarca(self):
path1 = 'apiFuec/newMarca' # Only candidate role
name_marca = ''.join(random.choice(string.ascii_letters) for x in range(10))
# Save session
reqsess = requests.Session()
# Login user testName_0 that has role candidate
payload = dict(email=self.admon, password=self.passAdmin, password_c=self.passAdmin)
result = reqsess.post(self.URL + 'apiUser/login', json=payload)
answer_json = json.loads(result.text)
token = answer_json['token']
header = {'Authorization': token}
# json format correct without params
payload = {"jsonrpc": "2.0", "method": path1, "params": ""}
r = reqsess.post(self.URL + path1, json=payload, headers=header)
self.assertEqual(r.status_code, 400, 'No exist parameters')
# json format with params but marca doesn't complete
params = {'marca': 'A'}
payload = {"jsonrpc": "2.0", "method": path1, "params": params}
r = reqsess.post(self.URL + path1, json=payload, headers=header)
self.assertEqual(r.status_code, 400, 'Marca is not complete')
# json format with params but marca doesn't complete
params = {'marca': name_marca}
payload = {"jsonrpc": "2.0", "method": path1, "params": params}
r = reqsess.post(self.URL + path1, json=payload, headers=header)
self.assertEqual(r.status_code, 201, 'Marca is ok')
answer_json = json.loads(r.text)
id = answer_json['id']
self.assertTrue(str(id).isdigit(), 'Marca id created')
# json format with params but marca doesn't complete
params = {'marca': name_marca}
payload = {"jsonrpc": "2.0", "method": path1, "params": params}
r = reqsess.post(self.URL + path1, json=payload, headers=header)
self.assertEqual(r.status_code, 400, 'Marca exist')
# Validate user access with the role of "candidate" and
# create a new marca
def test_getallMarca(self):
path1 = 'apiFuec/allMarca' # Only candidate role
# Save session
reqsess = requests.Session()
# Login user testName_0 that has role candidate
payload = dict(email=self.admon, password=self.passAdmin, password_c=self.passAdmin)
result = reqsess.post(self.URL + 'apiUser/login', json=payload)
answer_json = json.loads(result.text)
token = answer_json['token']
header = {'Authorization': token}
# json format correct without params
payload = {"jsonrpc": "2.0", "method": path1, "params": ""}
r = reqsess.get(self.URL + path1, json=payload, headers=header)
self.assertEqual(r.status_code, 200, 'Marca is ok')
answer_json = json.loads(r.text)
dict_result = answer_json['result']
self.assertTrue((type(dict_result) is list), 'Marca get all')
# Update user parameters with id
def test_apiUpdateIdMarca(self):
path = 'apiFuec/updateIdMarca'
path1 = 'apiFuec/newMarca' # Only candidate role
# Save session
reqsess = requests.Session()
name_marca = ''.join(random.choice(string.ascii_letters) for x in range(10))
# Login user admonUser that has role candidate
payload = dict(email=self.admon, password=self.passAdmin, password_c=self.passAdmin)
result = reqsess.post(self.URL + 'apiUser/login', json=payload)
answer_json = json.loads(result.text)
token = answer_json['token']
header = {'Authorization': token}
# create new marca
params = {'marca': name_marca}
payload = {"jsonrpc": "2.0", "method": path1, "params": params}
r = reqsess.post(self.URL + path1, json=payload, headers=header)
self.assertEqual(r.status_code, 201, 'Marca is ok')
answer_json = json.loads(r.text)
id = answer_json['id']
self.assertTrue(str(id).isdigit(), 'Marca id created')
# test api with different params
params = {}
payload = {"jsonrpc": "2.0", "method": path, "params": params}
r = reqsess.put(self.URL + path, json=payload, headers=header)
self.assertEqual(r.status_code, 400, 'Error json format - not parameters')
# test api with different params, without id
params = {'name': ''}
payload = {"jsonrpc": "2.0", "method": path, "params": params}
r = reqsess.put(self.URL + path, json=payload, headers=header)
self.assertEqual(r.status_code, 400, 'Error json format - empty parameters')
# test api with different params
params = {'id': '1000', 'marca': 'Audi'}
payload = {"jsonrpc": "2.0", "method": path, "params": params}
r = reqsess.put(self.URL + path, json=payload, headers=header)
self.assertEqual(r.status_code, 400, 'Error json format - empty parameters')
# test api with different params
params = {'id': str(id), 'name': name_marca + '_new'}
payload = {"jsonrpc": "2.0", "method": path, "params": params}
r = reqsess.put(self.URL + path, json=payload, headers=header)
self.assertEqual(r.status_code, 200, 'Marca update ok')
# Update user parameters with id
def test_apiDeleteIdMarca(self):
path = 'apiFuec/deleteIdMarca'
path1 = 'apiFuec/newMarca' # Only candidate role
# Save session
reqsess = requests.Session()
name_marca = ''.join(random.choice(string.ascii_letters) for x in range(10))
# Login user admonUser that has role candidate
payload = dict(email=self.admon, password=self.passAdmin, password_c=self.passAdmin)
result = reqsess.post(self.URL + 'apiUser/login', json=payload)
answer_json = json.loads(result.text)
token = answer_json['token']
header = {'Authorization': token}
# create new marca
params = {'marca': name_marca}
payload = {"jsonrpc": "2.0", "method": path1, "params": params}
r = reqsess.post(self.URL + path1, json=payload, headers=header)
self.assertEqual(r.status_code, 201, 'Marca is ok')
answer_json = json.loads(r.text)
id = answer_json['id']
self.assertTrue(str(id).isdigit(), 'Marca id created')
# test api with different params
params = {}
payload = {"jsonrpc": "2.0", "method": path, "params": params}
r = reqsess.delete(self.URL + path, json=payload, headers=header)
self.assertEqual(r.status_code, 400, 'Error json format - not parameters')
# test api with different params, without id
params = {'id': ''}
payload = {"jsonrpc": "2.0", "method": path, "params": params}
r = reqsess.delete(self.URL + path, json=payload, headers=header)
self.assertEqual(r.status_code, 400, 'Error json format - empty parameters')
# test api with different params
params = {'id': '1000'}
payload = {"jsonrpc": "2.0", "method": path, "params": params}
r = reqsess.delete(self.URL + path, json=payload, headers=header)
self.assertEqual(r.status_code, 400, 'Error json format - empty parameters')
# test api with different params
params = {'id': str(id)}
payload = {"jsonrpc": "2.0", "method": path, "params": params}
r = reqsess.delete(self.URL + path, json=payload, headers=header)
self.assertEqual(r.status_code, 200, 'Marcar delete ok')
suite = unittest.TestLoader().loadTestsFromTestCase(TestMarcaRest)
unittest.TextTestRunner(verbosity=2).run(suite) | gpl-3.0 | -5,338,638,280,150,986,000 | 42.538043 | 92 | 0.619351 | false |
martinloland/rov | pc/ui.py | 1 | 7085 | '''
ui.py
- Create and update the user interface
'''
# NOTE: assumed imports; this module uses pygame, os and math, while globals such
# as surface/sens/act and helpers like getXY()/rot_center() are provided by the
# rest of the project at runtime.
import math
import os
import pygame
BLACK = (0,0,0)
WHITE = (255,255,255)
GRAY = (60,60,60)
GRAYtrans = (60,60,60,150)
STROKE = 2
BIGSTROKE = 7
t=l=0
c=1
b=r=2
class UI:
def __init__(self):
self.nbCirclesR = 0
self.nbCirclesL = 0
self.overlay = True
self.info = False
self.motorInfo = False
self.joystick = False
def splash(self):
self.splashImg = pygame.image.load(os.path.join('img', 'splash.jpg'))
surface.blit(self.splashImg,(0,0))
pygame.display.flip()
def create(self):
ui.splash()
self.circDia = int(surface.get_height()*0.8/4)
self.circSpace = int(surface.get_height()*0.2/5)
#Elements
self.infoScreen = Info()
self.motors = Motors()
self.video = Video(0.5, 0.5, c, c)
self.RollPitch = RollPitch(0.5, 0.5, c, c)
#Circles
#self.pitch = Circle(r, 'Pitch', -90, 90, None)
#self.roll = Circle(r, 'Roll', -90, 90, None)
self.yaw = Circle(r, 'Yaw', 0, 360, None)
self.volt = Circle(r, 'Volt', 0, 13.0, 'v')
#self.curr = Circle(r, 'Curr', 0, 20, 'A')
self.temp= Circle(r, 'Temp', 4, 30, 'C')
self.depth= Circle(r, 'Depth', 0, 10, 'm')
self.led= Circle(l, 'Led', 0, 1, None)
self.pan= Circle(l, 'Pan', 0, 180, None)
self.tilt= Circle(l, 'Tilt', 0, 180, None)
self.pwr= Circle(l, 'Power', 0, 190, None)
def update(self):
surface.fill(BLACK)
self.video.draw()
if self.info:
self.infoScreen.draw()
else:
if self.motorInfo:
self.motors.draw()
if self.overlay:
self.RollPitch.draw()
#Circles
#self.pitch.draw(sens.pitch)
#self.roll.draw(sens.roll)
self.yaw.draw(sens.yaw)
self.volt.draw(sens.volt)
#self.curr.draw(sens.curr)
self.temp.draw(sens.temp)
self.depth.draw(sens.depth)
self.led.draw(act.led)
self.pan.draw(act.pan)
self.tilt.draw(act.tilt)
self.pwr.draw(act.pwr)
pygame.display.flip()
class Info:
def __init__(self):
self.room = 20
self.width = surface.get_width() - 2*self.room
self.height = surface.get_height() - 2*self.room
def draw(self):
self.face = pygame.Surface((self.width,self.height), pygame.SRCALPHA, 32)
self.face = self.face.convert_alpha()
self.face.fill(GRAYtrans)
if ui.joystick:
self.keyboard = pygame.image.load(os.path.join('img', 'controller.png'))
else:
self.keyboard = pygame.image.load(os.path.join('img', 'keyboard.png'))
self.keyboardy = self.height - self.room - self.keyboard.get_height()
self.face.blit(self.keyboard,(self.room,self.keyboardy))
surface.blit(self.face,(self.room,self.room))
class Motors:
def __init__(self):
self.bg = pygame.image.load(os.path.join('img', 'motors.png'))
self.width = self.bg.get_width()
self.height = self.bg.get_height()
self.x = ui.circSpace*2 + ui.circDia
self.y = surface.get_height()-ui.circSpace-self.height
self.xStart = [61, 45, 132, 220, 204]
self.yStart = [29, 127, 184, 127, 29]
def draw(self):
self.face = pygame.Surface((self.width,self.height), pygame.SRCALPHA, 32)
self.face.blit(self.bg,(0,0))
#text
value = [act.lf, act.lb, act.cb, act.rb, act.rf]
self.font = pygame.font.Font(None, 30)
for i in range(0,len(value)):
self.printValue = "%.0f" % value[i]
self.text = self.font.render(self.printValue, 1, WHITE)
self.textw = self.text.get_rect()[2]
self.face.blit(self.text,(self.xStart[i]-self.textw /2,self.yStart[i]+4))
surface.blit(self.face,(self.x,self.y))
class Circle:
def __init__(self, side, att, min, max, unit):
if side == r:
self.nb = ui.nbCirclesR
self.xstart = surface.get_width()-ui.circSpace-ui.circDia
ui.nbCirclesR += 1
elif side == l:
self.nb = ui.nbCirclesL
self.xstart = ui.circSpace
ui.nbCirclesL += 1
if unit:
self.unit = unit
else:
self.unit = ''
self.att = att
self.max = max
self.min = min
self.dia = ui.circDia
self.rad = int(self.dia/2)
self.ystart = ui.circSpace+(ui.circSpace+ui.circDia)*self.nb
def draw(self, value):
self.face = pygame.Surface((self.dia,self.dia), pygame.SRCALPHA, 32)
self.face = self.face.convert_alpha()
self.rect = self.face.get_rect()
# Semi transparent circle
pygame.draw.circle(self.face, GRAYtrans, (self.rad,self.rad), self.rad, 0)
# Stroke circles
self.percent = (float(value)-self.min)/(self.max-self.min)
self.start = math.pi/2
self.end = math.pi/2+2*math.pi*self.percent
pygame.draw.arc(self.face, GRAY, self.rect, 0, 8, BIGSTROKE)
pygame.draw.arc(self.face, WHITE, self.rect, self.start, self.end, BIGSTROKE)
# Attribute text
self.attFont = pygame.font.Font(None, 30)
self.attText = self.attFont.render(self.att, 1, WHITE)
self.attTextw = self.attText.get_rect()[2]
self.face.blit(self.attText,((self.dia-self.attTextw)/2,self.dia*0.27))
# Value
self.valueFont = pygame.font.Font(None, 50)
self.printValue = "%.2f" % value + self.unit #Round to two decimal places
self.valueText = self.valueFont.render(self.printValue, 1, WHITE)
self.valueTextw = self.valueText.get_rect()[2]
self.face.blit(self.valueText,((self.dia-self.valueTextw)/2,self.dia*0.47))
surface.blit(self.face,(self.xstart,self.ystart))
class Video:
def __init__(self, i, j, horJust, verJust):
self.img = None
def draw(self):
surface.blit(pygame.transform.rotate(self.img, 180),(0,0))
class RollPitch:
def __init__(self, i, j, horJust, verJust):
# Common
self.dia = surface.get_height()*0.6
self.rad = int(self.dia/2)
self.cor = getXY(i, j, horJust, verJust, self.dia, self.dia)
# Lines / Pitch
self.spacing = int(self.rad*0.4)
self.lines = createLines(self.dia, self.spacing)
self.lineW = self.lines.get_width()
self.lineH = self.lines.get_height()
self.lineCorx = int((self.dia-self.lineW)/2)
self.lineCory = int((self.dia-self.lineH)/2)
def draw(self):
self.face = pygame.Surface((self.dia,self.dia), pygame.SRCALPHA, 32)
self.face = self.face.convert_alpha()
self.rect = self.face.get_rect()
#Circle / Roll
pygame.draw.arc(self.face, GRAY, self.rect, 0, 8, STROKE+1)
pygame.draw.arc(self.face, WHITE, self.rect, math.pi, math.pi*2, STROKE+1)
offset = sens.pitch/10*self.spacing
self.face.blit(self.lines,(self.lineCorx,self.lineCory+offset))
surface.blit(rot_center(self.face,sens.roll),(self.cor[0],self.cor[1]))
def createLines(diameter, spacing):
lineWidth = int(diameter*0.8)
lines = 17
height = spacing*(lines-1)
bg = pygame.Surface((lineWidth,height), pygame.SRCALPHA, 32)
bg = bg.convert_alpha()
y=0
for line in range(0,lines):
if line == (lines-1)/2:
x = 0
else:
x = int(lineWidth*0.35)
pygame.draw.line(bg, WHITE, (x,y),(lineWidth-x,y),STROKE)
y = y + spacing
return bg
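# Usage sketch (assumed wiring with the rest of the project):
#   surface = createSurface()
#   ui = UI()
#   ui.create()
#   while running:          # main loop owned by the caller
#       ui.update()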
def createSurface():
pygame.init()
[SCREEN_WIDTH, SCREEN_HEIGHT] = [1296,730]
surface = pygame.display.set_mode((SCREEN_WIDTH,SCREEN_HEIGHT))
pygame.display.set_caption('ROV UI')
return surface | mit | -363,425,104,164,392,600 | 29.220264 | 79 | 0.639661 | false |
henrytao-me/openerp.positionq | addons/positionq/pq_salary/pq_tcc2.py | 1 | 1594 | # -*- coding: utf-8 -*-
from openerp.osv import osv, fields
from openerp.tools.translate import _
import logging
from datetime import datetime
from openerp.osv.fields import datetime as datetime_field
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT, DEFAULT_SERVER_DATE_FORMAT
from unidecode import unidecode
import types
class pq_tcc2(osv.osv):
_name = 'pq.tcc2'
_description = 'Tieu Chi Cap 2'
_columns = {
'name': fields.char('Tên tiêu chí', size=128, required=True),
'tcc1': fields.many2one('pq.tcc1', string="Tiêu chí cấp 1", required=True, ondelete="cascade"),
'trong_so': fields.float('Trọng số', digits=(16,2)),
'create_date': fields.datetime('Ngày giờ tạo', readonly=True),
'user_id': fields.many2one('res.users', string="Người tạo",readonly=True),
}
_defaults = {
'trong_so': lambda *x: 0,
'user_id': lambda self, cr, uid, context=None: uid,
}
def create(self, cr, uid, vals, context=None):
self.pool.get('pq.redis').clear_all(cr, uid)
return super(pq_tcc2, self).create(cr, uid, vals, context)
def write(self, cr, uid, ids, vals, context=None):
self.pool.get('pq.redis').clear_all(cr, uid)
return super(pq_tcc2, self).write(cr, uid, ids, vals, context)
def unlink(self, cr, uid, ids, context=None):
self.pool.get('pq.redis').clear_all(cr, uid)
return super(pq_tcc2, self).unlink(cr, uid, ids, context)
pq_tcc2()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 519,983,520,925,627,140 | 33.955556 | 103 | 0.642721 | false |
FirstAidKitten/Roguelike-Sandbox | ai.py | 1 | 1113 | import session
from fov import generate
from random import randint
class Aggressive:
# AI for a basic monster.
def take_turn(self):
# a basic monster takes its turn. If you can see it, it can see you
monster = self.owner
monster_fov = generate(monster.x, monster.y)
if (session.player.x, session.player.y) in monster_fov:
# move towards player if far away
if monster.distance_to(session.player) >= 2:
monster.move_towards(session.player.x, session.player.y)
# close enough, attack! (if the player is still alive.)
elif session.player.stats['hp'] > 0:
monster.attack(session.player)
class Neutral:
# AI for a basic monster.
def take_turn(self):
monster = self.owner
monster_fov = generate(monster.x, monster.y)
if (session.player.x, session.player.y) in monster_fov:
if randint(0, 100) < 80:
monster.move(randint(-1, 1), randint(-1, 1))
else:
monster.move_towards(session.player.x, session.player.y)
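# Usage sketch (assumed wiring by the owning game object): an AI component reads
# self.owner, so the entity attaches itself before turns are taken:
#   ai = Aggressive()
#   ai.owner = monster        # monster provides move/move_towards/attack/distance_to
#   ai.take_turn()            # called once per game turn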
| mit | 6,383,235,628,075,894,000 | 33.78125 | 75 | 0.604672 | false |
marcua/qurk_experiments | qurkexp/join/movie_comparisons.py | 1 | 4332 | #!/usr/bin/env python
import sys, os
ROOT = os.path.abspath('%s/../..' % os.path.abspath(os.path.dirname(__file__)))
sys.path.append(ROOT)
os.environ['DJANGO_SETTINGS_MODULE'] = 'qurkexp.settings'
from django.core.management import setup_environ
from django.conf import settings
from qurkexp.join.models import *
from qurkexp.join.movie_results import get_actor_scenes
from qurkexp.hitlayer.models import HitLayer
from random import shuffle, seed
def get_params():
if len(sys.argv) < 7:
print 'not enough arguments!'
print 'args: [cmp|rating] join_exp_name actorid batchsize sortsize nassignments runname'
exit()
sort_type = sys.argv[1]
join_exp = sys.argv[2]
actorid = int(sys.argv[3])
batch_size = int(sys.argv[4])
sort_size = int(sys.argv[5])
nassignments = int(sys.argv[6])
run_name = sys.argv[7]
sceneids = get_actor_scenes(actorid, join_exp)
sceneids = map(lambda x: x[0],sceneids)
if sort_type not in ["cmp", "rating"]:
raise Exception("sort type not matched")
if sort_type == "rating" and sort_size != 1:
raise Exception("Ratings can only have 1 item in each group")
return (sort_type, actorid, sceneids, batch_size, sort_size, nassignments, run_name)
def generate_vals(sceneids):
vals = [CompVal(sortval=i, data=sceneid) for i, sceneid in enumerate(sceneids)]
map(lambda x: x.save(), vals)
return vals
def generate_pairs(sceneids):
vals = generate_vals(sceneids)
seed(1)
shuffle(vals)
pairs = []
for i,l in enumerate(vals[:len(vals)-1]):
for r in vals[i+1:]:
pairs.append((l,r))
return pairs
def generate_group(pairs, sort_size, batch):
groupvals = set()
rempairs = []
for pair in pairs:
sp = set(pair)
if len(groupvals | sp) <= sort_size:
groupvals |= sp
rempairs.append(pair)
else:
break
for pair in rempairs:
pairs.remove(pair)
group = CompGroup(batch=batch)
group.save()
for val in groupvals:
group.vals.add(val)
return (group, pairs)
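# Sketch of the grouping above: pairs are absorbed greedily while the union of
# their values stays within sort_size; e.g. with sort_size=3 the pairs (A,B) and
# (B,C) share one CompGroup, and a later (D,E) starts a new group.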
def comparison_batches(sceneids, exp):
pairs = generate_pairs(sceneids)
batch = CompBatch(experiment=exp)
while len(pairs) > 0:
batch.save()
(group, pairs) = generate_group(pairs, exp.sort_size, batch)
if batch.compgroup_set.all().count() == exp.batch_size:
batch = CompBatch(experiment=exp)
def rating_batches(sceneids, exp):
vals = generate_vals(sceneids)
seed(1)
shuffle(vals)
batch = CompBatch(experiment=exp)
for val in vals:
batch.save()
group = CompGroup(batch=batch)
group.save()
group.vals.add(val)
if batch.compgroup_set.all().count() == exp.batch_size:
batch = CompBatch(experiment=exp)
def post_batches(exp):
if exp.sort_type == "cmp":
desc = "Sort how flattering movie scenes are"
elif exp.sort_type == "rating":
desc = "Rate how flattering movie scenes are"
for b in exp.compbatch_set.all():
hitid = HitLayer.get_instance().create_job("/celeb/movie/sort/%d" % (b.id),
('sort', [b.id]),
desc = desc,
title = desc,
price = 0.01,
nassignments = nassignments)
if __name__ == "__main__":
(sort_type, actorid, sceneids, batch_size, sort_size, nassignments, run_name) = get_params()
exp = CompExperiment(run_name=run_name, batch_size=batch_size,
sort_size=sort_size, sort_type=sort_type,
item_type='movie_%d' % actorid)
exp.save()
if sort_type == "cmp":
comparison_batches(sceneids, exp)
elif sort_type == "rating":
rating_batches(sceneids, exp)
post_batches(exp)
# movie_all_naive_10_1
# movie_all_naive_5_1
# movie_all_smart_2_1
# movie_all_smart_3_1
# movie_all_smart_5_1
#for actorid in range(1,5+1):
# "python movie_comparisons.py cmp movie_all_naive_5_1 %d 5 5 5 movie_cmp_%d_5_5_5" % actorid
# "python movie_comparisons.py rating movie_all_naive_5_1 %d 5 1 5 movie_rat_%d_5_1_5" % actorid
| bsd-3-clause | 7,571,860,350,783,712,000 | 30.620438 | 96 | 0.592567 | false |
aliyun/aliyun-oss-python-sdk | examples/upload.py | 1 | 3341 | # -*- coding: utf-8 -*-
import os
import random
import string
import oss2
# The code below demonstrates advanced upload features such as resumable upload
# and multipart upload.
# For basic uploads (plain objects, append upload), see object_basic.py
# First initialize the AccessKeyId, AccessKeySecret, Endpoint and related settings.
# They are read from environment variables, or you can replace placeholders such
# as "<your AccessKeyId>" with the real values.
#
# Taking the Hangzhou region as an example, the Endpoint can be
#   http://oss-cn-hangzhou.aliyuncs.com
#   https://oss-cn-hangzhou.aliyuncs.com
# for access over HTTP or HTTPS respectively.
access_key_id = os.getenv('OSS_TEST_ACCESS_KEY_ID', '<your AccessKeyId>')
access_key_secret = os.getenv('OSS_TEST_ACCESS_KEY_SECRET', '<your AccessKeySecret>')
bucket_name = os.getenv('OSS_TEST_BUCKET', '<your Bucket>')
endpoint = os.getenv('OSS_TEST_ENDPOINT', '<your endpoint>')
# Make sure all of the parameters above have been filled in correctly.
for param in (access_key_id, access_key_secret, bucket_name, endpoint):
    assert '<' not in param, 'Please set the parameter: ' + param
# Create a Bucket object; all object-related operations go through it.
bucket = oss2.Bucket(oss2.Auth(access_key_id, access_key_secret), endpoint, bucket_name)
def random_string(n):
return ''.join(random.choice(string.ascii_lowercase) for i in range(n))
# Generate a local file for testing. The file content is of type bytes.
filename = random_string(32) + '.txt'
content = oss2.to_bytes(random_string(1024 * 1024))
with open(filename, 'wb') as fileobj:
fileobj.write(content)
"""
Resumable upload
"""
# Resumable upload, case 1: because the file is small (below
# oss2.defaults.multipart_threshold), oss2.Bucket.put_object is used internally.
oss2.resumable_upload(bucket, 'remote-normal.txt', filename)
# Resumable upload, case 2: for demonstration purposes we pass the optional
# multipart_threshold parameter to make sure multipart upload is used.
oss2.resumable_upload(bucket, 'remote-multipart.txt', filename, multipart_threshold=100 * 1024)
"""
Multipart upload
"""
# The multipart upload API can also be called directly.
# First use the helper function to pick a part size; here we prefer 128KB parts.
total_size = os.path.getsize(filename)
part_size = oss2.determine_part_size(total_size, preferred_size=128 * 1024)
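# For example (sketch): with the 1MB test file above and preferred_size=128KB,
# determine_part_size should return 131072, giving 8 parts in the loop below.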
# Initialize the multipart upload and obtain the Upload ID, which all of the
# following calls require.
key = 'remote-multipart2.txt'
upload_id = bucket.init_multipart_upload(key).upload_id
# Upload the parts one by one.
# oss2.SizedFileAdapter() wraps fileobj in a new file object whose readable
# length equals size_to_upload.
with open(filename, 'rb') as fileobj:
parts = []
part_number = 1
offset = 0
while offset < total_size:
size_to_upload = min(part_size, total_size - offset)
result = bucket.upload_part(key, upload_id, part_number,
oss2.SizedFileAdapter(fileobj, size_to_upload))
parts.append(oss2.models.PartInfo(part_number, result.etag, size = size_to_upload, part_crc = result.crc))
offset += size_to_upload
part_number += 1
# Complete the multipart upload.
bucket.complete_multipart_upload(key, upload_id, parts)
# Verify the result.
with open(filename, 'rb') as fileobj:
assert bucket.get_object(key).read() == fileobj.read()
os.remove(filename)
| mit | 3,825,733,195,884,948,500 | 26.882979 | 114 | 0.715376 | false |
luoguizhou/gooderp_addons | warehouse/models/scan_barcode.py | 1 | 3228 | # -*- coding: utf-8 -*-
from odoo import models, fields, api
from odoo.exceptions import UserError
class ScanBarcode(models.Model):
_name = 'scan.barcode'
scan_barcode_input_code = fields.Char(string='输入要扫描的条码')
def prepare_line_out_data(self, line_out_ids):
line_data = {}
for line in line_out_ids:
line_data.update({line.goods_id.barcode: line})
return line_data
def contract_barcode_line_data(self, line_out_data, vals, code):
line_out_list = []
att = self.env['attribute'].search([('ean', '=', code)])
goods = self.env['goods'].search([('barcode', '=', code)])
if not att and not goods:
return {'warning': {'title': u'警告', 'message': u'不存在条码为 %s 的商品' % code}}
self.env['wh.move'].check_barcode(self._name, self.id, att, goods)
conversion = att and att.goods_id.conversion or goods.conversion
move, create_line, val = self.env['wh.move'].scan_barcode_each_model_operation(self._name, self.id, att,
goods,
conversion)
if not line_out_data.get(code):
if not create_line:
line_out_list.append(
(0, 0, self.env['wh.move'].prepare_move_line_data(att, val, goods, move)))
for currency_code, line in line_out_data.iteritems():
if isinstance(line.id, int):
if currency_code == code:
line_out_list.append((1, line.id,
{'goods_qty': line.goods_qty + 1}))
else:
line_out_list.append((4, line.id, False))
else:
currency_vals = {}
for val in vals:
currency_vals.update({val: line[val]})
if currency_code == code:
currency_vals.update({'goods_qty': line.goods_qty + 1})
line_out_list.append((0, 0, currency_vals))
else:
line_out_list.append((0, 0, currency_vals))
return line_out_list
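    # Note: the tuples built above are standard Odoo one2many commands:
    #   (0, 0, vals) creates a new line, (1, id, vals) updates an existing line,
    #   and (4, id, False) links/keeps an existing line unchanged.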
@api.multi
@api.onchange('scan_barcode_input_code')
def onchange_scan_barcode_input_code(self):
vals = ['cost_unit', 'uos_id', 'goods_id', 'warehouse_dest_id', 'goods_uos_qty', 'warehouse_id', 'uom_id',
'goods_qty', 'attribute_id', 'price_taxed', 'tax_rate', 'type', 'move_id']
if self.scan_barcode_input_code:
if ' ' in self.scan_barcode_input_code:
code_list = self.scan_barcode_input_code.split(' ')
else:
code_list = [self.scan_barcode_input_code]
for code in code_list:
line_out_data = self.prepare_line_out_data(self.line_out_ids)
line_out_list = self.contract_barcode_line_data(
line_out_data, vals, code)
if isinstance(line_out_list, dict):
return line_out_list
self.line_out_ids = line_out_list
self.scan_barcode_input_code = u''
| agpl-3.0 | -6,958,131,263,197,142,000 | 47.333333 | 114 | 0.512539 | false |
kevinwchang/Minecraft-Overviewer | overviewer_core/aux_files/genPOI.py | 1 | 21247 | #!/usr/bin/env python2
'''
genPOI.py
Scans regionsets for TileEntities and Entities, filters them, and writes out
POI/marker info.
A markerSet is list of POIs to display on a tileset. It has a display name,
and a group name.
markersDB.js holds a list of POIs in each group
markers.js holds a list of which markerSets are attached to each tileSet
'''
import gzip
import itertools
import json
import logging
import multiprocessing
import os
import re
import sys
import time
import urllib2
import datetime
from collections import defaultdict
from contextlib import closing
from multiprocessing import Pool
from optparse import OptionParser
from overviewer_core import logger
from overviewer_core import nbt
from overviewer_core import configParser, world
from overviewer_core.files import FileReplacer, get_fs_caps
UUID_LOOKUP_URL = 'https://sessionserver.mojang.com/session/minecraft/profile/'
def replaceBads(s):
"Replaces bad characters with good characters!"
bads = [" ", "(", ")"]
x=s
for bad in bads:
x = x.replace(bad,"_")
return x
# If you want to keep your stomach contents do not, under any circumstance,
# read the body of the following function. You have been warned.
# All of this could be replaced by a simple json.loads if Mojang had
# introduced a TAG_JSON, but they didn't.
#
# So here are a few curiosities how 1.7 signs get seen in 1.8 in Minecraft:
# - null ->
# - "null" -> null
# - ["Hello"] -> Hello
# - [Hello] -> Hello
# - [1,2,3] -> 123
# Mojang just broke signs for everyone who ever used [, { and ". GG.
def jsonText(s):
if s is None or s == "null":
return ""
if (s.startswith('"') and s.endswith('"')) or \
(s.startswith('{') and s.endswith('}')):
try:
js = json.loads(s)
except ValueError:
return s
def parseLevel(foo):
bar = ""
if isinstance(foo, list):
for extra in foo:
bar += parseLevel(extra)
elif isinstance(foo, dict):
if "text" in foo:
bar += foo["text"]
if "extra" in foo:
bar += parseLevel(foo["extra"])
elif isinstance(foo, basestring):
bar = foo
return bar
return parseLevel(js)
else:
return s
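# Example (sketch): jsonText('{"text":"Hello","extra":[{"text":" World"}]}')
# returns "Hello World", while a plain, unquoted string is passed through as-is.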
# Since functions are not pickleable, we send their names instead.
# Here, set up worker processes to have a name -> function map
bucketChunkFuncs = {}
def initBucketChunks(config_path):
global bucketChunkFuncs
mw_parser = configParser.MultiWorldParser()
mw_parser.parse(config_path)
# ought not to fail since we already did it once
config = mw_parser.get_validated_config()
for name, render in config['renders'].iteritems():
for f in render['markers']:
ff = f['filterFunction']
bucketChunkFuncs[ff.__name__] = ff
# yes there's a double parenthesis here
# see below for when this is called, and why we do this
# a smarter way would be functools.partial, but that's broken on python 2.6
# when used with multiprocessing
def parseBucketChunks((bucket, rset, filters)):
global bucketChunkFuncs
pid = multiprocessing.current_process().pid
markers = defaultdict(list)
i = 0
cnt = 0
mcnt_prev = 0
for b in bucket:
try:
data = rset.get_chunk(b[0],b[1])
for poi in itertools.chain(data['TileEntities'], data['Entities']):
if poi['id'] == 'Sign' or poi['id'] == 'minecraft:sign':
poi = signWrangler(poi)
for name, filter_function in filters:
ff = bucketChunkFuncs[filter_function]
result = ff(poi)
if result:
d = create_marker_from_filter_result(poi, result)
markers[name].append(d)
except nbt.CorruptChunkError:
logging.warning("Ignoring POIs in corrupt chunk %d,%d", b[0], b[1])
# Perhaps only on verbose ?
i = i + 1
if i == 250:
i = 0
cnt = 250 + cnt
mcnt = sum(len(v) for v in markers.itervalues())
if mcnt > mcnt_prev:
logging.info("Found %d markers in thread %d so far at %d chunks", mcnt, pid, cnt);
mcnt_prev = mcnt
return markers
def signWrangler(poi):
"""
Just does the JSON things for signs
"""
for field in ["Text1", "Text2", "Text3", "Text4"]:
poi[field] = jsonText(poi[field])
return poi
def handleEntities(rset, config, config_path, filters, markers):
"""
Add markers for Entities or TileEntities.
For this every chunk of the regionset is parsed and filtered using multiple
processes, if so configured.
This function will not return anything, but it will update the parameter
`markers`.
"""
logging.info("Looking for entities in %r", rset)
    numbuckets = config['processes']
if numbuckets < 0:
numbuckets = multiprocessing.cpu_count()
if numbuckets == 1:
for (x, z, mtime) in rset.iterate_chunks():
try:
data = rset.get_chunk(x, z, entities_only=True)
for poi in itertools.chain(data['TileEntities'], data['Entities']):
if poi['id'] == 'Sign' or poi['id'] == 'minecraft:sign': # kill me
poi = signWrangler(poi)
for name, __, filter_function, __, __, __ in filters:
result = filter_function(poi)
if result:
d = create_marker_from_filter_result(poi, result)
markers[name]['raw'].append(d)
except nbt.CorruptChunkError:
logging.warning("Ignoring POIs in corrupt chunk %d,%d", x,z)
else:
buckets = [[] for i in range(numbuckets)];
for (x, z, mtime) in rset.iterate_chunks():
i = x / 32 + z / 32
i = i % numbuckets
buckets[i].append([x, z])
for b in buckets:
logging.info("Buckets has %d entries", len(b));
# Create a pool of processes and run all the functions
pool = Pool(processes=numbuckets, initializer=initBucketChunks, initargs=(config_path,))
# simplify the filters dict, so pickle doesn't have to do so much
filters = [(name, filter_function.__name__) for name, __, filter_function, __, __, __ in filters]
results = pool.map(parseBucketChunks, ((buck, rset, filters) for buck in buckets))
logging.info("All the threads completed")
for marker_dict in results:
for name, marker_list in marker_dict.iteritems():
markers[name]['raw'].extend(marker_list)
logging.info("Done.")
class PlayerDict(dict):
use_uuid = False
_name = ''
uuid_cache = None # A cache for the UUID->profile lookups
@classmethod
def load_cache(cls, outputdir):
cache_file = os.path.join(outputdir, "uuidcache.dat")
if os.path.exists(cache_file):
try:
with closing(gzip.GzipFile(cache_file)) as gz:
cls.uuid_cache = json.load(gz)
logging.info("Loaded UUID cache from %r with %d entries",
cache_file, len(cls.uuid_cache.keys()))
except (ValueError, IOError):
logging.warning("Failed to load UUID cache -- it might be corrupt")
cls.uuid_cache = {}
corrupted_cache = cache_file + ".corrupted." + datetime.datetime.now().isoformat()
try:
os.rename(cache_file, corrupted_cache)
logging.warning("If %s does not appear to contain meaningful data, you may safely delete it", corrupted_cache)
except OSError:
logging.warning("Failed to backup corrupted UUID cache")
logging.info("Initialized an empty UUID cache")
else:
cls.uuid_cache = {}
logging.info("Initialized an empty UUID cache")
@classmethod
def save_cache(cls, outputdir):
cache_file = os.path.join(outputdir, "uuidcache.dat")
caps = get_fs_caps(outputdir)
with FileReplacer(cache_file, caps) as cache_file_name:
with closing(gzip.GzipFile(cache_file_name, "wb")) as gz:
json.dump(cls.uuid_cache, gz)
logging.info("Wrote UUID cache with %d entries",
len(cls.uuid_cache.keys()))
def __getitem__(self, item):
if item == "EntityId":
if not super(PlayerDict, self).has_key("EntityId"):
if self.use_uuid:
super(PlayerDict, self).__setitem__("EntityId", self.get_name_from_uuid())
else:
super(PlayerDict, self).__setitem__("EntityId", self._name)
return super(PlayerDict, self).__getitem__(item)
def get_name_from_uuid(self):
sname = self._name.replace('-','')
try:
profile = PlayerDict.uuid_cache[sname]
if profile['retrievedAt'] > time.mktime(self['time']):
return profile['name']
except (KeyError,):
pass
try:
profile = json.loads(urllib2.urlopen(UUID_LOOKUP_URL + sname).read())
if 'name' in profile:
profile['retrievedAt'] = time.mktime(time.localtime())
PlayerDict.uuid_cache[sname] = profile
return profile['name']
except (ValueError, urllib2.URLError):
logging.warning("Unable to get player name for UUID %s", self._name)
def handlePlayers(worldpath, filters, markers):
"""
Add markers for players to the list of markers.
For this the player files under the given `worldpath` are parsed and
filtered.
This function will not return anything, but it will update the parameter
`markers`.
"""
playerdir = os.path.join(worldpath, "playerdata")
useUUIDs = True
if not os.path.isdir(playerdir):
playerdir = os.path.join(worldpath, "players")
useUUIDs = False
if os.path.isdir(playerdir):
playerfiles = os.listdir(playerdir)
playerfiles = [x for x in playerfiles if x.endswith(".dat")]
isSinglePlayer = False
else:
playerfiles = [os.path.join(worldpath, "level.dat")]
isSinglePlayer = True
for playerfile in playerfiles:
try:
data = PlayerDict(nbt.load(os.path.join(playerdir, playerfile))[1])
data.use_uuid = useUUIDs
if isSinglePlayer:
data = data['Data']['Player']
except (IOError, TypeError):
logging.warning("Skipping bad player dat file %r", playerfile)
continue
playername = playerfile.split(".")[0]
if isSinglePlayer:
playername = 'Player'
data._name = playername
if useUUIDs:
data['uuid'] = playername
# Position at last logout
data['id'] = "Player"
data['x'] = int(data['Pos'][0])
data['y'] = int(data['Pos'][1])
data['z'] = int(data['Pos'][2])
# Time at last logout, calculated from last time the player's file was modified
data['time'] = time.localtime(os.path.getmtime(os.path.join(playerdir, playerfile)))
# Spawn position (bed or main spawn)
if "SpawnX" in data:
# Spawn position (bed or main spawn)
spawn = PlayerDict()
spawn.use_uuid = useUUIDs
spawn._name = playername
spawn["id"] = "PlayerSpawn"
spawn["x"] = data['SpawnX']
spawn["y"] = data['SpawnY']
spawn["z"] = data['SpawnZ']
for name, __, filter_function, rset, __, __ in filters:
# get the dimension for the filter
# This has do be done every time, because we have filters for
# different regionsets.
if rset.get_type():
dimension = int(re.match(r"^DIM(_MYST)?(-?\d+)$", rset.get_type()).group(2))
else:
dimension = 0
if data['Dimension'] == dimension:
result = filter_function(data)
if result:
d = create_marker_from_filter_result(data, result)
markers[name]['raw'].append(d)
if dimension == 0 and "SpawnX" in data:
result = filter_function(spawn)
if result:
d = create_marker_from_filter_result(spawn, result)
markers[name]['raw'].append(d)
def handleManual(manualpois, filters, markers):
"""
Add markers for manually defined POIs to the list of markers.
This function will not return anything, but it will update the parameter
`markers`.
"""
for poi in manualpois:
for name, __, filter_function, __, __, __ in filters:
result = filter_function(poi)
if result:
d = create_marker_from_filter_result(poi, result)
markers[name]['raw'].append(d)
def create_marker_from_filter_result(poi, result):
"""
Takes a POI and the return value of a filter function for it and creates a
marker dict depending on the type of the returned value.
"""
# every marker has a position either directly via attributes x, y, z or
# via tuple attribute Pos
if 'Pos' in poi:
d = dict((v, poi['Pos'][i]) for i, v in enumerate('xyz'))
else:
d = dict((v, poi[v]) for v in 'xyz')
# read some Defaults from POI
if "icon" in poi:
d["icon"] = poi['icon']
if "image" in poi:
d["image"] = poi['image']
if "createInfoWindow" in poi:
d["createInfoWindow"] = poi['createInfoWindow']
# Fill in the rest from result
if isinstance(result, basestring):
d.update(dict(text=result, hovertext=result))
elif isinstance(result, tuple):
d.update(dict(text=result[1], hovertext=result[0]))
# Dict support to allow more flexible things in the future as well as polylines on the map.
elif isinstance(result, dict):
d['text'] = result['text']
# Use custom hovertext if provided...
if 'hovertext' in result:
d['hovertext'] = unicode(result['hovertext'])
else: # ...otherwise default to display text.
d['hovertext'] = result['text']
if 'polyline' in result and hasattr(result['polyline'], '__iter__'):
d['polyline'] = []
for point in result['polyline']:
d['polyline'].append(dict(x=point['x'], y=point['y'], z=point['z'])) # point.copy() would work, but this validates better
if isinstance(result['color'], basestring):
d['strokeColor'] = result['color']
if "icon" in result:
d["icon"] = result['icon']
if "image" in result:
d["image"] = result['image']
if "createInfoWindow" in result:
d["createInfoWindow"] = result['createInfoWindow']
else:
raise ValueError("got an %s as result for POI with id %s" % (type(result).__name__, poi['id']))
return d
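# Example filter results (sketch): a filter function may return a plain string,
# a (hovertext, text) tuple, or a dict such as
#   {'text': 'Home', 'hovertext': 'Player home', 'icon': 'bed.png'}
# each of which becomes a marker dict whose x/y/z come from the POI itself.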
def main():
if os.path.basename(sys.argv[0]) == """genPOI.py""":
helptext = """genPOI.py
%prog --config=<config file> [options]"""
else:
helptext = """genPOI
%prog --genpoi --config=<config file> [options]"""
logger.configure()
parser = OptionParser(usage=helptext)
parser.add_option("-c", "--config", dest="config", action="store",
help="Specify the config file to use.")
parser.add_option("-q", "--quiet", dest="quiet", action="count",
help="Reduce logging output")
parser.add_option("--skip-scan", dest="skipscan", action="store_true",
help="Skip scanning for entities when using GenPOI")
parser.add_option("--skip-players", dest="skipplayers", action="store_true",
help="Skip getting player data when using GenPOI")
options, args = parser.parse_args()
if not options.config:
parser.print_help()
return
if options.quiet > 0:
logger.configure(logging.WARN, False)
# Parse the config file
mw_parser = configParser.MultiWorldParser()
mw_parser.parse(options.config)
try:
config = mw_parser.get_validated_config()
except Exception:
logging.exception("An error was encountered with your configuration. See the info below.")
return 1
destdir = config['outputdir']
# saves us from creating the same World object over and over again
worldcache = {}
filters = set()
marker_groups = defaultdict(list)
# collect all filters and get regionsets
for rname, render in config['renders'].iteritems():
# Convert render['world'] to the world path, and store the original
# in render['worldname_orig']
try:
worldpath = config['worlds'][render['world']]
except KeyError:
logging.error("Render %s's world is '%s', but I could not find a corresponding entry in the worlds dictionary.",
rname, render['world'])
return 1
render['worldname_orig'] = render['world']
render['world'] = worldpath
# find or create the world object
if (render['world'] not in worldcache):
w = world.World(render['world'])
worldcache[render['world']] = w
else:
w = worldcache[render['world']]
# get the regionset for this dimension
rset = w.get_regionset(render['dimension'][1])
if rset == None: # indicates no such dimension was found:
logging.warn("Sorry, you requested dimension '%s' for the render '%s', but I couldn't find it", render['dimension'][0], rname)
continue
# find filters for this render
for f in render['markers']:
# internal identifier for this filter
name = replaceBads(f['name']) + hex(hash(f['filterFunction']))[-4:] + "_" + hex(hash(rname))[-4:]
# add it to the list of filters
filters.add((name, f['name'], f['filterFunction'], rset, worldpath, rname))
# add an entry in the menu to show markers found by this filter
group = dict(groupName=name,
displayName = f['name'],
icon=f.get('icon', 'signpost_icon.png'),
createInfoWindow=f.get('createInfoWindow', True),
checked = f.get('checked', False))
marker_groups[rname].append(group)
# initialize the structure for the markers
markers = dict((name, dict(created=False, raw=[], name=filter_name))
for name, filter_name, __, __, __, __ in filters)
# apply filters to regionsets
if not options.skipscan:
# group filters by rset
keyfunc = lambda x: x[3]
sfilters = sorted(filters, key=keyfunc)
for rset, rset_filters in itertools.groupby(sfilters, keyfunc):
handleEntities(rset, config, options.config, list(rset_filters), markers)
# apply filters to players
if not options.skipplayers:
PlayerDict.load_cache(destdir)
# group filters by worldpath, so we only search for players once per
# world
keyfunc = lambda x: x[4]
sfilters = sorted(filters, key=keyfunc)
for worldpath, worldpath_filters in itertools.groupby(sfilters, keyfunc):
handlePlayers(worldpath, list(worldpath_filters), markers)
# add manual POIs
# group filters by name of the render, because only filter functions for
# the current render should be used on the current render's manualpois
keyfunc = lambda x: x[5]
sfilters = sorted(filters, key=keyfunc)
for rname, rname_filters in itertools.groupby(sfilters, keyfunc):
manualpois = config['renders'][rname]['manualpois']
handleManual(manualpois, list(rname_filters), markers)
logging.info("Done handling POIs")
logging.info("Writing out javascript files")
if not options.skipplayers:
PlayerDict.save_cache(destdir)
with open(os.path.join(destdir, "markersDB.js"), "w") as output:
output.write("var markersDB=")
json.dump(markers, output, indent=2)
output.write(";\n");
with open(os.path.join(destdir, "markers.js"), "w") as output:
output.write("var markers=")
json.dump(marker_groups, output, indent=2)
output.write(";\n");
with open(os.path.join(destdir, "baseMarkers.js"), "w") as output:
output.write("overviewer.util.injectMarkerScript('markersDB.js');\n")
output.write("overviewer.util.injectMarkerScript('markers.js');\n")
output.write("overviewer.util.injectMarkerScript('regions.js');\n")
output.write("overviewer.collections.haveSigns=true;\n")
logging.info("Done")
if __name__ == "__main__":
main()
| gpl-3.0 | 2,242,482,086,399,224,600 | 36.210158 | 138 | 0.584035 | false |
krishauser/Klampt | Python/python2_version/klampt/apps/klampt_browser.py | 1 | 30664 | from klampt import *
from klampt.io import loader,resource
from klampt.math import se3
from klampt.model.trajectory import Trajectory,RobotTrajectory
from klampt.model.multipath import MultiPath
from klampt.model import types
from klampt import vis
from klampt.vis.qtbackend import QtGLWindow
from klampt.vis.glcommon import GLMultiViewportProgram
import sys,os,time
from PyQt5 import QtGui
from PyQt5 import QtCore
from PyQt5 import QtWidgets
world_item_extensions = set(['.obj','.rob','.urdf','.env'])
robot_override_types = ['Config','Configs']
animation_types = ['Trajectory','LinearPath','MultiPath']
create_types = resource.visualEditTypes()[:-1]
def save(obj,fn):
if hasattr(obj,'saveFile'):
return obj.saveFile(fn)
if hasattr(obj,'save'):
return obj.save(fn)
type = loader.filenameToType(fn)
return loader.save(obj,type,fn)
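# Example (sketch): for a plain configuration list q, save(q, "home.config") falls
# through to loader.filenameToType ('Config') and loader.save, while objects such
# as WorldModel provide their own saveFile()/save() and are handled first.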
MAX_VIS_ITEMS = 1000
MAX_VIS_CACHE = 10
def copyCamera(cam,camDest):
camDest.rot = cam.rot[:]
camDest.tgt = cam.tgt[:]
camDest.dist = cam.dist
class MyMultiViewportProgram(GLMultiViewportProgram):
def __init__(self):
GLMultiViewportProgram.__init__(self)
self.animating = False
self.animationTime = 0
self.animationDuration = 0
self.animationStartTime = 0
self.items = dict()
def startAnim(self):
self.animating = True
self.animationStartTime = time.time()
self.idlesleep(0)
def stopAnim(self):
self.animating = False
self.idlesleep(float('inf'))
def setAnimTime(self,t):
self.stopAnim()
self.animationTime = t
self._updateTime(t)
def _updateTime(self,t):
#print "_updateTime",t
def clearStartTime(v):
v.animationStartTime = 0
for n,subapp in v.subAppearances.iteritems():
clearStartTime(subapp)
for (k,item) in self.items.iteritems():
item.plugin.animationTime(self.animationTime)
for (k,v) in item.plugin.items.iteritems():
#do animation updates
clearStartTime(v)
v.updateAnimation(t)
self.refresh()
def idlefunc(self):
if not self.animating:
GLMultiViewportProgram.idlefunc(self)
return
t = time.time()
self.animationTime = t - self.animationStartTime
if self.animationDuration == 0:
self.animationTime = 0
else:
self.animationTime = self.animationTime % self.animationDuration
self._updateTime(self.animationTime)
class ResourceItem:
def __init__(self,obj):
self.obj = obj
self.plugin = None
self.program = None
self.animationBuddy = None
class ResourceBrowser(QtWidgets.QMainWindow):
def __init__(self,glwindow=None,parent=None):
QtWidgets.QMainWindow.__init__(self,parent)
# Splitter to show 2 views in same widget easily.
self.splitter = QtWidgets.QSplitter()
# The model.
self.model = QtWidgets.QFileSystemModel()
# You can setRootPath to any path.
self.model.setRootPath(QtCore.QDir.rootPath())
# Add filters
filters = []
print "ALLOWABLE FILE EXTENSIONS"
for k,v in loader.extensionToTypes.iteritems():
filters.append("*"+k)
print " ",k
filters.append("*.xml")
filters.append("*.json")
filters.append("*.txt")
filters.append("*.obj")
filters.append("*.rob")
filters.append("*.urdf")
filters.append("*.env")
self.model.setNameFilters(filters)
# Create the view in the splitter.
self.view = QtWidgets.QTreeView()
# Set the model of the view.
self.view.setModel(self.model)
#nicer size for columns
self.view.header().resizeSection(0, 200)
self.view.header().resizeSection(1, 75)
self.view.header().resizeSection(2, 75)
self.view.header().resizeSection(3, 150)
# Set the root index of the view as the user's home directory.
#self.view.setRootIndex(self.model.index(QtCore.QDir.homePath()))
self.view.setRootIndex(self.model.index(os.getcwd()))
self.view.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection)
self.world = WorldModel()
self.tempWorld = WorldModel()
self.active = dict()
self.emptyVisPlugin = vis.VisualizationPlugin()
self.emptyVisPlugin.add("world",self.world)
self.emptyVisProgram = None
self.selected = set()
self.visCache = []
self.modified = set()
self.left = QtWidgets.QFrame()
self.right = QtWidgets.QFrame()
self.leftLayout = QtWidgets.QVBoxLayout()
self.left.setLayout(self.leftLayout)
self.upButton = QtWidgets.QPushButton("Up")
self.leftLayout.addWidget(self.upButton)
self.leftLayout.addWidget(self.view)
#visualization configuration
vbuttonLayout = QtWidgets.QHBoxLayout()
self.autoFitCameraButton = QtWidgets.QCheckBox("Auto-fit cameras")
self.lockCameraCheck = QtWidgets.QCheckBox("Lock cameras")
#self.overlayCheck = QtWidgets.QCheckBox("Overlay")
self.maxGridItemsLabel = QtWidgets.QLabel("Grid width")
self.maxGridItems = QtWidgets.QSpinBox()
self.maxGridItems.setRange(3,15)
self.autoFitCameraButton.setToolTip("If checked, the camera is automatically fit the the items in the scene")
self.lockCameraCheck.setToolTip("If checked, all cameras are navigated simultaneously")
#self.overlayCheck.setTooltip("If checked, all items are drawn on top of one another")
self.maxGridItems.setToolTip("Max # height/width in the visualization pane")
vbuttonLayout.addWidget(self.autoFitCameraButton)
vbuttonLayout.addWidget(self.lockCameraCheck)
#vbuttonLayout.addWidget(self.overlayCheck)
vbuttonLayout.addWidget(self.maxGridItemsLabel)
vbuttonLayout.addWidget(self.maxGridItems)
self.leftLayout.addLayout(vbuttonLayout)
#playback
self.timeDriver = QtWidgets.QSlider()
self.timeDriver.setOrientation(QtCore.Qt.Horizontal)
self.timeDriver.setRange(0,1000)
#self.timeDriver.setSizeHint()
self.playButton = QtWidgets.QPushButton("Play")
self.playButton.setCheckable(True)
self.stopButton = QtWidgets.QPushButton("Stop")
self.playButton.setToolTip("Starts/pauses playing any selected animations")
self.stopButton.setToolTip("Stops playing any selected animations")
label = QtWidgets.QLabel("Time")
label.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
vbuttonLayout = QtWidgets.QHBoxLayout()
vbuttonLayout.addWidget(label)
vbuttonLayout.addWidget(self.timeDriver)
vbuttonLayout.addWidget(self.playButton)
vbuttonLayout.addWidget(self.stopButton)
self.leftLayout.addLayout(vbuttonLayout)
#editing
vbuttonLayout = QtWidgets.QHBoxLayout()
self.editButton = QtWidgets.QPushButton("Edit item")
self.saveButton = QtWidgets.QPushButton("Save item")
self.editButton.setToolTip("Pops up a dialog to edit the selected item, if available")
self.saveButton.setToolTip("Saves changes to edited items")
self.saveButton.setEnabled(False)
self.createComboBox = QtWidgets.QComboBox()
self.createComboBox.addItem("Create new item...")
for n in create_types:
self.createComboBox.addItem(n)
vbuttonLayout.addWidget(self.editButton)
vbuttonLayout.addWidget(self.saveButton)
vbuttonLayout.addWidget(self.createComboBox)
self.leftLayout.addLayout(vbuttonLayout)
#world configuration
vbuttonLayout = QtWidgets.QHBoxLayout()
self.addButton = QtWidgets.QPushButton("Add to world")
self.clearButton = QtWidgets.QPushButton("Clear world")
self.addButton.setToolTip("Adds the selected item(s) to the reference world")
self.clearButton.setToolTip("Clears the reference world")
vbuttonLayout.addWidget(self.addButton)
vbuttonLayout.addWidget(self.clearButton)
self.leftLayout.addLayout(vbuttonLayout)
self.splitter.addWidget(self.left)
self.splitter.addWidget(self.right)
self.splitter.setHandleWidth(7)
self.setCentralWidget(self.splitter)
self.rightLayout = QtWidgets.QVBoxLayout()
self.right.setLayout(self.rightLayout)
if glwindow is None:
self.glwidget = QtGLWindow("viewport")
else:
self.glwidget = glwindow
self.rightLayout.addWidget(self.glwidget)
self.glviewportManager = MyMultiViewportProgram()
self.glwidget.setProgram(self.glviewportManager)
self.glwidget.setParent(self.splitter)
self.glviewportManager.sizePolicy = 'squeeze'
self.glviewportManager.addView(self.emptyVisPlugin)
self.glviewportManager.items = self.active
self.emptyVisProgram = self.glviewportManager.views[-1]
self.glwidget.setFixedSize(QtWidgets.QWIDGETSIZE_MAX,QtWidgets.QWIDGETSIZE_MAX)
self.glwidget.setSizePolicy(QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding,QtWidgets.QSizePolicy.Expanding))
self.glwidget.adjustSize()
self.glwidget.refresh()
self.upButton.clicked.connect(self.onUpClicked)
self.view.selectionModel().selectionChanged.connect(self.selection_changed)
self.view.doubleClicked.connect(self.onViewDoubleClick)
self.autoFitCameraButton.clicked.connect(self.onAutoFitCamera)
self.maxGridItems.valueChanged.connect(self.maxGridItemsChanged)
self.lockCameraCheck.toggled.connect(self.onLockCamerasToggled)
self.timeDriver.valueChanged.connect(self.timeDriverChanged)
self.playButton.toggled.connect(self.togglePlay)
self.stopButton.clicked.connect(self.stopPlay)
self.editButton.clicked.connect(self.onEditClicked)
self.saveButton.clicked.connect(self.onSaveClicked)
self.createComboBox.currentIndexChanged.connect(self.onCreateIndexChanged)
self.addButton.clicked.connect(self.onAddClicked)
self.clearButton.clicked.connect(self.onClearClicked)
def closeEvent(self,event):
if len(self.modified) > 0:
reply = QtWidgets.QMessageBox.question(self, "Unsaved changes", "Would you like to save changes to " + ', '.join(self.modified)+ "?",
QtWidgets.QMessageBox.Yes|QtWidgets.QMessageBox.No);
if reply == QtWidgets.QMessageBox.Yes:
self.onSaveClicked()
vis.show(False)
def onViewDoubleClick(self):
indices = self.view.selectedIndexes()
if len(indices) == 0: return
item = indices[0]
name = str(self.model.filePath(item))
oldselected = self.selected
self.selected = set([name])
self.onAddClicked()
self.selected = oldselected
def onUpClicked(self):
currentRoot = self.view.rootIndex()
self.view.setRootIndex(currentRoot.parent())
def selection_changed(self,newSelection,deselected):
#print "klampt_browser: Selection changed!"
for i in newSelection.indexes():
if i.column() == 0:
fn = str(i.model().filePath(i))
self.selected.add(fn)
self.add(fn)
#print " value:",fn
#print "klampt_browser: Deselected:"
for i in deselected.indexes():
if i.column() == 0:
fn = str(i.model().filePath(i))
self.selected.remove(fn)
self.remove(fn)
#klampt_browser: print " value:",fn
self.refresh()
def onAutoFitCamera(self):
if self.autoFitCameraButton.isChecked():
for (k,item) in self.active.iteritems():
vis.autoFitViewport(item.program.view,[self.world,item.obj])
def onLockCamerasToggled(self,on):
self.glviewportManager.broadcast = on
if on:
self.lockCameras()
def lockCameras(self):
view0 = self.glviewportManager.views[0].view
cam0 = view0.camera
for p in self.glviewportManager.views[1:]:
cam = p.view.camera
copyCamera(cam0,cam)
def timeDriverChanged(self,value):
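        # The slider was configured with setRange(0, 1000) in __init__, so
        # value*0.001 is a fraction in [0, 1] of the manager's total
        # animationDuration.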
u = value * 0.001
animTrajectoryTime = u*self.glviewportManager.animationDuration
for (k,item) in self.active.iteritems():
obj = item.obj
plugin = item.plugin
if item.animationBuddy is not None:
path = item.animationBuddy
if plugin.getItem(path).animation is None:
plugin.animate(path,obj,endBehavior='halt')
else:
anim = plugin.getItem(path).animation
plugin.pauseAnimation(False)
self.glviewportManager.setAnimTime(animTrajectoryTime)
def togglePlay(self,value):
self.animating = value
self.glviewportManager.refresh()
if value:
for (k,item) in self.active.iteritems():
obj = item.obj
plugin = item.plugin
if item.animationBuddy is not None:
plugin.animate(item.animationBuddy,obj,endBehavior='halt')
plugin.pauseAnimation(False)
self.glviewportManager.startAnim()
self.glviewportManager.refresh()
else:
#pause
self.glviewportManager.stopAnim()
#self.idlesleep(float('inf'))
def stopPlay(self):
self.playButton.setChecked(False)
#revert to no animations
for (k,item) in self.active.iteritems():
obj = item.obj
plugin = item.plugin
if isinstance(obj,(Trajectory,MultiPath)):
robotpath = ('world',self.world.robot(0).getName())
plugin.animate(robotpath,None)
plugin.pauseAnimation()
def onEditClicked(self):
if len(self.active) == 0:
QtWidgets.QMessageBox.warning(self.splitter,"Invalid item","No item selected, can't edit")
return
fn = sorted(self.active.keys())[0]
def doedit():
print "klampt_browser: Launching resource.edit",fn,"..."
try:
(save,obj) = resource.edit(name=fn,value=self.active[fn].obj,world=self.world)
except Exception as e:
print "klampt_browser: Exception raised during resource.edit:",e
QtWidgets.QMessageBox.warning(self.splitter,"Editing not available","Unable to edit item of type "+self.active[fn].obj.__class__.__name__)
return
if save and obj is not None:
self.active[fn].obj = obj
#mark it as modified and re-add it to the visualization
basename = os.path.basename(fn)
self.active[fn].plugin.add(basename,obj)
self.modified.add(fn)
self.saveButton.setEnabled(True)
QtCore.QTimer.singleShot(0,doedit)
def onSaveClicked(self):
for fn in self.modified:
if not save(self.active[fn].obj,fn):
print "klampt_browser: Error saving file",fn
self.modified = set()
self.saveButton.setEnabled(False)
def onCreateIndexChanged(self,item):
if item == 0: return
type = create_types[item-1]
robot = None
if self.world.numRobots() > 0:
robot = self.world.robot(0)
try:
(save,obj) = resource.edit("untitled",types.make(type,robot),type=type,world=self.world)
except Exception as e:
print "klampt_browser: Exception raised during resource.edit():",e
QtWidgets.QMessageBox.warning(self.splitter,"Creation not available","Unable to create item of type "+type+", did you remember to add items to the reference world?")
return
if obj is not None and save:
fn = resource.save(obj,type,directory='')
if fn is not None:
self.loadedItem(fn,obj)
#TODO: should we add to selection in tree view?
self.createComboBox.setCurrentIndex(0)
def onAddClicked(self):
if self.world.numIDs() == 0:
for name in self.selected:
if name not in self.active: continue
copyCamera(self.active[name].program.view.camera,self.emptyVisProgram.view.camera)
break
todel = []
for name in self.selected:
if name not in self.active: continue
s = self.active[name].obj
if isinstance(s,(RobotModel,RigidObjectModel,TerrainModel)):
self.world.add(s.getName(),s)
self.tempWorld.remove(s)
todel.append(name)
elif isinstance(s,WorldModel):
for i in xrange(s.numRobots()):
self.world.add(s.robot(i).getName(),s.robot(i))
for i in xrange(s.numRigidObjects()):
self.world.add(s.rigidObject(i).getName(),s.rigidObject(i))
for i in xrange(s.numTerrains()):
self.world.add(s.terrain(i).getName(),s.terrain(i))
for k,item in self.active.iteritems():
item.plugin.add("world",self.world)
todel.append(name)
elif isinstance(s,(TriangleMesh,PointCloud,GeometricPrimitive)):
t = self.world.makeTerrain(name)
t.geometry().set(Geometry3D(s))
todel.append(name)
elif isinstance(s,Geometry3D):
t = self.world.makeTerrain(name)
t.geometry().set(s.clone())
todel.append(name)
for name in todel:
self.remove(name)
if len(todel) > 0:
self.refresh()
def onClearClicked(self):
self.world = WorldModel()
self.tempWorld = WorldModel()
self.active = dict()
self.visCache = []
self.emptyVisPlugin.add("world",self.world)
self.refresh()
def add(self,fn,openDir=True,warn=True):
#assert fn not in self.active
if fn in self.active:
print "add(): Warning, file",fn,"is already active"
return
for i,(cfn,citem) in enumerate(self.visCache):
if cfn == fn:
print
print "klampt_browser: PULLED",fn,"FROM CACHE"
print
self.active[fn] = citem
return True
if len(self.active) >= MAX_VIS_ITEMS:
return
if os.path.isdir(fn):
if openDir:
failures = []
successes = []
for f in os.listdir(fn):
if f not in ['.','..'] and os.path.splitext(f)[1] != '':
if not self.add(os.path.join(fn,f),openDir=False,warn=False):
failures.append(f)
else:
successes.append(f)
if len(failures) != 0 and len(successes) != 0:
QtWidgets.QMessageBox.warning(self.splitter,"Invalid items","Could not load files "+', '.join(failures)+" as Klamp't elements")
return True
else:
return False
path,ext = os.path.splitext(fn)
#print "Extension is",ext
if ext in world_item_extensions:
try:
worldid = self.tempWorld.loadElement(fn)
except Exception:
if warn:
QtWidgets.QMessageBox.warning(self.splitter,"Invalid item","Could not load "+fn+" as a Klamp't world element")
return False
if worldid < 0:
if warn:
QtWidgets.QMessageBox.warning(self.splitter,"Invalid item","Could not load "+fn+" as a Klamp't world element")
return False
obj = None
for i in xrange(self.tempWorld.numRobots()):
if self.tempWorld.robot(i).getID() == worldid:
obj = self.tempWorld.robot(i)
break
for i in xrange(self.tempWorld.numRigidObjects()):
if self.tempWorld.rigidObject(i).getID() == worldid:
obj = self.tempWorld.rigidObject(i)
break
for i in xrange(self.tempWorld.numTerrains()):
if self.tempWorld.terrain(i).getID() == worldid:
obj = self.tempWorld.terrain(i)
break
assert obj is not None,"Hmm... couldn't find world id %d in world?"%(worldid,)
self.loadedItem(fn,obj)
return True
try:
type = loader.filenameToType(fn)
except RuntimeError:
if warn:
QtWidgets.QMessageBox.warning(self.splitter,"Invalid item","Could not load file "+fn+" as a known Klamp't type")
return False
if type == 'xml':
#try loading a world
try:
world = WorldModel()
res = world.readFile(fn)
if not res:
try:
obj = loader.load('MultiPath',fn)
except Exception as e:
if warn:
print "klampt_browser: Trying MultiPath load, got exception",e
import traceback
traceback.print_exc()
QtWidgets.QMessageBox.warning(self.splitter,"Invalid WorldModel","Could not load "+fn+" as a world XML file")
return False
self.loadedItem(fn,obj)
return True
except IOError:
if warn:
QtWidgets.QMessageBox.warning(self.splitter,"Invalid WorldModel","Could not load "+fn+" as a world XML file")
return False
self.loadedItem(fn,world)
            return True
elif type == 'json':
import json
f = open(fn,'r')
jsonobj = json.load(f)
try:
obj = loader.fromJson(jsonobj)
except Exception:
if warn:
QtWidgets.QMessageBox.warning(self.splitter,"Invalid JSON","Could not recognize "+fn+" as a known Klamp't type")
return False
else:
try:
obj = loader.load(type,fn)
except Exception as e:
if warn:
QtWidgets.QMessageBox.warning(self.splitter,"Invalid item","Error while loading file "+fn+": "+str(e))
return False
self.loadedItem(fn,obj)
return True
def loadedItem(self,fn,obj):
if fn in self.active:
print "klampt_browser: Re-loaded item",fn,"so I'm first removing it"
self.remove(fn)
assert fn not in self.active
item = ResourceItem(obj)
self.active[fn] = item
item.plugin = vis.VisualizationPlugin()
basename = os.path.basename(fn)
#determine whether it's being animated
if isinstance(obj,Trajectory) and len(obj.milestones) > 0:
d = len(obj.milestones[0])
if self.world.numRobots() > 0 and d == self.world.robot(0).numLinks():
obj = RobotTrajectory(self.world.robot(0),obj.times,obj.milestones)
robotpath = ('world',self.world.robot(0).getName())
item.animationBuddy = robotpath
elif d == 3:
item.plugin.add("anim_point",[0,0,0])
item.animationBuddy = "anim_point"
elif d == 12:
item.plugin.add("anim_xform",se3.identity())
item.animationBuddy = "anim_xform"
else:
print "klampt_browser: Can't interpret trajectory of length",d
elif isinstance(obj,MultiPath):
if self.world.numRobots() > 0:
robotpath = ('world',self.world.robot(0).getName())
item.animationBuddy = robotpath
item.plugin.add("world",self.world)
item.plugin.add(basename,obj)
item.plugin.addText("label",basename,(10,10))
try:
type = vis.objectToVisType(obj,self.world)
except:
type = 'unknown'
if type in robot_override_types:
if self.world.numRobots() > 0:
path = ('world',self.world.robot(0).getName())
item.plugin.hide(path)
item.plugin.initialize()
def remove(self,fn,openDir=True):
if os.path.isdir(fn):
if openDir:
for f in os.listdir(fn):
if f not in ['.','..'] and os.path.splitext(f)[1] != '':
self.remove(os.path.join(fn,f),openDir=False)
return
if fn not in self.active:
return
if fn in self.modified:
reply = QtWidgets.QMessageBox.question(self, "Unsaved changes", "Would you like to save changes to " + ', '.join(self.modified)+ "?",
QtWidgets.QMessageBox.Yes|QtWidgets.QMessageBox.No);
if reply == QtWidgets.QMessageBox.Yes:
save(self.active[fn],fn)
self.modified.remove(fn)
s = self.active[fn]
del self.active[fn]
if s.program is not None:
copyCamera(s.program.view.camera,self.emptyVisProgram.view.camera)
print
print "klampt_browser: ADDING",fn,"TO CACHE"
print
self.visCache.append((fn,s))
if len(self.visCache) > MAX_VIS_CACHE:
self.visCache.pop(0)
cleartemp = isinstance(s.obj,(RobotModel,RigidObjectModel,TerrainModel))
if cleartemp:
for (k,v) in self.active.iteritems():
if isinstance(v.obj,(RobotModel,RigidObjectModel,TerrainModel)):
cleartemp = False
break
if cleartemp:
if self.tempWorld.numRobots() + self.tempWorld.numRigidObjects() + self.tempWorld.numTerrains() > 10:
print "klampt_browser: Clearing temp world..."
self.tempWorld = WorldModel()
self.visCache = [(fn,s) for (fn,s) in self.visCache if not isinstance(s.obj,(RobotModel,RigidObjectModel,TerrainModel))]
def maxGridItemsChanged(self):
self.refresh()
def refresh(self):
self.glviewportManager.clearViews()
if len(self.active) == 0:
self.glviewportManager.addView(self.emptyVisProgram)
else:
for k in sorted(self.active.keys()):
item = self.active[k]
if item.program is not None:
item.program.view.w,item.program.view.h = (640,480)
self.glviewportManager.addView(item.program)
else:
#new view
self.glviewportManager.addView(item.plugin)
item.program = self.glviewportManager.views[-1]
if self.autoFitCameraButton.isChecked():
item.plugin.autoFitViewport(item.program.view,[self.world,item.obj])
else:
copyCamera(self.emptyVisProgram.view.camera,item.program.view.camera)
if len(self.glviewportManager.views) >= self.maxGridItems.value()**2:
break
if self.glviewportManager.broadcast: #locking cameras
self.lockCameras()
self.glviewportManager.animationDuration = 0
for (k,item) in self.active.iteritems():
obj = item.obj
if isinstance(obj,(Trajectory,MultiPath)):
self.glviewportManager.animationDuration = max(self.glviewportManager.animationDuration,obj.duration())
print "klampt_browser: Setting animation duration to",self.glviewportManager.animationDuration
self.glviewportManager.refresh()
def main():
print """
===============================================================================
A program to quickly browse Klamp't objects.
USAGE: %s [item1 item2 ...]
where the given items are world, robot, terrain, object, or geometry files. Run
it without arguments
%s
for an empty reference world. You may add items to the reference world using
the `Add to World` button. If you know what items to use in the reference
world, run it with
%s world.xml
or
%s item1 item2 ...
where the items are world, robot, terrain, object, or geometry files.
===============================================================================
"""%(sys.argv[0],sys.argv[0],sys.argv[0],sys.argv[0])
#must be explicitly deleted for some reason in PyQt5...
g_browser = None
def makefunc(gl_backend):
global g_browser
browser = ResourceBrowser(gl_backend)
g_browser = browser
dw = QtWidgets.QDesktopWidget()
x=dw.width()*0.8
y=dw.height()*0.8
browser.setFixedSize(x,y)
for fn in sys.argv[1:]:
res = browser.world.readFile(fn)
if not res:
print "Unable to load model",fn
print "Quitting..."
sys.exit(1)
print "Added",fn,"to world"
if len(sys.argv) > 1:
browser.emptyVisPlugin.add("world",browser.world)
return browser
vis.customUI(makefunc)
vis.show()
vis.setWindowTitle("Klamp't Resource Browser")
vis.spin(float('inf'))
vis.kill()
del g_browser
return
#this code below is incorrect...
app = QtWidgets.QApplication(sys.argv)
browser = ResourceBrowser()
for fn in sys.argv[1:]:
res = browser.world.readFile(fn)
if not res:
print "Unable to load model",fn
print "Quitting..."
sys.exit(1)
print "Added",fn,"to world"
if len(sys.argv) > 1:
browser.emptyVisPlugin.add("world",browser.world)
dw = QtWidgets.QDesktopWidget()
x=dw.width()*0.8
y=dw.height()*0.8
browser.setFixedSize(x,y)
#browser.splitter.setWindowState(QtCore.Qt.WindowMaximized)
browser.setWindowTitle("Klamp't Resource Browser")
browser.show()
# Start the main loop.
res = app.exec_()
return res
if __name__ == '__main__':
res = main()
sys.exit(res) | bsd-3-clause | -2,346,998,096,091,174,000 | 40.664402 | 177 | 0.591541 | false |
bjodah/chempy | chempy/util/parsing.py | 1 | 18241 | # -*- coding: utf-8 -*-
""" Functions for chemical formulae and reactions """
from collections import defaultdict
import re
import warnings
from .pyutil import ChemPyDeprecationWarning, memoize
from .periodic import symbols
parsing_library = "pyparsing" # info used for selective testing.
def get_parsing_context():
""" returns the default dictionary for parsing strings in chempy """
import chempy
from chempy.kinetics import rates
from chempy.units import default_units, default_constants, to_unitless
globals_ = dict(to_unitless=to_unitless, chempy=chempy)
def _update(mod, keys=None):
if keys is None:
keys = dir(mod)
globals_.update({k: getattr(mod, k) for k in keys if not k.startswith("_")})
try:
import numpy
except ImportError:
def _numpy_not_installed_raise(*args, **kwargs):
raise ImportError("numpy not installed, no such method")
class numpy:
array = staticmethod(_numpy_not_installed_raise)
log = staticmethod(_numpy_not_installed_raise)
exp = staticmethod(_numpy_not_installed_raise)
_update(numpy, keys="array log exp".split()) # could of course add more
_update(rates)
_update(chempy)
for df in [default_units, default_constants]:
if df is not None:
globals_.update(df.as_dict())
return globals_
@memoize()
def _get_formula_parser():
"""Create a forward pyparsing parser for chemical formulae
BNF for simple chemical formula (no nesting)
integer :: '0'..'9'+
element :: 'A'..'Z' 'a'..'z'*
term :: element [integer]
formula :: term+
BNF for nested chemical formula
integer :: '0'..'9'+
element :: 'A'..'Z' 'a'..'z'*
term :: (element | '(' formula ')') [integer]
formula :: term+
Notes
-----
The code in this function is from an answer on StackOverflow:
http://stackoverflow.com/a/18555142/790973
written by:
Paul McGuire, http://stackoverflow.com/users/165216/paul-mcguire
in answer to the question formulated by:
Thales MG, http://stackoverflow.com/users/2708711/thales-mg
the code is licensed under 'CC-WIKI'.
(see: http://blog.stackoverflow.com/2009/06/attribution-required/)
"""
_p = __import__(parsing_library)
Forward, Group, OneOrMore = _p.Forward, _p.Group, _p.OneOrMore
Optional, ParseResults, Regex = _p.Optional, _p.ParseResults, _p.Regex
Suppress, Word, nums = _p.Suppress, _p.Word, _p.nums
LPAR, RPAR = map(Suppress, "()")
integer = Word(nums)
# add parse action to convert integers to ints, to support doing addition
# and multiplication at parse time
integer.setParseAction(lambda t: int(t[0]))
# element = Word(alphas.upper(), alphas.lower())
# or if you want to be more specific, use this Regex
element = Regex(
r"A[cglmrstu]|B[aehikr]?|C[adeflmnorsu]?|D[bsy]|E[rsu]|F[elmr]?|"
"G[ade]|H[efgos]?|I[nr]?|Kr?|L[airuv]|M[cdgnot]|N[abdehiop]?|"
"O[gs]?|P[abdmortu]?|R[abefghnu]|S[bcegimnr]?|T[abcehilms]|"
"U|V|W|Xe|Yb?|Z[nr]"
)
# forward declare 'formula' so it can be used in definition of 'term'
formula = Forward()
term = Group(
(element | Group(LPAR + formula + RPAR)("subgroup"))
+ Optional(integer, default=1)("mult")
)
# add parse actions for parse-time processing
# parse action to multiply out subgroups
def multiplyContents(tokens):
t = tokens[0]
# if these tokens contain a subgroup, then use multiplier to
# extend counts of all elements in the subgroup
if t.subgroup:
mult = t.mult
for term in t.subgroup:
term[1] *= mult
return t.subgroup
term.setParseAction(multiplyContents)
# add parse action to sum up multiple references to the same element
def sumByElement(tokens):
elementsList = [t[0] for t in tokens]
# construct set to see if there are duplicates
duplicates = len(elementsList) > len(set(elementsList))
# if there are duplicate element names, sum up by element and
# return a new nested ParseResults
if duplicates:
ctr = defaultdict(int)
for t in tokens:
ctr[t[0]] += t[1]
return ParseResults([ParseResults([k, v]) for k, v in ctr.items()])
# define contents of a formula as one or more terms
formula << OneOrMore(term)
formula.setParseAction(sumByElement)
return formula
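# Illustrative example: the expression returned above yields (element, count)
# pairs with nested groups multiplied out, so parsing "Fe(CN)6" gives
# Fe: 1, C: 6, N: 6.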
def _get_charge(chgstr):
if chgstr == "+":
return 1
elif chgstr == "-":
return -1
for token, anti, sign in zip("+-", "-+", (1, -1)):
if token in chgstr:
if anti in chgstr:
raise ValueError("Invalid charge description (+ & - present)")
before, after = chgstr.split(token)
if len(before) > 0 and len(after) > 0:
raise ValueError("Values both before and after charge token")
if len(before) > 0:
# will_be_missing_in='0.8.0'
warnings.warn(
"'Fe/3+' deprecated, use e.g. 'Fe+3'",
ChemPyDeprecationWarning,
stacklevel=3,
)
return sign * int(1 if before == "" else before)
if len(after) > 0:
return sign * int(1 if after == "" else after)
raise ValueError("Invalid charge description (+ or - missing)")
def _formula_to_parts(formula, prefixes, suffixes):
# Drop prefixes and suffixes
drop_pref, drop_suff = [], []
for ign in prefixes:
if formula.startswith(ign):
drop_pref.append(ign)
formula = formula[len(ign) :]
for ign in suffixes:
if formula.endswith(ign):
drop_suff.append(ign)
formula = formula[: -len(ign)]
# Extract charge
if "/" in formula:
# will_be_missing_in='0.8.0'
warnings.warn(
"/ depr. (before 0.5.0): use 'Fe+3' over 'Fe/3+'",
ChemPyDeprecationWarning,
stacklevel=3,
)
parts = formula.split("/")
if "+" in parts[0] or "-" in parts[0]:
raise ValueError("Charge needs to be separated with a /")
if parts[1] is not None:
wo_pm = parts[1].replace("+", "").replace("-", "")
if wo_pm != "" and not str.isdigit(wo_pm):
raise ValueError("Non-digits in charge specifier")
if len(parts) > 2:
raise ValueError("At most one '/' allowed in formula")
else:
for token in "+-":
if token in formula:
if formula.count(token) > 1:
raise ValueError("Multiple tokens: %s" % token)
parts = formula.split(token)
parts[1] = token + parts[1]
break
else:
parts = [formula, None]
return parts + [tuple(drop_pref), tuple(drop_suff[::-1])]
def _parse_stoich(stoich):
if stoich == "e": # special case, the electron is not an element
return {}
return {
symbols.index(k) + 1: n for k, n in _get_formula_parser().parseString(stoich)
}
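# For illustration: _parse_stoich('H2O') maps atomic numbers to counts,
# i.e. {1: 2, 8: 1}, and the electron 'e' parses to an empty dict.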
_greek_letters = (
"alpha",
"beta",
"gamma",
"delta",
"epsilon",
"zeta",
"eta",
"theta",
"iota",
"kappa",
"lambda",
"mu",
"nu",
"xi",
"omicron",
"pi",
"rho",
"sigma",
"tau",
"upsilon",
"phi",
"chi",
"psi",
"omega",
)
_greek_u = u"αβγδεζηθικλμνξοπρστυφχψω"
_latex_mapping = {k + "-": "\\" + k + "-" for k in _greek_letters}
_latex_mapping["epsilon-"] = "\\varepsilon-"
_latex_mapping["omicron-"] = "o-"
_latex_mapping["."] = "^\\bullet "
_latex_infix_mapping = {".": "\\cdot "}
_unicode_mapping = {k + "-": v + "-" for k, v in zip(_greek_letters, _greek_u)}
_unicode_mapping["."] = u"⋅"
_unicode_infix_mapping = {".": u"·"}
_html_mapping = {k + "-": "&" + k + ";-" for k in _greek_letters}
_html_mapping["."] = "⋅"
_html_infix_mapping = _html_mapping
def _get_leading_integer(s):
m = re.findall(r"^\d+", s)
if len(m) == 0:
m = 1
elif len(m) == 1:
s = s[len(m[0]) :]
m = int(m[0])
else:
raise ValueError("Failed to parse: %s" % s)
return m, s
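# For illustration: _get_leading_integer('7H2O') returns (7, 'H2O') and
# _get_leading_integer('H2O') returns (1, 'H2O'); this is what makes hydrate
# formulas such as 'Na2CO3.7H2O' work in formula_to_composition below.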
def formula_to_composition(
formula, prefixes=None, suffixes=("(s)", "(l)", "(g)", "(aq)")
):
"""Parse composition of formula representing a chemical formula
Composition is represented as a dict mapping int -> int (atomic
number -> multiplicity). "Atomic number" 0 represents net charge.
Parameters
----------
formula: str
Chemical formula, e.g. 'H2O', 'Fe+3', 'Cl-'
prefixes: iterable strings
Prefixes to ignore, e.g. ('.', 'alpha-')
suffixes: tuple of strings
Suffixes to ignore, e.g. ('(g)', '(s)')
Examples
--------
>>> formula_to_composition('NH4+') == {0: 1, 1: 4, 7: 1}
True
>>> formula_to_composition('.NHO-(aq)') == {0: -1, 1: 1, 7: 1, 8: 1}
True
>>> formula_to_composition('Na2CO3.7H2O') == {11: 2, 6: 1, 8: 10, 1: 14}
True
"""
if prefixes is None:
prefixes = _latex_mapping.keys()
stoich_tok, chg_tok = _formula_to_parts(formula, prefixes, suffixes)[:2]
tot_comp = {}
parts = stoich_tok.split(".")
for idx, stoich in enumerate(parts):
if idx == 0:
m = 1
else:
m, stoich = _get_leading_integer(stoich)
comp = _parse_stoich(stoich)
for k, v in comp.items():
if k not in tot_comp:
tot_comp[k] = m * v
else:
tot_comp[k] += m * v
if chg_tok is not None:
tot_comp[0] = _get_charge(chg_tok)
return tot_comp
def _subs(string, patterns):
for patt, repl in patterns.items():
string = string.replace(patt, repl)
return string
def _parse_multiplicity(strings, substance_keys=None):
"""
Examples
--------
>>> _parse_multiplicity(['2 H2O2', 'O2']) == {'H2O2': 2, 'O2': 1}
True
>>> _parse_multiplicity(['2 * H2O2', 'O2']) == {'H2O2': 2, 'O2': 1}
True
>>> _parse_multiplicity(['']) == {}
True
>>> _parse_multiplicity(['H2O', 'H2O']) == {'H2O': 2}
True
"""
result = {}
for items in [re.split(" \\* | ", s) for s in strings]:
items = [x for x in items if x != ""]
if len(items) == 0:
continue
elif len(items) == 1:
if items[0] not in result:
result[items[0]] = 0
result[items[0]] += 1
elif len(items) == 2:
if items[1] not in result:
result[items[1]] = 0
result[items[1]] += (
float(items[0]) if "." in items[0] or "e" in items[0] else int(items[0])
)
else:
raise ValueError("To many parts in substring")
if substance_keys is not None:
for k in result:
if k not in substance_keys:
raise ValueError("Unkown substance_key: %s" % k)
return result
def to_reaction(line, substance_keys, token, Cls, globals_=None, **kwargs):
"""Parses a string into a Reaction object and substances
Reac1 + 2 Reac2 + (2 Reac1) -> Prod1 + Prod2; 10**3.7; ref='doi:12/ab'
Reac1 = Prod1; 2.1;
Parameters
----------
line: str
string representation to be parsed
substance_keys: iterable of strings
Allowed names, e.g. ('H2O', 'H+', 'OH-')
token : str
delimiter token between reactant and product side
Cls : class
e.g. subclass of Reaction
globals_: dict (optional)
Globals passed on to :func:`eval`, when ``None``:
`chempy.units.default_units` is used with 'chempy'
and 'default_units' extra entries.
Notes
-----
This function calls :func:`eval`, hence there are severe security concerns
with running this on untrusted data.
"""
if globals_ is None:
globals_ = get_parsing_context()
parts = line.rstrip("\n").split(";")
stoich = parts[0].strip()
if len(parts) > 2:
kwargs.update(eval("dict(" + ";".join(parts[2:]) + "\n)", globals_ or {}))
if len(parts) > 1:
param = parts[1].strip()
else:
param = kwargs.pop("param", "None")
if isinstance(param, str):
if param.startswith("'") and param.endswith("'") and "'" not in param[1:-1]:
from ..kinetics.rates import MassAction
from ._expr import Symbol
param = MassAction(Symbol(unique_keys=(param[1:-1],)))
else:
param = None if globals_ is False else eval(param, globals_)
if token not in stoich:
raise ValueError("Missing token: %s" % token)
reac_prod = [[y.strip() for y in x.split(" + ")] for x in stoich.split(token)]
act, inact = [], []
for elements in reac_prod:
act.append(
_parse_multiplicity(
[x for x in elements if not x.startswith("(")], substance_keys
)
)
inact.append(
_parse_multiplicity(
[x[1:-1] for x in elements if x.startswith("(") and x.endswith(")")],
substance_keys,
)
)
# stoich coeff -> dict
return Cls(
act[0], act[1], param, inact_reac=inact[0], inact_prod=inact[1], **kwargs
)
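# A minimal usage sketch (illustrative; assumes the Reaction class from
# chempy.chemistry, which is not imported in this module):
#
#     from chempy.chemistry import Reaction
#     rxn = to_reaction("H2O -> H+ + OH-; 1e-4", ("H2O", "H+", "OH-"), "->", Reaction)
#
# Note that the parameter and keyword parts after ';' go through eval(), so
# this should only be used on trusted input (see the docstring above).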
def _formula_to_format(
sub,
sup,
formula,
prefixes=None,
infixes=None,
suffixes=("(s)", "(l)", "(g)", "(aq)"),
):
parts = _formula_to_parts(formula, prefixes.keys(), suffixes)
stoichs = parts[0].split(".")
string = ""
for idx, stoich in enumerate(stoichs):
if idx == 0:
m = 1
else:
m, stoich = _get_leading_integer(stoich)
string += _subs(".", infixes)
if m != 1:
string += str(m)
string += re.sub(r"([0-9]+)", lambda m: sub(m.group(1)), stoich)
if parts[1] is not None:
chg = _get_charge(parts[1])
if chg < 0:
token = "-" if chg == -1 else "%d-" % -chg
if chg > 0:
token = "+" if chg == 1 else "%d+" % chg
string += sup(token)
if len(parts) > 4:
raise ValueError("Incorrect formula")
pre_str = "".join(map(lambda x: _subs(x, prefixes), parts[2]))
return pre_str + string + "".join(parts[3])
def formula_to_latex(formula, prefixes=None, infixes=None, **kwargs):
r"""Convert formula string to latex representation
Parameters
----------
formula: str
Chemical formula, e.g. 'H2O', 'Fe+3', 'Cl-'
prefixes: dict
        Prefix transformations, default: greek letters and .
infixes: dict
Infix transformations, default: .
suffixes: iterable of str
What suffixes not to interpret, default: (s), (l), (g), (aq)
Examples
--------
>>> formula_to_latex('NH4+')
'NH_{4}^{+}'
>>> formula_to_latex('Fe(CN)6+2')
'Fe(CN)_{6}^{2+}'
>>> formula_to_latex('Fe(CN)6+2(aq)')
'Fe(CN)_{6}^{2+}(aq)'
>>> formula_to_latex('.NHO-(aq)')
'^\\bullet NHO^{-}(aq)'
>>> formula_to_latex('alpha-FeOOH(s)')
'\\alpha-FeOOH(s)'
"""
if prefixes is None:
prefixes = _latex_mapping
if infixes is None:
infixes = _latex_infix_mapping
return _formula_to_format(
lambda x: "_{%s}" % x,
lambda x: "^{%s}" % x,
formula,
prefixes,
infixes,
**kwargs
)
_unicode_sub = {}
for k, v in enumerate(u"₀₁₂₃₄₅₆₇₈₉"):
_unicode_sub[str(k)] = v
_unicode_sup = {
"+": u"⁺",
"-": u"⁻",
}
for k, v in enumerate(u"⁰¹²³⁴⁵⁶⁷⁸⁹"):
_unicode_sup[str(k)] = v
def formula_to_unicode(formula, prefixes=None, infixes=None, **kwargs):
u"""Convert formula string to unicode string representation
Parameters
----------
formula : str
Chemical formula, e.g. 'H2O', 'Fe+3', 'Cl-'
prefixes : dict
        Prefix transformations, default: greek letters and .
    infixes : dict
        Infix transformations, default: .
suffixes : tuple of strings
Suffixes to keep, e.g. ('(g)', '(s)')
Examples
--------
>>> formula_to_unicode('NH4+') == u'NH₄⁺'
True
>>> formula_to_unicode('Fe(CN)6+2') == u'Fe(CN)₆²⁺'
True
>>> formula_to_unicode('Fe(CN)6+2(aq)') == u'Fe(CN)₆²⁺(aq)'
True
>>> formula_to_unicode('.NHO-(aq)') == u'⋅NHO⁻(aq)'
True
>>> formula_to_unicode('alpha-FeOOH(s)') == u'α-FeOOH(s)'
True
"""
if prefixes is None:
prefixes = _unicode_mapping
if infixes is None:
infixes = _unicode_infix_mapping
return _formula_to_format(
lambda x: "".join(_unicode_sub[str(_)] for _ in x),
lambda x: "".join(_unicode_sup[str(_)] for _ in x),
formula,
prefixes,
infixes,
**kwargs
)
def formula_to_html(formula, prefixes=None, infixes=None, **kwargs):
u"""Convert formula string to html string representation
Parameters
----------
formula : str
Chemical formula, e.g. 'H2O', 'Fe+3', 'Cl-'
prefixes : dict
Prefix transformations, default: greek letters and .
infixes : dict
Infix transformations, default: .
suffixes : tuple of strings
Suffixes to keep, e.g. ('(g)', '(s)')
Examples
--------
>>> formula_to_html('NH4+')
'NH<sub>4</sub><sup>+</sup>'
>>> formula_to_html('Fe(CN)6+2')
'Fe(CN)<sub>6</sub><sup>2+</sup>'
>>> formula_to_html('Fe(CN)6+2(aq)')
'Fe(CN)<sub>6</sub><sup>2+</sup>(aq)'
>>> formula_to_html('.NHO-(aq)')
'⋅NHO<sup>-</sup>(aq)'
>>> formula_to_html('alpha-FeOOH(s)')
'α-FeOOH(s)'
"""
if prefixes is None:
prefixes = _html_mapping
if infixes is None:
infixes = _html_infix_mapping
return _formula_to_format(
lambda x: "<sub>%s</sub>" % x,
lambda x: "<sup>%s</sup>" % x,
formula,
prefixes,
infixes,
**kwargs
)
| bsd-2-clause | -7,726,565,216,656,472,000 | 28.518699 | 88 | 0.546326 | false |
Duke-Medical-Instrumentation/Respi-Rite | Software/FinalCode/date_time.py | 1 | 3127 | def checkTime(path, whichExercise, userID):
# This function checks the time to ensure the user is not over training
# Import important libraries
import os
import time
import pickle
from mainmenu import menu
oDir = os.getcwd()
os.chdir(path)
cur_time = time.localtime()
cur_epoch = time.time()
if whichExercise == 1: # Inspirator
f2 = open('parameters_i.pickle', 'rb')
breath_num, reps_per_day, time_between, resistance, num_sessions_completed, last_time, epoch_time, num_sessions_ever = pickle.load(f2)
f2.close()
elif whichExercise == 2: # Expirator
f2 = open('parameters_e.pickle', 'rb')
breath_num, reps_per_day, time_between, resistance, num_sessions_completed, last_time, epoch_time, num_sessions_ever = pickle.load(f2)
f2.close()
os.chdir(oDir)
# If more than a day has passed, then the number of sessions completed that day is set back to zero
if (cur_time[2] == 1 and last_time[2] > 27): #Just in case it's a new month!
num_sessions_completed = 0
os.chdir(path)
if whichExercise == 1:
f = open('parameters_i.pickle', 'wb')
elif whichExercise == 2:
f = open('parameters_e.pickle', 'wb')
pickle.dump([breath_num, reps_per_day, time_between, resistance, num_sessions_completed, last_time, epoch_time, num_sessions_ever], f)
f.close()
os.chdir(oDir)
elif (cur_time[2] > last_time[2]):
num_sessions_completed = 0
os.chdir(path)
if whichExercise == 1:
f = open('parameters_i.pickle', 'wb')
elif whichExercise == 2:
f = open('parameters_e.pickle', 'wb')
pickle.dump([breath_num, reps_per_day, time_between, resistance, num_sessions_completed, last_time, epoch_time, num_sessions_ever], f)
f.close()
os.chdir(oDir)
# Check that they are meeting the reps_per_day and time_between requirements set by Dr. Jones
if num_sessions_completed == reps_per_day:
        # If the user has reached their maximum number of sessions for the day
        print('\n')
        print 'You cannot exceed ', reps_per_day, ' sessions per day'
        raw_input('\n\nPress ENTER to return to Main Menu\n')
menu(path, userID);
elif num_sessions_completed == 0: # User hasn't even started training today
os.system('clear')
elif (num_sessions_completed > 0 and num_sessions_completed < reps_per_day and cur_epoch > (epoch_time + (3600*time_between))):
# They haven't reached their recommended number of sessions per day yet
# AND the appropriate amount of time has passed
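        # (time_between is stored in hours, hence the 3600*time_between term
        # when comparing against epoch seconds)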
os.system('clear')
else:
# Otherwise they haven't met the time between sessions requirement
print('\n')
print 'You must wait ', time_between, ' hours before continuing training.\n'
print('\nLast training: ')
lasttime = time.asctime(last_time)
print lasttime
raw_input('\n\nPress ENTER to return to Main Menu\n')
menu(path, userID);
| apache-2.0 | -7,552,014,188,763,341,000 | 43.056338 | 142 | 0.622322 | false |
andresriancho/nimbostratus-target | servers/django_frontend/deploy.py | 1 | 3653 | import sys
import requests
import os
import time
import logging
from core.region_connection import EC2Connection
from config import AMI, SIZE, DEPLOY_PRIVATE_PATH, DEPLOY_PUBLIC_PATH
from aws.keypair import create_keypair
from aws.ec2 import create_instance_profile
TEST_URL = 'http://%s/?url=http://httpbin.org/user-agent'
NAME = 'django_frontend_nimbostratus'
SG_NAME = '%s_sg' % NAME
SQS_POLICY = """{
"Statement":[{
"Effect":"Allow",
"Action":["sqs:*"],
"Resource": "*"}]}
"""
SUCCESS_MESSAGE = '''\
You can connect to it via SSH and HTTP:
http://%s/
http://%s/?url=http://httpbin.org/user-agent
ssh -i django_frontend_nimbostratus.pem ubuntu@%s
'''
def deploy_django_frontend():
conn = EC2Connection()
logging.info('Launching Django frontend instance')
keypair_name = create_keypair(NAME)
user_data = get_user_data()
security_group = create_security_group()
instance_profile = create_instance_profile(NAME, SQS_POLICY)
my_reservation = conn.run_instances(AMI,
instance_type=SIZE,
key_name=keypair_name,
user_data=user_data,
security_groups=[security_group,],
instance_profile_name=instance_profile)
instance = my_reservation.instances[0]
while not instance.update() == 'running':
logging.debug('Waiting for instance to start...')
time.sleep(10)
logging.info('Checking if instance was correctly configured (this usually takes 5min)')
conn.create_tags([instance.id], {"Name": NAME})
for _ in xrange(10):
time.sleep(60)
try:
response = requests.get(TEST_URL % instance.public_dns_name)
except Exception:
logging.debug('Instance did not boot yet...')
else:
assert 'python-requests' in response.text, 'Incorrectly configured!'
break
else:
raise Exception('Timeout! Instance failed to boot.')
logging.info('Successfully started %s' % NAME)
logging.debug(SUCCESS_MESSAGE % (instance.public_dns_name,
instance.public_dns_name,
instance.public_dns_name))
def create_security_group():
conn = EC2Connection()
for sg in conn.get_all_security_groups():
if sg.name == SG_NAME:
return SG_NAME
web = conn.create_security_group(SG_NAME, 'Allow ports 80 and 22.')
web.authorize('tcp', 80, 80, '0.0.0.0/0')
web.authorize('tcp', 22, 22, '0.0.0.0/0')
return SG_NAME
def get_user_data():
'''
:return: A string which contains the user_data.py file contents with the
replaced variables.
'''
user_data = file('servers/django_frontend/user_data.py').read()
user_data = user_data.replace('__VULNWEB_DEPLOY_PRIVATE_KEY__',
file(DEPLOY_PRIVATE_PATH).read())
user_data = user_data.replace('__VULNWEB_DEPLOY_PUBLIC_KEY__',
file(DEPLOY_PUBLIC_PATH).read())
return user_data
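# The two __VULNWEB_DEPLOY_*_KEY__ placeholders are expected to appear
# verbatim in servers/django_frontend/user_data.py; they are replaced with the
# contents of the deploy key pair configured via DEPLOY_PRIVATE_PATH and
# DEPLOY_PUBLIC_PATH in config.py.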
def verify_config():
if not os.path.exists(DEPLOY_PRIVATE_PATH) or not \
os.path.exists(DEPLOY_PUBLIC_PATH):
logging.critical('You need to setup your Github repository with'\
' SSH deploy keys and set the path to those files'
' in the config.py file. See: https://help.github.com/articles/managing-deploy-keys')
sys.exit(1)
| agpl-3.0 | -4,153,315,520,433,086,000 | 32.513761 | 110 | 0.582809 | false |
s-block/django-nested-inline | example/app/tests/test_admin.py | 1 | 1076 | from django.contrib.auth.models import User
from django.template.response import TemplateResponse
from django.test import TestCase
try:
from django.urls import reverse
except ImportError:
from django.core.urlresolvers import reverse
class TopLevelAdminTestCase(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(username='super', password='secret', email='[email protected]')
def setUp(self):
try:
self.client.force_login(self.superuser)
except AttributeError:
self.client.login(username=self.superuser.username, password='secret')
def test_changelist(self):
response = self.client.get(reverse('admin:app_toplevel_changelist'))
self.assertIsInstance(response, TemplateResponse)
self.assertEqual(response.status_code, 200)
def test_add_view(self):
response = self.client.get(reverse('admin:app_toplevel_add'))
self.assertIsInstance(response, TemplateResponse)
self.assertEqual(response.status_code, 200)
| mit | -1,289,738,787,592,513,800 | 34.866667 | 117 | 0.719331 | false |
aforren1/toon | tests/anim/test_easing.py | 1 | 1319 | from toon.anim.interpolators import LERP, SELECT
from toon.anim.interpolators import _test as _itest
from toon.anim.easing import (LINEAR, STEP,
SMOOTHSTEP, SMOOTHERSTEP,
QUADRATIC_IN, QUADRATIC_OUT, QUADRATIC_IN_OUT,
EXPONENTIAL_IN, EXPONENTIAL_OUT, EXPONENTIAL_IN_OUT,
ELASTIC_IN, ELASTIC_OUT, ELASTIC_IN_OUT,
BACK_IN, BACK_OUT, BACK_IN_OUT,
BOUNCE_IN, BOUNCE_OUT, BOUNCE_IN_OUT)
from toon.anim.easing import _test as _etest
from pytest import approx
interps = [LERP, SELECT]
easings = [LINEAR, STEP,
SMOOTHSTEP, SMOOTHERSTEP,
QUADRATIC_IN, QUADRATIC_OUT, QUADRATIC_IN_OUT,
EXPONENTIAL_IN, EXPONENTIAL_OUT, EXPONENTIAL_IN_OUT,
ELASTIC_IN, ELASTIC_OUT, ELASTIC_IN_OUT,
BACK_IN, BACK_OUT, BACK_IN_OUT,
BOUNCE_IN, BOUNCE_OUT, BOUNCE_IN_OUT]
def test_easings():
for i in easings:
assert(_etest(0.5, i) != 0)
assert(_etest(0, i) == approx(0))
assert(_etest(1, i) == approx(1))
def test_interps():
for i in interps:
assert(_itest(0, 1, 0, i) == 0)
assert(_itest(0, 1, 1, i) == 1)
assert(_itest(0, 1, 0.5, LERP) == 0.5)
| mit | -3,562,907,289,130,570,000 | 36.685714 | 82 | 0.557998 | false |
chrsrds/scikit-learn | sklearn/neighbors/lof.py | 3 | 20358 | # Authors: Nicolas Goix <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
import warnings
from .base import NeighborsBase
from .base import KNeighborsMixin
from .base import UnsupervisedMixin
from ..base import OutlierMixin
from ..utils.validation import check_is_fitted
from ..utils import check_array
__all__ = ["LocalOutlierFactor"]
class LocalOutlierFactor(NeighborsBase, KNeighborsMixin, UnsupervisedMixin,
OutlierMixin):
"""Unsupervised Outlier Detection using Local Outlier Factor (LOF)
The anomaly score of each sample is called Local Outlier Factor.
It measures the local deviation of density of a given sample with
respect to its neighbors.
It is local in that the anomaly score depends on how isolated the object
is with respect to the surrounding neighborhood.
More precisely, locality is given by k-nearest neighbors, whose distance
is used to estimate the local density.
By comparing the local density of a sample to the local densities of
its neighbors, one can identify samples that have a substantially lower
density than their neighbors. These are considered outliers.
Parameters
----------
n_neighbors : int, optional (default=20)
Number of neighbors to use by default for :meth:`kneighbors` queries.
If n_neighbors is larger than the number of samples provided,
all samples will be used.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default=30)
Leaf size passed to :class:`BallTree` or :class:`KDTree`. This can
affect the speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or callable, default 'minkowski'
metric used for the distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If 'precomputed', the training input X is expected to be a distance
matrix.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'minkowski', 'rogerstanimoto', 'russellrao',
'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean',
'yule']
See the documentation for scipy.spatial.distance for details on these
metrics:
https://docs.scipy.org/doc/scipy/reference/spatial.distance.html
p : integer, optional (default=2)
Parameter for the Minkowski metric from
:func:`sklearn.metrics.pairwise.pairwise_distances`. When p = 1, this
is equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, optional (default=None)
Additional keyword arguments for the metric function.
contamination : 'auto' or float, optional (default='auto')
The amount of contamination of the data set, i.e. the proportion
of outliers in the data set. When fitting this is used to define the
threshold on the scores of the samples.
- if 'auto', the threshold is determined as in the
original paper,
- if a float, the contamination should be in the range [0, 0.5].
.. versionchanged:: 0.22
The default value of ``contamination`` changed from 0.1
to ``'auto'``.
novelty : boolean, default False
By default, LocalOutlierFactor is only meant to be used for outlier
detection (novelty=False). Set novelty to True if you want to use
LocalOutlierFactor for novelty detection. In this case be aware that
        you should only use predict, decision_function and score_samples
on new unseen data and not on the training set.
n_jobs : int or None, optional (default=None)
The number of parallel jobs to run for neighbors search.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
Affects only :meth:`kneighbors` and :meth:`kneighbors_graph` methods.
Attributes
----------
negative_outlier_factor_ : numpy array, shape (n_samples,)
The opposite LOF of the training samples. The higher, the more normal.
Inliers tend to have a LOF score close to 1 (``negative_outlier_factor_``
close to -1), while outliers tend to have a larger LOF score.
The local outlier factor (LOF) of a sample captures its
supposed 'degree of abnormality'.
It is the average of the ratio of the local reachability density of
a sample and those of its k-nearest neighbors.
n_neighbors_ : integer
The actual number of neighbors used for :meth:`kneighbors` queries.
offset_ : float
Offset used to obtain binary labels from the raw scores.
Observations having a negative_outlier_factor smaller than `offset_`
are detected as abnormal.
The offset is set to -1.5 (inliers score around -1), except when a
contamination parameter different than "auto" is provided. In that
case, the offset is defined in such a way we obtain the expected
number of outliers in training.
References
----------
.. [1] Breunig, M. M., Kriegel, H. P., Ng, R. T., & Sander, J. (2000, May).
LOF: identifying density-based local outliers. In ACM sigmod record.
"""
def __init__(self, n_neighbors=20, algorithm='auto', leaf_size=30,
metric='minkowski', p=2, metric_params=None,
contamination="auto", novelty=False, n_jobs=None):
super().__init__(
n_neighbors=n_neighbors,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs)
self.contamination = contamination
self.novelty = novelty
@property
def fit_predict(self):
""""Fits the model to the training set X and returns the labels.
Label is 1 for an inlier and -1 for an outlier according to the LOF
score and the contamination parameter.
Parameters
----------
X : array-like, shape (n_samples, n_features), default=None
The query sample or samples to compute the Local Outlier Factor
w.r.t. to the training samples.
y : Ignored
not used, present for API consistency by convention.
Returns
-------
is_inlier : array, shape (n_samples,)
Returns -1 for anomalies/outliers and 1 for inliers.
"""
# As fit_predict would be different from fit.predict, fit_predict is
# only available for outlier detection (novelty=False)
if self.novelty:
msg = ('fit_predict is not available when novelty=True. Use '
'novelty=False if you want to predict on the training set.')
raise AttributeError(msg)
return self._fit_predict
def _fit_predict(self, X, y=None):
""""Fits the model to the training set X and returns the labels.
Label is 1 for an inlier and -1 for an outlier according to the LOF
score and the contamination parameter.
Parameters
----------
X : array-like, shape (n_samples, n_features), default=None
The query sample or samples to compute the Local Outlier Factor
w.r.t. to the training samples.
Returns
-------
is_inlier : array, shape (n_samples,)
Returns -1 for anomalies/outliers and 1 for inliers.
"""
# As fit_predict would be different from fit.predict, fit_predict is
# only available for outlier detection (novelty=False)
return self.fit(X)._predict()
def fit(self, X, y=None):
"""Fit the model using X as training data.
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape [n_samples, n_features],
or [n_samples, n_samples] if metric='precomputed'.
y : Ignored
not used, present for API consistency by convention.
Returns
-------
self : object
"""
if self.contamination != 'auto':
if not(0. < self.contamination <= .5):
raise ValueError("contamination must be in (0, 0.5], "
"got: %f" % self.contamination)
super().fit(X)
n_samples = self._fit_X.shape[0]
if self.n_neighbors > n_samples:
warnings.warn("n_neighbors (%s) is greater than the "
"total number of samples (%s). n_neighbors "
"will be set to (n_samples - 1) for estimation."
% (self.n_neighbors, n_samples))
self.n_neighbors_ = max(1, min(self.n_neighbors, n_samples - 1))
self._distances_fit_X_, _neighbors_indices_fit_X_ = (
self.kneighbors(None, n_neighbors=self.n_neighbors_))
self._lrd = self._local_reachability_density(
self._distances_fit_X_, _neighbors_indices_fit_X_)
# Compute lof score over training samples to define offset_:
lrd_ratios_array = (self._lrd[_neighbors_indices_fit_X_] /
self._lrd[:, np.newaxis])
self.negative_outlier_factor_ = -np.mean(lrd_ratios_array, axis=1)
if self.contamination == "auto":
# inliers score around -1 (the higher, the less abnormal).
self.offset_ = -1.5
else:
self.offset_ = np.percentile(self.negative_outlier_factor_,
100. * self.contamination)
return self
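    # A rough usage sketch (illustrative, not part of the library source):
    # outlier detection on the training data itself goes through fit_predict,
    #
    #     lof = LocalOutlierFactor(n_neighbors=20)
    #     labels = lof.fit_predict(X_train)        # -1 = outlier, +1 = inlier
    #     scores = lof.negative_outlier_factor_    # the lower, the more abnormal
    #
    # whereas scoring new, unseen samples requires novelty=True at
    # construction, followed by fit(X_train) and then predict,
    # decision_function or score_samples on the new data only.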
@property
def predict(self):
"""Predict the labels (1 inlier, -1 outlier) of X according to LOF.
        This method makes it possible to generalize prediction to *new observations* (not
in the training set). Only available for novelty detection (when
novelty is set to True).
Parameters
----------
X : array-like, shape (n_samples, n_features)
The query sample or samples to compute the Local Outlier Factor
w.r.t. to the training samples.
Returns
-------
is_inlier : array, shape (n_samples,)
Returns -1 for anomalies/outliers and +1 for inliers.
"""
if not self.novelty:
msg = ('predict is not available when novelty=False, use '
'fit_predict if you want to predict on training data. Use '
'novelty=True if you want to use LOF for novelty detection '
'and predict on new unseen data.')
raise AttributeError(msg)
return self._predict
def _predict(self, X=None):
"""Predict the labels (1 inlier, -1 outlier) of X according to LOF.
If X is None, returns the same as fit_predict(X_train).
Parameters
----------
X : array-like, shape (n_samples, n_features), default=None
The query sample or samples to compute the Local Outlier Factor
w.r.t. to the training samples. If None, makes prediction on the
training data without considering them as their own neighbors.
Returns
-------
is_inlier : array, shape (n_samples,)
Returns -1 for anomalies/outliers and +1 for inliers.
"""
check_is_fitted(self, ["offset_", "negative_outlier_factor_",
"n_neighbors_", "_distances_fit_X_"])
if X is not None:
X = check_array(X, accept_sparse='csr')
is_inlier = np.ones(X.shape[0], dtype=int)
is_inlier[self.decision_function(X) < 0] = -1
else:
is_inlier = np.ones(self._fit_X.shape[0], dtype=int)
is_inlier[self.negative_outlier_factor_ < self.offset_] = -1
return is_inlier
@property
def decision_function(self):
"""Shifted opposite of the Local Outlier Factor of X.
Bigger is better, i.e. large values correspond to inliers.
The shift offset allows a zero threshold for being an outlier.
Only available for novelty detection (when novelty is set to True).
The argument X is supposed to contain *new data*: if X contains a
        point from training, it considers the latter in its own neighborhood.
Also, the samples in X are not considered in the neighborhood of any
point.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The query sample or samples to compute the Local Outlier Factor
w.r.t. the training samples.
Returns
-------
shifted_opposite_lof_scores : array, shape (n_samples,)
The shifted opposite of the Local Outlier Factor of each input
samples. The lower, the more abnormal. Negative scores represent
outliers, positive scores represent inliers.
"""
if not self.novelty:
msg = ('decision_function is not available when novelty=False. '
'Use novelty=True if you want to use LOF for novelty '
'detection and compute decision_function for new unseen '
'data. Note that the opposite LOF of the training samples '
'is always available by considering the '
'negative_outlier_factor_ attribute.')
raise AttributeError(msg)
return self._decision_function
def _decision_function(self, X):
"""Shifted opposite of the Local Outlier Factor of X.
Bigger is better, i.e. large values correspond to inliers.
The shift offset allows a zero threshold for being an outlier.
Only available for novelty detection (when novelty is set to True).
The argument X is supposed to contain *new data*: if X contains a
        point from training, it considers the latter in its own neighborhood.
Also, the samples in X are not considered in the neighborhood of any
point.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The query sample or samples to compute the Local Outlier Factor
w.r.t. the training samples.
Returns
-------
shifted_opposite_lof_scores : array, shape (n_samples,)
The shifted opposite of the Local Outlier Factor of each input
samples. The lower, the more abnormal. Negative scores represent
outliers, positive scores represent inliers.
"""
return self._score_samples(X) - self.offset_
@property
def score_samples(self):
"""Opposite of the Local Outlier Factor of X.
        The opposite is returned so that bigger is better, i.e. large values
        correspond to inliers.
Only available for novelty detection (when novelty is set to True).
The argument X is supposed to contain *new data*: if X contains a
        point from training, it considers the latter in its own neighborhood.
Also, the samples in X are not considered in the neighborhood of any
point.
The score_samples on training data is available by considering the
        ``negative_outlier_factor_`` attribute.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The query sample or samples to compute the Local Outlier Factor
w.r.t. the training samples.
Returns
-------
opposite_lof_scores : array, shape (n_samples,)
The opposite of the Local Outlier Factor of each input samples.
The lower, the more abnormal.
"""
if not self.novelty:
msg = ('score_samples is not available when novelty=False. The '
'scores of the training samples are always available '
'through the negative_outlier_factor_ attribute. Use '
'novelty=True if you want to use LOF for novelty detection '
'and compute score_samples for new unseen data.')
raise AttributeError(msg)
return self._score_samples
def _score_samples(self, X):
"""Opposite of the Local Outlier Factor of X.
        The opposite is returned so that bigger is better, i.e. large values
        correspond to inliers.
Only available for novelty detection (when novelty is set to True).
The argument X is supposed to contain *new data*: if X contains a
        point from training, it considers the latter in its own neighborhood.
Also, the samples in X are not considered in the neighborhood of any
point.
The score_samples on training data is available by considering the
        ``negative_outlier_factor_`` attribute.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The query sample or samples to compute the Local Outlier Factor
w.r.t. the training samples.
Returns
-------
opposite_lof_scores : array, shape (n_samples,)
The opposite of the Local Outlier Factor of each input samples.
The lower, the more abnormal.
"""
check_is_fitted(self, ["offset_", "negative_outlier_factor_",
"_distances_fit_X_"])
X = check_array(X, accept_sparse='csr')
distances_X, neighbors_indices_X = (
self.kneighbors(X, n_neighbors=self.n_neighbors_))
X_lrd = self._local_reachability_density(distances_X,
neighbors_indices_X)
lrd_ratios_array = (self._lrd[neighbors_indices_X] /
X_lrd[:, np.newaxis])
# as bigger is better:
return -np.mean(lrd_ratios_array, axis=1)
def _local_reachability_density(self, distances_X, neighbors_indices):
"""The local reachability density (LRD)
The LRD of a sample is the inverse of the average reachability
distance of its k-nearest neighbors.
Parameters
----------
distances_X : array, shape (n_query, self.n_neighbors)
Distances to the neighbors (in the training samples `self._fit_X`)
of each query point to compute the LRD.
neighbors_indices : array, shape (n_query, self.n_neighbors)
Neighbors indices (of each query point) among training samples
self._fit_X.
Returns
-------
local_reachability_density : array, shape (n_samples,)
The local reachability density of each sample.
"""
dist_k = self._distances_fit_X_[neighbors_indices,
self.n_neighbors_ - 1]
reach_dist_array = np.maximum(distances_X, dist_k)
        # add 1e-10 to avoid `nan` when the number of duplicates exceeds n_neighbors_:
return 1. / (np.mean(reach_dist_array, axis=1) + 1e-10)
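# Illustrative usage sketch (added for clarity; not part of the original module and
# never called by the estimator). It only shows how the novelty-detection methods
# documented above are typically driven through the public scikit-learn
# ``LocalOutlierFactor`` estimator; the data values are made up.
def _lof_novelty_usage_example():
    import numpy as np
    from sklearn.neighbors import LocalOutlierFactor
    rng = np.random.RandomState(0)
    X_train = rng.normal(size=(100, 2))
    clf = LocalOutlierFactor(n_neighbors=20, novelty=True).fit(X_train)
    X_new = np.array([[0.0, 0.1], [6.0, 6.0]])
    # decision_function: positive values -> inliers, negative values -> outliers.
    decision = clf.decision_function(X_new)
    # score_samples: opposite LOF; the lower, the more abnormal.
    scores = clf.score_samples(X_new)
    return decision, scores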
| bsd-3-clause | -8,288,186,774,796,448,000 | 39.879518 | 81 | 0.614697 | false |
heromod/migrid | mig/grsfs-fuse/benchmarks/code/benchmark_ro.py | 1 | 3646 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# benchmark_ro - benchmark read-only access
# Copyright (C) 2003-2011 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
"""Benchmark with read-only file access"""
import os
import sys
import timeit
import pprint
import getopt
# dd if=/dev/urandom of=readfile bs=1048576 count=100
def default_configuration():
"""Return dictionary with default configuration values"""
    # 'data_bytes' is the size of the read test file created by prepare_files
    # (100 MiB, matching the dd command above); without it prepare_files would
    # fail with a KeyError on a fresh run.
    conf = {'repeat': 3, 'number': 1000, 'data_bytes': 100 * 1048576}
return conf
def usage():
"""Usage help"""
print("Usage: %s" % sys.argv[0])
print("Run read-only benchmark")
print("Options and default values:")
for (key, val) in default_configuration().items():
print("--%s: %s" % (key, val))
def read_mark(size, filehandle):
"""Read size bytes from filehandle"""
#print "Reading %d from %s" % (size, filehandle)
#filehandle.seek(0)
out = filehandle.read(size)
#assert len(out) == size
def prepare_files(conf):
"""Set up files used in benchmark"""
if not os.path.exists("readfile"):
data = open("/dev/urandom").read(conf['data_bytes'])
readfile = open("readfile", "wb")
readfile.write(data)
readfile.close()
def main(conf):
"""Run timed benchmark"""
read_sequence = [1, 2, 16, 256, 512, 1024, 2048, 4096, 8192, 16384]
read_results = []
prepare_files(conf)
for i in read_sequence:
read_results.append((i, max(
timeit.repeat("read_mark(%s, filehandle)" % i,
setup = conf['setup'], repeat=conf['repeat'],
number=conf['number']))))
out = pprint.PrettyPrinter()
out.pprint(read_results)
if __name__ == '__main__':
conf = default_configuration()
# Parse command line
try:
(opts, args) = getopt.getopt(sys.argv[1:],
'hn:r:', [
'help',
'number=',
'repeat=',
])
except getopt.GetoptError, err:
print('Error in option parsing: ' + err.msg)
usage()
sys.exit(1)
for (opt, val) in opts:
if opt in ('-h', '--help'):
usage()
sys.exit(0)
elif opt in ('-n', '--number'):
try:
conf["number"] = int(val)
except ValueError, err:
print('Error in parsing %s value: %s' % (opt, err))
sys.exit(1)
elif opt in ('-r', '--repeat'):
try:
conf["repeat"] = int(val)
except ValueError, err:
print('Error in parsing %s value: %s' % (opt, err))
sys.exit(1)
else:
print("unknown option: %s" % opt)
usage()
sys.exit(1)
conf['setup'] = """
import os
from __main__ import read_mark
filehandle = open('readfile', 'r')"""
main(conf)
| gpl-2.0 | -6,773,150,630,597,240,000 | 28.403226 | 81 | 0.572957 | false |
LambdaCast/LambdaCast | portal/views.py | 1 | 16341 | # -*- coding: utf-8 -*-
from django.shortcuts import get_object_or_404, redirect
from django.http import HttpResponse
from django.contrib.auth.decorators import login_required
from django.db.models import Q
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.core import serializers
from django.utils.translation import ugettext_lazy as _
from django.template.response import TemplateResponse
from portal.models import MediaItem, Comment, Channel, Collection, Submittal, MediaFile
from portal.forms import MediaItemForm, CommentForm, getThumbnails, ThumbnailForm, SubmittalForm
from portal.media_formats import MEDIA_FORMATS
from portal.templatetags.custom_filters import seconds_to_hms
from taggit.models import Tag
import lambdaproject.settings as settings
import djangotasks
import os
import re
from operator import attrgetter
import itertools
def index(request):
    ''' This view is the front page of OwnTube. It gets the latest available media items
    and collections and forwards them to the template, paginated with Django's Paginator '''
if request.user.is_authenticated():
queryset = itertools.chain(MediaItem.objects.filter(encodingDone=True).order_by('-date','-modified'),Collection.objects.all().order_by('-created'))
else:
queryset = itertools.chain(MediaItem.objects.filter(encodingDone=True, published=True).order_by('-date','-modified'),Collection.objects.all().order_by('-created'))
queryset_sorted = sorted(queryset, key=attrgetter('date', 'created'), reverse=True)
paginator = Paginator(queryset_sorted,16)
channel_list = Channel.objects.all()
page = request.GET.get('page')
rss_list = []
for file_type in MEDIA_FORMATS:
rss_list.append((MEDIA_FORMATS[file_type].format_key,MEDIA_FORMATS[file_type].mediatype,"/feeds/latest/"+file_type))
rss_list.append(('torrent','torrent','/feeds/latest/torrent'))
try:
mediaitems = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
mediaitems = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
mediaitems = paginator.page(paginator.num_pages)
return TemplateResponse(request, 'portal/index.html', {'latest_mediaitems_list': mediaitems, 'channel_list': channel_list, 'rss_list': rss_list})
def channel_list(request,slug):
    ''' This is the view for a channel's item list; it works almost like the index view'''
channel = get_object_or_404(Channel, slug=slug)
if request.user.is_authenticated():
queryset = itertools.chain(MediaItem.objects.filter(encodingDone=True, channel__slug=slug).order_by('-date','-modified'),Collection.objects.filter(channel__slug=slug).order_by('-created'))
else:
queryset = itertools.chain(MediaItem.objects.filter(encodingDone=True, published=True, channel__slug=slug).order_by('-date','-modified'),Collection.objects.filter(channel__slug=slug).order_by('-created'))
queryset_sorted = sorted(queryset, key=attrgetter('date', 'created'), reverse=True)
paginator = Paginator(queryset_sorted,15)
channel_list = Channel.objects.all()
page = request.GET.get('page')
rss_list = []
for file_type in MEDIA_FORMATS:
rss_list.append((MEDIA_FORMATS[file_type].format_key,MEDIA_FORMATS[file_type].mediatype,"/feeds/"+channel.slug+"/"+file_type))
rss_list.append(('torrent','torrent','/feeds/'+channel.slug+'/torrent'))
try:
mediaitems = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
mediaitems = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
mediaitems = paginator.page(paginator.num_pages)
return TemplateResponse(request, 'portal/channel.html', {'mediaitems_list': mediaitems, 'channel': channel, 'channel_list': channel_list, 'rss_list': rss_list})
@login_required
def get_duration(request, slug):
mediaitem = get_object_or_404(MediaItem, slug=slug)
if mediaitem.get_and_save_duration():
duration_feedback = seconds_to_hms(mediaitem.duration)
else:
duration_feedback = "Error"
return HttpResponse(duration_feedback)
def detail(request, slug):
    ''' Handles the detail view of a media item (the player, so to speak) and the comments (this should become nicer with AJAX)'''
mediaitem = get_object_or_404(MediaItem, slug=slug)
if request.user.is_authenticated():
comment_list = Comment.objects.filter(item=mediaitem).order_by('-created')
else:
comment_list = Comment.objects.filter(item=mediaitem,moderated=True).order_by('-created')
if request.method == 'POST':
comment = Comment(item=mediaitem,ip=request.META["REMOTE_ADDR"])
form = CommentForm(request.POST, instance=comment)
if form.is_valid():
comment = form.save(commit=False)
comment.save()
message = _(u"Your comment will be moderated")
comment.send_notification_mail()
return TemplateResponse(request, 'portal/items/detail.html', {'comment_list': comment_list, 'mediaitem': mediaitem, 'comment_form': CommentForm(), 'message': message})
else:
return TemplateResponse(request, 'portal/items/detail.html', {'comment_list': comment_list, 'mediaitem': mediaitem, 'comment_form': form})
else:
form = CommentForm()
return TemplateResponse(request, 'portal/items/detail.html', {'mediaitem': mediaitem, 'comment_list': comment_list, 'comment_form': form})
def iframe(request, slug):
''' Returns an iframe for a item so that media items can be shared easily '''
mediaitem = get_object_or_404(MediaItem, slug=slug)
return TemplateResponse(request, 'portal/items/iframe.html', {'mediaitem': mediaitem})
def tag(request, tag):
''' Gets all media items for a specified tag'''
if request.user.is_authenticated():
mediaitemslist = MediaItem.objects.filter(encodingDone=True, tags__slug__in=[tag]).order_by('-date')
else:
mediaitemslist = MediaItem.objects.filter(encodingDone=True, published=True, tags__slug__in=[tag]).order_by('-date')
tag_name = get_object_or_404(Tag, slug=tag)
return TemplateResponse(request, 'portal/items/list.html', {'mediaitems_list': mediaitemslist, 'tag': tag_name})
def collection(request, slug):
    ''' Gets all media items for a collection'''
collection = get_object_or_404(Collection, slug=slug)
rss_list = []
for file_type in MEDIA_FORMATS:
rss_list.append((MEDIA_FORMATS[file_type].format_key,MEDIA_FORMATS[file_type].mediatype,"/feeds/collection/"+collection.slug+"/"+file_type))
if request.user.is_authenticated():
mediaitemslist = collection.items.filter(encodingDone=True)
else:
mediaitemslist = collection.items.filter(encodingDone=True, published=True)
return TemplateResponse(request, 'portal/collection.html', {'mediaitems_list': mediaitemslist, 'collection': collection, 'rss_list': rss_list })
def search(request):
    ''' The search view for handling the search using Django's "Q" class (see _normalize_query and _get_query)'''
query_string = ''
found_entries = None
if ('q' in request.GET) and request.GET['q'].strip():
query_string = request.GET['q']
entry_query = _get_query(query_string, ['title', 'description', 'tags__name'])
if request.user.is_authenticated():
found_entries = MediaItem.objects.filter(entry_query).order_by('-date')
else:
found_entries = MediaItem.objects.filter(entry_query, published=True).order_by('-date')
return TemplateResponse(request, 'portal/search_results.html', { 'query_string': query_string, 'mediaitems_list': found_entries})
def search_json(request):
    ''' The search view for handling the search using Django's "Q" class (see _normalize_query and _get_query)'''
query_string = ''
found_entries = None
if ('q' in request.GET) and request.GET['q'].strip():
query_string = request.GET['q']
entry_query = _get_query(query_string, ['title', 'description','tags__name'])
found_entries = MediaItem.objects.filter(entry_query).order_by('-date')
data = serializers.serialize('json', found_entries)
return HttpResponse(data, content_type = 'application/javascript; charset=utf8')
def tag_json(request, tag):
mediaitemslist = MediaItem.objects.filter(encodingDone=True, published=True, tags__name__in=[tag]).order_by('-date')
data = serializers.serialize('json', mediaitemslist)
return HttpResponse(data, content_type = 'application/javascript; charset=utf8')
@login_required
def submittal(request, subm_id):
submittal = get_object_or_404(Submittal, pk = subm_id)
if request.method == 'POST':
form = SubmittalForm(request.POST)
if form.is_valid():
mediaitem = form.save()
mediaitem.user = request.user
mediaitem.save()
form.create_mediafiles(mediaitem)
mediaitem.get_and_save_duration()
return redirect(index)
else:
return TemplateResponse(request, 'portal/submittal.html', {'submittal_form': form, 'submittal': submittal})
else:
form = SubmittalForm(initial={
'title': submittal.media_title,
'description': submittal.media_description,
'channel': submittal.media_channel,
'license': submittal.media_license,
'linkURL': submittal.media_linkURL,
'torrentURL': submittal.media_torrentURL,
'media_mp3URL': submittal.media_mp3URL,
'media_oggURL': submittal.media_oggURL,
'media_opusURL': submittal.media_opusURL,
'videoThumbURL': submittal.media_videoThumbURL,
'audioThumbURL': submittal.media_audioThumbURL,
'published': submittal.media_published,
'tags': ", ".join(str(x) for x in submittal.media_tags.all()),
'torrentDone': submittal.media_torrentDone,
'encodingDone': True,
})
return TemplateResponse(request, 'portal/submittal.html', {'submittal_form': form, 'submittal': submittal})
@login_required
def upload_thumbnail(request):
if request.method == 'POST':
form = ThumbnailForm(request.POST, request.FILES or None)
if form.is_valid():
if (request.FILES['file'].content_type == 'image/png' or request.FILES['file'].content_type == 'image/jpeg') and not form.data['title'] == '':
_handle_uploaded_thumbnail(request.FILES['file'], form.data['title'])
message = _("The upload of %s was successful") % (form.data['title'])
form = ThumbnailForm()
return TemplateResponse(request, 'portal/thumbnail.html', {'thumbnail_form': ThumbnailForm(), 'thumbs_list':_get_thumbnails_list, 'message': message})
else:
error = _("Please upload an image file")
return TemplateResponse(request, 'portal/thumbnail.html', {'thumbnail_form': form, 'thumbs_list':_get_thumbnails_list, 'error': error})
else:
return TemplateResponse(request, 'portal/thumbnail.html', {'thumbnail_form': form, 'thumbs_list':_get_thumbnails_list})
else:
return TemplateResponse(request, 'portal/thumbnail.html', {'thumbnail_form': ThumbnailForm(), 'thumbs_list':_get_thumbnails_list})
def _handle_uploaded_thumbnail(f, filename):
suffix = '.png' if (f.content_type == 'image/png') else '.jpg'
suffix = '' if (filename.endswith(suffix)) else suffix
destination = open(settings.THUMBNAILS_DIR + filename + suffix, 'wb+')
for chunk in f.chunks():
destination.write(chunk)
destination.close()
@login_required
def submit(request):
    ''' The view for uploading items. Only authenticated users can upload media items!
        We use django tasks to create a new task for encoding these items. If we use
        bittorrent to distribute our files we also use django tasks to create the .torrent
        files (this can take a few minutes for very large files). '''
if request.method == 'POST':
form = MediaItemForm(request.POST, request.FILES or None)
if form.is_valid():
media_item = form.save()
if form.cleaned_data['thumbURL']:
media_item.audioThumbURL = form.cleaned_data['thumbURL']
media_item.videoThumbURL = form.cleaned_data['thumbURL']
media_item.user = request.user
media_item.save()
media_item.get_and_save_duration()
outputdir = settings.ENCODING_OUTPUT_DIR + media_item.slug
if not os.path.exists(outputdir):
os.makedirs(outputdir)
cover_task = djangotasks.task_for_object(media_item.get_and_save_cover)
djangotasks.run_task(cover_task)
for target_format in form.cleaned_data['fileFormats']:
media_format = MEDIA_FORMATS[target_format]
url = settings.ENCODED_BASE_URL + media_item.slug + '/' + media_item.slug + media_format.extension
media_file = MediaFile.objects.create(title=media_item.title + " " + media_format.text,
url=url, file_format=media_format.format_key,
media_item=media_item, mediatype=media_format.mediatype)
encoding_task = djangotasks.task_for_object(media_file.encode_media)
djangotasks.run_task(encoding_task)
if settings.USE_BITTORRENT:
torrent_task = djangotasks.task_for_object(media_item.create_bittorrent)
djangotasks.run_task(torrent_task)
return redirect(index)
return TemplateResponse(request, 'portal/submit.html', {'submit_form': form})
else:
form = MediaItemForm()
return TemplateResponse(request, 'portal/submit.html', {'submit_form': form})
@login_required
def status(request):
tasks_mediaitem = djangotasks.models.Task.objects.filter(model="portal.mediaitem").exclude(status="successful")
tasks_mediafile = djangotasks.models.Task.objects.filter(model="portal.mediafile").exclude(status="successful")
mediaitem_ids = set(map((lambda mediaitem: mediaitem.object_id), tasks_mediaitem))
for mediafile in tasks_mediafile:
try:
mediaitem_ids.add(MediaFile.objects.get(pk=mediafile.object_id).media_item.pk)
except MediaFile.DoesNotExist:
pass
mediaitems = MediaItem.objects.filter(pk__in=mediaitem_ids)
return TemplateResponse(request, 'portal/status.html', {'mediaitems': mediaitems})
def _normalize_query(query_string,
findterms=re.compile(r'"([^"]+)"|(\S+)').findall,
normspace=re.compile(r'\s{2,}').sub):
    ''' Splits the query string into individual keywords, getting rid of unnecessary spaces
        and grouping quoted words together.
Example:
>>> _normalize_query(' some random words "with quotes " and spaces')
['some', 'random', 'words', 'with quotes', 'and', 'spaces']
'''
return [normspace(' ', (t[0] or t[1]).strip()) for t in findterms(query_string)]
def _get_query(query_string, search_fields):
''' Returns a query, that is a combination of Q objects. That combination
aims to search keywords within a model by testing the given search fields.
'''
query = None # Query to search for every search term
terms = _normalize_query(query_string)
for term in terms:
or_query = None # Query to search for a given term in each field
for field_name in search_fields:
q = Q(**{"%s__icontains" % field_name: term})
if or_query is None:
or_query = q
else:
or_query = or_query | q
if query is None:
query = or_query
else:
query = query & or_query
return query
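def _get_query_example():
    ''' Illustrative sketch only (added for clarity; not used by any view): shows the
        kind of filter _get_query builds. For the search string below and the given
        fields the result is roughly equivalent to
        (Q(title__icontains='django') | Q(description__icontains='django')) &
        (Q(title__icontains='rest framework') | Q(description__icontains='rest framework'))
    '''
    return _get_query('django "rest framework"', ['title', 'description'])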
def _get_thumbnails_list():
thumbnails_list = getThumbnails(settings.THUMBNAILS_DIR)
del thumbnails_list[0]
return thumbnails_list
| bsd-2-clause | 1,480,997,677,373,973,200 | 49.435185 | 212 | 0.663668 | false |
akrherz/iem | scripts/current/stage4_hourly.py | 1 | 2162 | """
Plot the hourly stage IV precip data
"""
import sys
import os
import datetime
import pygrib
import pytz
from pyiem.util import utc, logger
from pyiem.plot import MapPlot, get_cmap
LOG = logger()
def doit(ts):
"""
Generate hourly plot of stage4 data
"""
gmtnow = datetime.datetime.utcnow()
gmtnow = gmtnow.replace(tzinfo=pytz.utc)
routes = "a"
if ((gmtnow - ts).days * 86400.0 + (gmtnow - ts).seconds) < 7200:
routes = "ac"
fn = "/mesonet/ARCHIVE/data/%s/stage4/ST4.%s.01h.grib" % (
ts.strftime("%Y/%m/%d"),
ts.strftime("%Y%m%d%H"),
)
if not os.path.isfile(fn):
LOG.info("Missing stage4 %s", fn)
return
grbs = pygrib.open(fn)
grib = grbs[1]
lats, lons = grib.latlons()
vals = grib.values / 25.4
cmap = get_cmap("jet")
cmap.set_under("white")
cmap.set_over("black")
clevs = [
0.01,
0.05,
0.1,
0.2,
0.3,
0.4,
0.5,
0.6,
0.7,
0.8,
0.9,
1,
1.5,
2,
3,
]
localtime = ts.astimezone(pytz.timezone("America/Chicago"))
for sector in ["iowa", "midwest", "conus"]:
mp = MapPlot(
sector=sector,
title="Stage IV One Hour Precipitation",
subtitle="Hour Ending %s"
% (localtime.strftime("%d %B %Y %I %p %Z"),),
)
mp.pcolormesh(lons, lats, vals, clevs, units="inch")
pqstr = "plot %s %s00 %s_stage4_1h.png %s_stage4_1h_%s.png png" % (
routes,
ts.strftime("%Y%m%d%H"),
sector,
sector,
ts.strftime("%H"),
)
if sector == "iowa":
mp.drawcounties()
mp.postprocess(view=False, pqstr=pqstr)
mp.close()
def main(argv):
"""Go main Go"""
if len(argv) == 5:
ts = utc(int(argv[1]), int(argv[2]), int(argv[3]), int(argv[4]))
doit(ts)
else:
ts = utc()
doit(ts)
doit(ts - datetime.timedelta(hours=24))
doit(ts - datetime.timedelta(hours=48))
if __name__ == "__main__":
main(sys.argv)
| mit | 9,102,741,022,298,464,000 | 21.757895 | 75 | 0.50185 | false |
jarifibrahim/finance | forms.py | 1 | 1380 | from flask_wtf import Form
from wtforms import StringField, PasswordField, validators, SubmitField
# Render login form. u' indicated unicode encoding
class LoginForm(Form):
username = StringField('Username', [validators.InputRequired(message=(u'Invalid Username')),
validators.Length(min=4, max=25,
message=(u'Username should have at least 4 characters'))])
password = PasswordField('Password', [validators.InputRequired(message=(u'Password Required')),
validators.Length(min=4, max=25,
message=(u'Password should have at least 4 characters'))])
submit = SubmitField("Log In")
class RegisterForm(LoginForm):
email = StringField('Email', [validators.InputRequired(message=(u'Email Required')),
validators.Email(message=(u'Invalid Email id'))])
confirm = PasswordField('Repeat Password', [
validators.EqualTo('password', message=(u'Passwords must match! ')),
validators.InputRequired(message=(u'Password Required ')),
validators.Length(min=4, max=25, message=(u'Password should have at least 4 characters '))
])
submit = SubmitField('Register')
| mit | -6,423,164,901,180,357,000 | 61.727273 | 114 | 0.583333 | false |
kcyu1993/ML_course_kyu | projects/project1/scripts/gradient.py | 1 | 2752 | """
This file consists of operation related to gradient descent generally,
would be referenced by model.py and implementations.py
"""
import numpy as np
from data_utils import batch_iter
from costs import compute_loss
def compute_gradient(y, tx, w):
"""Compute the gradient."""
e = y - (tx).dot(w)
N = len(e)
gradient = -1 / N * (tx.T).dot(e)
return gradient
def gradient_least_square(y, tx, w, cost='mse'):
"""Compute the gradient."""
# ***************************************************
# INSERT YOUR CODE HERE
# TODO: compute gradient and loss
# ***************************************************
if cost is 'mse':
N = y.size
e = y - np.dot(tx, w)
return -1 / N * np.dot(tx.T, e)
elif cost is 'mae':
e = y - np.dot(tx, w)
return np.dot(tx.T, (-1) * np.sign(e)) / y.size
else:
        raise ValueError("Unknown cost function: {}".format(cost))
def gradient_descent(y, tx, initial_w, gamma, max_iters):
"""Gradient descent algorithm."""
threshold = 1e-3 # determines convergence. To be tuned
# Define parameters to store w and loss
ws = [initial_w]
losses = []
w = initial_w
method = 'mse'
for n_iter in range(max_iters):
current_grad = gradient_least_square(y, tx, w)
current_loss = compute_loss(y, tx, w, method)
# Moving in the direction of negative gradient
w = w - gamma * current_grad
# Store w and loss
ws.append(w)
losses.append(current_loss)
        # Convergence criterion: stop once the loss change between iterations is small
        if len(losses) > 1 and np.abs(losses[-1] - losses[-2]) < threshold:
            break
print("Gradient Descent({bi}): loss={l}".format(
bi=n_iter, l=current_loss))
return losses, ws
def stochastic_gradient_descent(
y, tx, initial_w, batch_size, gamma, max_iters):
"""Stochastic gradient descent algorithm."""
threshold = 1e-3 # determines convergence. To be tuned
# Define parameters to store w and loss
ws = [initial_w]
losses = []
w = initial_w
for n_iter in range(max_iters):
for minibatch_y, minibatch_tx in batch_iter(y, tx, batch_size, 1, True):
current_grad = compute_gradient(minibatch_y, minibatch_tx, w)
current_loss = compute_loss(y, tx, w)
# Moving in the direction of negative gradient
w = w - gamma * current_grad
# store w and loss
ws.append(np.copy(w))
losses.append(current_loss)
            # Convergence criterion: stop once the loss change between updates is small
            if len(losses) > 1 and np.abs(losses[-1] - losses[-2]) < threshold:
                break
print("Gradient Descent({bi}): loss={l}".format(
bi=n_iter, l=current_loss))
return losses, ws
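def _usage_example():
    """Illustrative sketch only (not part of the original exercise code): fits
    y = 2 + 3*x with plain gradient descent on synthetic, made-up data."""
    x = np.linspace(0, 1, 50)
    tx = np.c_[np.ones_like(x), x]  # design matrix with a bias column
    y = 2 + 3 * x
    losses, ws = gradient_descent(y, tx, np.zeros(2), gamma=0.5, max_iters=200)
    return ws[-1]  # should approach [2, 3]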
| mit | 5,545,966,083,635,659,000 | 31.376471 | 81 | 0.563953 | false |
sbranson/online_crowdsourcing | crowdsourcing/util/multibox/export.py | 1 | 4021 | """
Export a model, loading in the moving averages and saving those.
"""
import argparse
import logging
import os
import pprint
import sys
import tensorflow as tf
import tensorflow.contrib.slim as slim
from tensorflow.python.framework import graph_util
import time
from config import parse_config_file
import model
def export(model_path, export_path, export_version, cfg):
graph = tf.Graph()
# Force all Variables to reside on the CPU.
with graph.as_default():
# For now we'll assume that the user is sending us a raveled array, totally preprocessed.
image_data = tf.placeholder(tf.float32, [None, cfg.INPUT_SIZE * cfg.INPUT_SIZE * 3], name="images")
batched_images = tf.reshape(image_data, [-1, cfg.INPUT_SIZE, cfg.INPUT_SIZE, 3])
batch_norm_params = {
# Decay for the batch_norm moving averages.
'decay': cfg.BATCHNORM_MOVING_AVERAGE_DECAY,
# epsilon to prevent 0s in variance.
'epsilon': 0.001,
'variables_collections' : [tf.GraphKeys.MOVING_AVERAGE_VARIABLES],
'is_training' : False
}
with slim.arg_scope([slim.conv2d],
activation_fn=tf.nn.relu,
normalizer_fn=slim.batch_norm,
normalizer_params=batch_norm_params,
weights_regularizer=slim.l2_regularizer(0.00004),
biases_regularizer=slim.l2_regularizer(0.00004)):
locations, confidences, inception_vars = model.build(
inputs = batched_images,
num_bboxes_per_cell = cfg.NUM_BBOXES_PER_CELL,
reuse=False,
scope=''
)
ema = tf.train.ExponentialMovingAverage(
decay=cfg.MOVING_AVERAGE_DECAY
)
shadow_vars = {
ema.average_name(var) : var
for var in slim.get_model_variables()
}
saver = tf.train.Saver(shadow_vars, reshape=True)
sess_config = tf.ConfigProto(
log_device_placement=False,
#device_filters = device_filters,
allow_soft_placement = True,
gpu_options = tf.GPUOptions(
per_process_gpu_memory_fraction=cfg.SESSION_CONFIG.PER_PROCESS_GPU_MEMORY_FRACTION
)
)
sess = tf.Session(graph=graph, config=sess_config)
with sess.as_default():
tf.global_variables_initializer().run()
saver.restore(sess, model_path)
v2c = graph_util.convert_variables_to_constants
deploy_graph_def = v2c(sess, graph.as_graph_def(), [locations.name[:-2], confidences.name[:-2]])
if not os.path.exists(export_path):
os.makedirs(export_path)
save_path = os.path.join(export_path, 'constant_model-%d.pb' % (export_version,))
with open(save_path, 'wb') as f:
f.write(deploy_graph_def.SerializeToString())
def parse_args():
parser = argparse.ArgumentParser(description='Test an Inception V3 network')
parser.add_argument('--checkpoint_path', dest='checkpoint_path',
help='Path to a model or a directory of checkpoints. The latest model will be used.',
required=True, type=str)
parser.add_argument('--export_path', dest='export_path',
help='Path to a directory where the exported model will be saved.',
required=True, type=str)
parser.add_argument('--export_version', dest='export_version',
help='Version number of the model.',
required=True, type=int)
parser.add_argument('--config', dest='config_file',
help='Path to the configuration file.',
required=True, type=str)
args = parser.parse_args()
return args
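# Illustrative invocation (added for clarity; the paths below are made up, only the
# flag names come from parse_args above):
#   python export.py --checkpoint_path ./model_logs/model.ckpt-50000 \
#       --export_path ./exported_model --export_version 1 --config ./config.yaml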
if __name__ == '__main__':
args = parse_args()
print "Called with:"
    pprint.pprint(args)
cfg = parse_config_file(args.config_file)
print "Configurations:"
    pprint.pprint(cfg)
export(args.checkpoint_path, args.export_path, args.export_version, cfg=cfg) | mit | 8,448,636,228,364,823,000 | 31.699187 | 111 | 0.618752 | false |
pypa/warehouse | warehouse/search/tasks.py | 1 | 9822 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import binascii
import os
import urllib
import certifi
import elasticsearch
import redis
import requests_aws4auth
from elasticsearch.helpers import parallel_bulk
from elasticsearch_dsl import serializer
from sqlalchemy import func
from sqlalchemy.orm import aliased
from warehouse import tasks
from warehouse.packaging.models import (
Classifier,
Description,
Project,
Release,
release_classifiers,
)
from warehouse.packaging.search import Project as ProjectDocument
from warehouse.search.utils import get_index
from warehouse.utils.db import windowed_query
def _project_docs(db, project_name=None):
releases_list = (
db.query(Release.id)
.filter(Release.yanked.is_(False), Release.files)
.order_by(
Release.project_id,
Release.is_prerelease.nullslast(),
Release._pypi_ordering.desc(),
)
.distinct(Release.project_id)
)
if project_name:
releases_list = releases_list.join(Project).filter(Project.name == project_name)
releases_list = releases_list.subquery()
r = aliased(Release, name="r")
all_versions = (
db.query(func.array_agg(r.version))
.filter(r.project_id == Release.project_id)
.correlate(Release)
.as_scalar()
.label("all_versions")
)
classifiers = (
db.query(func.array_agg(Classifier.classifier))
.select_from(release_classifiers)
.join(Classifier, Classifier.id == release_classifiers.c.trove_id)
.filter(Release.id == release_classifiers.c.release_id)
.correlate(Release)
.as_scalar()
.label("classifiers")
)
release_data = (
db.query(
Description.raw.label("description"),
Release.version.label("latest_version"),
all_versions,
Release.author,
Release.author_email,
Release.maintainer,
Release.maintainer_email,
Release.home_page,
Release.summary,
Release.keywords,
Release.platform,
Release.download_url,
Release.created,
classifiers,
Project.normalized_name,
Project.name,
Project.zscore,
)
.select_from(releases_list)
.join(Release, Release.id == releases_list.c.id)
.join(Description)
.outerjoin(Release.project)
)
for release in windowed_query(release_data, Release.project_id, 50000):
p = ProjectDocument.from_db(release)
p._index = None
p.full_clean()
doc = p.to_dict(include_meta=True)
doc.pop("_index", None)
yield doc
class SearchLock:
def __init__(self, redis_client, timeout=None, blocking_timeout=None):
self.lock = redis_client.lock(
"search-index", timeout=timeout, blocking_timeout=blocking_timeout
)
def __enter__(self):
if self.lock.acquire():
return self
else:
raise redis.exceptions.LockError("Could not acquire lock!")
def __exit__(self, type, value, tb):
self.lock.release()
@tasks.task(bind=True, ignore_result=True, acks_late=True)
def reindex(self, request):
"""
Recreate the Search Index.
"""
r = redis.StrictRedis.from_url(request.registry.settings["celery.scheduler_url"])
try:
with SearchLock(r, timeout=30 * 60, blocking_timeout=30):
p = urllib.parse.urlparse(request.registry.settings["elasticsearch.url"])
qs = urllib.parse.parse_qs(p.query)
kwargs = {
"hosts": [urllib.parse.urlunparse(p[:2] + ("",) * 4)],
"verify_certs": True,
"ca_certs": certifi.where(),
"timeout": 30,
"retry_on_timeout": True,
"serializer": serializer.serializer,
}
aws_auth = bool(qs.get("aws_auth", False))
if aws_auth:
aws_region = qs.get("region", ["us-east-1"])[0]
kwargs["connection_class"] = elasticsearch.RequestsHttpConnection
kwargs["http_auth"] = requests_aws4auth.AWS4Auth(
request.registry.settings["aws.key_id"],
request.registry.settings["aws.secret_key"],
aws_region,
"es",
)
client = elasticsearch.Elasticsearch(**kwargs)
number_of_replicas = request.registry.get("elasticsearch.replicas", 0)
refresh_interval = request.registry.get("elasticsearch.interval", "1s")
# We use a randomly named index so that we can do a zero downtime reindex.
# Essentially we'll use a randomly named index which we will use until all
# of the data has been reindexed, at which point we'll point an alias at
# our randomly named index, and then delete the old randomly named index.
# Create the new index and associate all of our doc types with it.
index_base = request.registry["elasticsearch.index"]
random_token = binascii.hexlify(os.urandom(5)).decode("ascii")
new_index_name = "{}-{}".format(index_base, random_token)
doc_types = request.registry.get("search.doc_types", set())
shards = request.registry.get("elasticsearch.shards", 1)
# Create the new index with zero replicas and index refreshes disabled
# while we are bulk indexing.
new_index = get_index(
new_index_name,
doc_types,
using=client,
shards=shards,
replicas=0,
interval="-1",
)
new_index.create(wait_for_active_shards=shards)
# From this point on, if any error occurs, we want to be able to delete our
# in progress index.
try:
request.db.execute("SET statement_timeout = '600s'")
for _ in parallel_bulk(
client, _project_docs(request.db), index=new_index_name
):
pass
except: # noqa
new_index.delete()
raise
finally:
request.db.rollback()
request.db.close()
# Now that we've finished indexing all of our data we can update the
# replicas and refresh intervals.
client.indices.put_settings(
index=new_index_name,
body={
"index": {
"number_of_replicas": number_of_replicas,
"refresh_interval": refresh_interval,
}
},
)
# Point the alias at our new randomly named index and delete the old index.
if client.indices.exists_alias(name=index_base):
to_delete = set()
actions = []
for name in client.indices.get_alias(name=index_base):
to_delete.add(name)
actions.append({"remove": {"index": name, "alias": index_base}})
actions.append({"add": {"index": new_index_name, "alias": index_base}})
client.indices.update_aliases({"actions": actions})
client.indices.delete(",".join(to_delete))
else:
client.indices.put_alias(name=index_base, index=new_index_name)
except redis.exceptions.LockError as exc:
raise self.retry(countdown=60, exc=exc)
@tasks.task(bind=True, ignore_result=True, acks_late=True)
def reindex_project(self, request, project_name):
r = redis.StrictRedis.from_url(request.registry.settings["celery.scheduler_url"])
try:
with SearchLock(r, timeout=15, blocking_timeout=1):
client = request.registry["elasticsearch.client"]
doc_types = request.registry.get("search.doc_types", set())
index_name = request.registry["elasticsearch.index"]
get_index(
index_name,
doc_types,
using=client,
shards=request.registry.get("elasticsearch.shards", 1),
replicas=request.registry.get("elasticsearch.replicas", 0),
)
for _ in parallel_bulk(
client, _project_docs(request.db, project_name), index=index_name
):
pass
except redis.exceptions.LockError as exc:
raise self.retry(countdown=60, exc=exc)
@tasks.task(bind=True, ignore_result=True, acks_late=True)
def unindex_project(self, request, project_name):
r = redis.StrictRedis.from_url(request.registry.settings["celery.scheduler_url"])
try:
with SearchLock(r, timeout=15, blocking_timeout=1):
client = request.registry["elasticsearch.client"]
index_name = request.registry["elasticsearch.index"]
try:
client.delete(index=index_name, doc_type="doc", id=project_name)
except elasticsearch.exceptions.NotFoundError:
pass
except redis.exceptions.LockError as exc:
raise self.retry(countdown=60, exc=exc)
| apache-2.0 | 7,808,638,181,856,625,000 | 36.064151 | 88 | 0.587151 | false |
AIRLab-POLIMI/ReLe | rele/doc/build.py | 1 | 1557 | #!/usr/bin/env python
# Build the documentation.
import sys, os
from subprocess import check_call, check_output, CalledProcessError, Popen, PIPE
def build_docs(srcPath):
print(srcPath)
print(os.path.join(srcPath, 'include/rele/'))
# Build docs.
# cmd = ['doxygen', '-']
# p = Popen(cmd, stdin=PIPE)
# p.communicate(input=r'''
# PROJECT_NAME = ReLe
# GENERATE_LATEX = NO
# GENERATE_MAN = NO
# GENERATE_RTF = NO
# CASE_SENSE_NAMES = NO
# INPUT = {0}
# FILE_PATTERNS = *.h
# RECURSIVE = YES
# QUIET = YES
# JAVADOC_AUTOBRIEF = NO
# AUTOLINK_SUPPORT = NO
# GENERATE_HTML = NO
# GENERATE_XML = YES
# XML_OUTPUT = {1}/doxyxml
# HTML_OUTPUT = {1}/doxyhtml
# ALIASES = "rst=\verbatim embed:rst"
# ALIASES += "endrst=\endverbatim"
# MACRO_EXPANSION = YES
# PREDEFINED = _WIN32=1 \
# FMT_USE_VARIADIC_TEMPLATES=1 \
# FMT_USE_RVALUE_REFERENCES=1 \
# FMT_USE_USER_DEFINED_LITERALS=1 \
# FMT_API=
# EXCLUDE_SYMBOLS = fmt::internal::* StringValue write_str
# '''.format(os.path.join(srcPath, 'include/rele/'),os.path.join(srcPath, 'doc/build')).encode('UTF-8'))
b = Popen(['make', 'html'], stdin=PIPE, cwd=os.path.join(srcPath, 'doc/'))
b.communicate(input=r' ')
if __name__ == '__main__':
build_docs(sys.argv[1])
| gpl-3.0 | 3,206,385,621,774,522,000 | 35.209302 | 108 | 0.519589 | false |
cournape/numscons | numscons/scons-local/scons-local-1.2.0/SCons/compat/_scons_hashlib.py | 1 | 2946 | #
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__doc__ = """
hashlib backwards-compatibility module for older (pre-2.5) Python versions
This does not not NOT (repeat, *NOT*) provide complete hashlib
functionality. It only wraps the portions of MD5 functionality used
by SCons, in an interface that looks like hashlib (or enough for our
purposes, anyway). In fact, this module will raise an ImportError if
the underlying md5 module isn't available.
"""
__revision__ = "src/engine/SCons/compat/_scons_hashlib.py 2009/09/04 16:33:07 david"
import md5
import string
class md5obj:
md5_module = md5
def __init__(self, name, string=''):
if not name in ('MD5', 'md5'):
raise ValueError, "unsupported hash type"
self.name = 'md5'
self.m = self.md5_module.md5()
def __repr__(self):
return '<%s HASH object @ %#x>' % (self.name, id(self))
def copy(self):
import copy
result = copy.copy(self)
result.m = self.m.copy()
return result
def digest(self):
return self.m.digest()
def update(self, arg):
return self.m.update(arg)
if hasattr(md5.md5(), 'hexdigest'):
def hexdigest(self):
return self.m.hexdigest()
else:
# Objects created by the underlying md5 module have no native
# hexdigest() method (*cough* 1.5.2 *cough*), so provide an
# equivalent lifted from elsewhere.
def hexdigest(self):
h = string.hexdigits
r = ''
for c in self.digest():
i = ord(c)
r = r + h[(i >> 4) & 0xF] + h[i & 0xF]
return r
new = md5obj
def md5(string=''):
return md5obj('md5', string)
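def _usage_example():
    # Illustrative sketch only (not part of the original compat module): the shim
    # mirrors the small slice of the hashlib interface that SCons relies on.
    m = new('md5')
    m.update('some file contents')
    return m.hexdigest()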
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| bsd-3-clause | -9,077,548,005,859,742,000 | 31.373626 | 89 | 0.666327 | false |
ntt-sic/heat | heat/tests/test_cloudwatch.py | 1 | 3114 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from mock import patch
from heat.common import exception
from heat.common import template_format
from heat.tests import common
from heat.tests import utils
from heat.engine import scheduler
from heat.engine import watchrule
AWS_CloudWatch_Alarm = '''
HeatTemplateFormatVersion: '2012-12-12'
Description: Template which tests alarms
Resources:
test_me:
Type: AWS::CloudWatch::Alarm
Properties:
MetricName: cpu_util
Namespace: AWS/EC2
Statistic: Average
Period: '60'
EvaluationPeriods: '1'
Threshold: '50'
ComparisonOperator: GreaterThanThreshold
'''
class CloudWatchAlarmTest(common.HeatTestCase):
def setUp(self):
super(CloudWatchAlarmTest, self).setUp()
utils.setup_dummy_db()
self.ctx = utils.dummy_context()
def parse_stack(self):
t = template_format.parse(AWS_CloudWatch_Alarm)
self.stack = utils.parse_stack(t)
return self.stack
@utils.stack_delete_after
def test_resource_create_good(self):
s = self.parse_stack()
self.assertIsNone(scheduler.TaskRunner(s['test_me'].create)())
@utils.stack_delete_after
def test_resource_create_failed(self):
s = self.parse_stack()
with patch.object(watchrule.WatchRule, 'store') as bad_store:
bad_store.side_effect = KeyError('any random failure')
task_func = scheduler.TaskRunner(s['test_me'].create)
self.assertRaises(exception.ResourceFailure, task_func)
@utils.stack_delete_after
def test_resource_delete_good(self):
s = self.parse_stack()
self.assertIsNone(scheduler.TaskRunner(s['test_me'].create)())
self.assertIsNone(scheduler.TaskRunner(s['test_me'].delete)())
@utils.stack_delete_after
@utils.wr_delete_after
def test_resource_delete_notfound(self):
# if a resource is not found, handle_delete() should not raise
# an exception.
s = self.parse_stack()
self.assertIsNone(scheduler.TaskRunner(s['test_me'].create)())
res_name = self.stack['test_me'].physical_resource_name()
self.wr = watchrule.WatchRule.load(self.ctx,
watch_name=res_name)
with patch.object(watchrule.WatchRule, 'destroy') as bad_destroy:
watch_exc = exception.WatchRuleNotFound(watch_name='test')
bad_destroy.side_effect = watch_exc
self.assertIsNone(scheduler.TaskRunner(s['test_me'].delete)())
| apache-2.0 | 1,914,542,491,262,413,000 | 34.793103 | 78 | 0.673732 | false |
NickBayard/mlh | Parser.py | 1 | 1761 | import re
import bs4
from datetime import datetime
from copy import copy
class ParseError(Exception):
pass
class ParseChildIdError(ParseError):
pass
class ParseAvailableDatesError(ParseError):
pass
class Parser:
"""Parser is essentially the web page scraper that returns various
bits of information on a particular page."""
@staticmethod
def get_child_ids(text):
soup = bs4.BeautifulSoup(text, 'lxml')
ids = [i['name'] for i in soup.find_all('input', {'type': 'checkbox'})]
names = [font.text.strip() for font in soup.find_all('font')
if len(font.text.strip()) > 2][-1].split()
if len(names) != len(ids):
raise ParseChildIdError
for index, name in enumerate(copy(names)):
names[index] = name[:-2] if name.endswith('18') else name
child_ids = dict(zip(names, ids))
return child_ids
@staticmethod
def get_available_dates(text):
soup = bs4.BeautifulSoup(text, 'lxml')
dates = soup.find_all('td', {'class': 'calendar-available'})
links = [date.find('a')['href'] for date in dates]
open_dates = []
for link in links:
m = re.search("dosubmit\('(\d{8})',", link)
if m:
open_dates.append(datetime.strptime(m.group(1), '%Y%m%d').date())
if not open_dates:
raise ParseAvailableDatesError
return open_dates
@staticmethod
def get_available_times(text):
soup = bs4.BeautifulSoup(text, 'lxml')
times = soup.find_all('form', {'name': 'gridSubmitForm'})
formatted_times = [time.find('input', {'name': 'appt_start_time'})['value'] for time in times]
return formatted_times
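def _demo_available_times():
    # Illustrative sketch only (not part of the original module); the HTML below is a
    # made-up fragment, not a real appointment page.
    sample = ('<form name="gridSubmitForm">'
              '<input name="appt_start_time" value="09:30"/></form>'
              '<form name="gridSubmitForm">'
              '<input name="appt_start_time" value="10:15"/></form>')
    return Parser.get_available_times(sample)  # -> ['09:30', '10:15']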
| mit | -8,018,932,969,187,574,000 | 24.897059 | 102 | 0.597956 | false |
BitNetwork/pysys | pyfile.py | 1 | 5243 | import sys
import os
import tty
import termios
import shutil
import time
import signal
import math
width = 80
height = 20
terminalConfig = None
cd = os.getcwd()
display = []
columnLength = 20
selected = 0
keypressCache = []
def inputMode():
global terminalConfig
terminalConfig = termios.tcgetattr(sys.stdin)
tty.setcbreak(sys.stdin)
def resetMode():
global terminalConfig
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, terminalConfig)
def updateSize():
size = shutil.get_terminal_size((80, 20))
global width, height
width, height = size[0], size[1]
def clear():
#time.sleep(2)
# sys.stdout.write("\r" + chr(27) + "[H")
sys.stdout.write("\r" + (chr(27) + "[A") * height)
#time.sleep(2)
# sys.stdout.write((((" " * width) + "\r\n") * height)[:-2]) # No need to clear the stdout... the display is already empty and does that
# sys.stdout.write("\r" + chr(27) + "[H")
def flushDisplay():
global display
updateSize()
clear()
display = []
for index1d in range(height):
display.append([])
for index2d in range(width):
display[index1d].append(" ")
def updateDisplay():
global display
for line in range(len(display)):
if line != len(display) - 1:
sys.stdout.write(("".join(display[line]) + "\r\n"))
else:
sys.stdout.write("".join(display[line]))
sys.stdout.flush()
def draw(givenList, text, position, direction):
# Direction: 0: left to right, 1: right to left, 2: down to up, 3: up to down
if position[0] < 0:
position[0] = len(givenList) + position[0]
if position[1] < 0:
position[1] = len(givenList[0]) + position[1]
for index in range(len(text)):
if direction == 0:
givenList[position[0]][position[1] + index] = text[index]
elif direction == 1:
      givenList[position[0]][position[1] + len(text) - 1 - index] = text[index]
elif direction == 2:
givenList[position[0] + index][position[1]] = text[index]
elif direction == 3:
givenList[position[0] + len(text) - 1 - index][position[1]] = text[index]
return givenList
def color(text, color, bgColor, style):
# See http://stackoverflow.com/questions/287871/print-in-terminal-with-colors-using-python/21786287#21786287, https://i.stack.imgur.com/6otvY.png, https://i.stack.imgur.com/lZr23.png & https://en.wikipedia.org/wiki/ANSI_escape_code#Colors
# Color: Black: 30, Red: 31, Green: 32, Yellow: 33, Blue: 34, Purple: 35, Cyan: 36, White: 37
# Background color: Black: 40, Red: 41, Green: 42, Yellow: 43, Blue: 44, Purple: 45, Cyan: 46, White: 47
# Style: Normal: 0, Bold: 1, Thin: 2, Italic: 3, 4: Underline, 5: Increase brightness, 6: Increase brightness, 7: Inverse color & bgcolor
return "\x1b[" + str(style) + ";" + str(color) + ";" + str(bgColor) + "m" + text + "\x1b[0m"
def rgbColor(text, color, bgColor):
return "\x1b[38;2;" + str(color[0]) + ";" + str(color[1]) + ";" + str(color[2]) + ";48;2;" + str(bgColor[0]) + ";" + str(bgColor[1]) + ";" + str(bgColor[2]) + "m" + text + "\x1b[0m"
def rgbColorTile(givenList, start, end, color, bgColor):
if start[0] < 0:
start[0] = len(givenList) + start[0]
if start[1] < 0:
start[1] = len(givenList[0]) + start[1]
if end[0] < 0:
end[0] = len(givenList) + end[0] + 1
if end[1] < 0:
end[1] = len(givenList[0]) + end[1]
for indexY in range(end[0] - start[0]):
for indexX in range(end[1] - start[1]):
givenList[start[0] + indexY][start[1] + indexX] = rgbColor(givenList[start[0] + indexY][start[1] + indexX], color, bgColor)
def redraw(checkInput):
global display, width, height, keypressCache
flushDisplay()
if checkInput == True:
key = ord(sys.stdin.read(1))
keypressCache.append(key)
keypressCache = keypressCache[-5:]
draw(display, str(keypressCache), [6, 90], 0)
keyHandle(key)
# begin draw code #
# for line in range(len(display)): # Line numbers
# draw(display, str(line), [line, 0], 0)
size = str(height) + " x " + str(width) # Terminal size
draw(display, size, [-1, -1 - len(size)], 0)
dirList = os.listdir("/bin")
for file in range(len(dirList)):
column = math.floor(file / (height - 2))
row = file - (column * (height - 2))
filename = dirList[file]
if len(filename) >= columnLength:
filename = filename[:columnLength - 3] + ".."
draw(display, filename, [1 + row, 1 + column * columnLength], 0)
if file == selected:
rgbColorTile(display, [1 + row, 1 + column * columnLength], [2 + row, column * columnLength + columnLength], [0x10, 0x10, 0x10], [0xA0, 0xA0, 0xA0])
# colors
rgbColorTile(display, [0, -21], [-1, -20], [0x80, 0x80, 0x80], [0x5, 0x5, 0x5])
rgbColorTile(display, [0, 0], [-1, -1], [0xA0, 0xA0, 0xA0], [0x10, 0x10, 0x10]) # Paint everything c#101010 bgc#A0A0A0
# end draw code #
updateDisplay()
def keyHandle(key): # 27 91 66 - 27 91 65
global keypressCache, selected
draw(display, str(key), [0, 2], 0)
if keypressCache[-3:] == [27, 91, 66]: # down key
selected += 1
elif keypressCache[-3:] == [27, 91, 65]: # up key
selected -= 1
# time.sleep(3)
def signalHandler(signal, frame):
resetMode()
sys.exit(1)
signal.signal(signal.SIGINT, signalHandler)
inputMode()
redraw(False)
while 1:
redraw(True)
# time.sleep(1)
| mit | 1,662,445,574,557,499,100 | 33.267974 | 240 | 0.632272 | false |
low-sky/degas | degas/examples/gridgals.py | 1 | 3442 | from degas.gridding import gridGalaxy
datadir = '/mnt/bigdata/erosolow/surveys/DEGAS/'
ppo = True
# gridGalaxy(galaxy='IC0342', setup='12CO',
# release='QA0', datadir=datadir,
# PostprocOnly=ppo)
gallist = [#'IC0342',
'NGC0337',
'NGC2146',
'NGC2903',
'NGC3147',
'NGC3521',
'NGC3631',
'NGC4030',
'NGC4038',
'NGC4258',
'NGC4321',
'NGC4414',
'NGC4501',
'NGC4535',
'NGC4569',
'NGC5055',
'NGC6946']
gallist = gallist[-2:]
HCNgals = gallist
# HCNgals = ['NGC2903', 'NGC2146', 'IC0342']
# HCNgals = ['IC0342']
for gal in HCNgals:
gridGalaxy(galaxy=gal, setup='HCN_HCO+',
release='QA0', datadir=datadir, PostprocOnly=ppo)
COgals = gallist
# COgals = ['NGC2903', 'NGC2146', 'IC0342']
# COgals = ['NGC2146']
# COgals = ['IC0342']
for gal in COgals:
gridGalaxy(galaxy=gal, setup='13CO_C18O',
release='QA0', datadir=datadir, PostprocOnly=ppo)
HCNgals = [
'NGC4038',
'NGC2146',
'NGC6946',
'NGC7331',
'NGC5248',
'NGC2903',
'NGC4321',
'NGC5055',
'NGC4501',
'NGC3147',
'NGC3521',
'NGC4414',
'NGC0337',
'NGC3631',
'NGC4030',
'NGC4258',
'NGC4535',
'NGC4569',
]
HCNgals=['IC0342']
COgals = [
'NGC4038',
'NGC2146',
'NGC7331',
'NGC2903',
'NGC4321',
'NGC5055',
'NGC4501',
'NGC3147',
'NGC0337',
'NGC4569',
'NGC3521',
'NGC3631',
'NGC4030',
'NGC4258',
'NGC4414',
'NGC4535',
'IC0342',
]
# gridGalaxy(galaxy='NGC5055', setup='13CO_C18O', release='QA0', datadir=datadir)
# gridGalaxy(galaxy='NGC5055', setup='HCN_HCO+', release='QA0', datadir=datadir)
#gridGalaxy(galaxy='NGC7331', setup='HCN_HCO+',
# release='QA0', datadir=datadir)
# gridGalaxy(galaxy='NGC6946', setup='HCN_HCO+', release='QA0', datadir=datadir)
# gridGalaxy(galaxy='NGC4569', setup='HCN_HCO+', release='QA0', datadir=datadir)
# gridGalaxy(galaxy='NGC4569', setup='13CO_C18O', release='QA0', datadir=datadir)
# gridGalaxy(galaxy='NGC4501', setup='13CO_C18O', release='QA0', datadir=datadir)
# gridGalaxy(galaxy='NGC4501', setup='HCN_HCO+', release='QA0', datadir=datadir)
# gridGalaxy(galaxy='NGC4414', setup='13CO_C18O', release='QA0', datadir=datadir)
# gridGalaxy(galaxy='NGC4414', setup='HCN_HCO+', release='QA0', datadir=datadir)
# gridGalaxy(galaxy='NGC4321', setup='HCN_HCO+', release='QA0', datadir=datadir)
# gridGalaxy(galaxy='NGC4321', setup='13CO_C18O', release='QA0', datadir=datadir)
# # gridGalaxy(galaxy='NGC4038', setup='13CO_C18O', release='QA0', datadir=datadir)
# # gridGalaxy(galaxy='NGC4038', setup='HCN_HCO+', release='QA0', datadir=datadir)
# gridGalaxy(galaxy='NGC3521', setup='HCN_HCO+', release='QA0', datadir=datadir)
# gridGalaxy(galaxy='NGC2903', setup='HCN_HCO+', release='QA0', datadir=datadir)
# gridGalaxy(galaxy='NGC2903', setup='13CO_C18O', release='QA0', datadir=datadir)
# gridGalaxy(galaxy='NGC2146', setup='HCN_HCO+', release='QA0', datadir=datadir)
# gridGalaxy(galaxy='NGC2146', setup='13CO_C18O', release='QA0', datadir=datadir)
# # gridGalaxy(galaxy='IC0342', setup='13CO_C18O', release='QA0', datadir=datadir)
# # gridGalaxy(galaxy='IC0342', setup='HCN_HCO+', release='QA0', datadir=datadir)
| gpl-3.0 | 5,651,762,327,323,637,000 | 25.682171 | 83 | 0.614468 | false |
land-pack/pyroom | examples/tornado_ioloop_add_future.py | 1 | 1250 | import tornado
from tornado.httpclient import HTTPRequest
from tornado.web import Application
from tornado.websocket import websocket_connect
from tornado.testing import AsyncHTTPTestCase, gen_test
def message_processed_callback(*args, **kwargs):
print 'Callback(args=%r, kwargs=%r)' % (args, kwargs)
class RealtimeHandler(tornado.websocket.WebSocketHandler):
def initialize(self):
self.io_loop = tornado.ioloop.IOLoop.instance()
def on_message(self, message):
future = self.on_some_message(message)
print 'The future:', future
self.io_loop.add_future(future, message_processed_callback)
@tornado.gen.coroutine
def on_some_message(self, message):
print 'Before sleep'
yield tornado.gen.sleep(3)
print 'After sleep'
self.write_message(message)
class ChatTestCase(AsyncHTTPTestCase):
def get_app(self):
return Application([
('/rt', RealtimeHandler),
])
@gen_test
def test_reply(self):
request = HTTPRequest('ws://127.0.0.1:%d/rt' % self.get_http_port())
ws = yield websocket_connect(request)
ws.write_message('Hi')
response = yield ws.read_message()
print 'Response:', response | gpl-3.0 | -7,800,830,354,087,437,000 | 28.093023 | 76 | 0.6672 | false |
mbalasso/mynumpy | numpy/polynomial/tests/test_polynomial.py | 1 | 15429 | """Tests for polynomial module.
"""
from __future__ import division
import numpy as np
import numpy.polynomial.polynomial as poly
from numpy.testing import (
TestCase, assert_almost_equal, assert_raises,
assert_equal, assert_, run_module_suite)
def trim(x) :
return poly.polytrim(x, tol=1e-6)
T0 = [ 1]
T1 = [ 0, 1]
T2 = [-1, 0, 2]
T3 = [ 0, -3, 0, 4]
T4 = [ 1, 0, -8, 0, 8]
T5 = [ 0, 5, 0, -20, 0, 16]
T6 = [-1, 0, 18, 0, -48, 0, 32]
T7 = [ 0, -7, 0, 56, 0, -112, 0, 64]
T8 = [ 1, 0, -32, 0, 160, 0, -256, 0, 128]
T9 = [ 0, 9, 0, -120, 0, 432, 0, -576, 0, 256]
Tlist = [T0, T1, T2, T3, T4, T5, T6, T7, T8, T9]
class TestConstants(TestCase) :
def test_polydomain(self) :
assert_equal(poly.polydomain, [-1, 1])
def test_polyzero(self) :
assert_equal(poly.polyzero, [0])
def test_polyone(self) :
assert_equal(poly.polyone, [1])
def test_polyx(self) :
assert_equal(poly.polyx, [0, 1])
class TestArithmetic(TestCase) :
def test_polyadd(self) :
for i in range(5) :
for j in range(5) :
msg = "At i=%d, j=%d" % (i,j)
tgt = np.zeros(max(i,j) + 1)
tgt[i] += 1
tgt[j] += 1
res = poly.polyadd([0]*i + [1], [0]*j + [1])
assert_equal(trim(res), trim(tgt), err_msg=msg)
def test_polysub(self) :
for i in range(5) :
for j in range(5) :
msg = "At i=%d, j=%d" % (i,j)
tgt = np.zeros(max(i,j) + 1)
tgt[i] += 1
tgt[j] -= 1
res = poly.polysub([0]*i + [1], [0]*j + [1])
assert_equal(trim(res), trim(tgt), err_msg=msg)
def test_polymulx(self):
assert_equal(poly.polymulx([0]), [0])
assert_equal(poly.polymulx([1]), [0, 1])
for i in range(1, 5):
ser = [0]*i + [1]
tgt = [0]*(i + 1) + [1]
assert_equal(poly.polymulx(ser), tgt)
def test_polymul(self) :
for i in range(5) :
for j in range(5) :
msg = "At i=%d, j=%d" % (i,j)
tgt = np.zeros(i + j + 1)
tgt[i + j] += 1
res = poly.polymul([0]*i + [1], [0]*j + [1])
assert_equal(trim(res), trim(tgt), err_msg=msg)
def test_polydiv(self) :
# check zero division
assert_raises(ZeroDivisionError, poly.polydiv, [1], [0])
# check scalar division
quo, rem = poly.polydiv([2],[2])
assert_equal((quo, rem), (1, 0))
quo, rem = poly.polydiv([2,2],[2])
assert_equal((quo, rem), ((1,1), 0))
# check rest.
for i in range(5) :
for j in range(5) :
msg = "At i=%d, j=%d" % (i,j)
ci = [0]*i + [1,2]
cj = [0]*j + [1,2]
tgt = poly.polyadd(ci, cj)
quo, rem = poly.polydiv(tgt, ci)
res = poly.polyadd(poly.polymul(quo, ci), rem)
assert_equal(res, tgt, err_msg=msg)
class TestEvaluation(TestCase):
# coefficients of 1 + 2*x + 3*x**2
c1d = np.array([1., 2., 3.])
c2d = np.einsum('i,j->ij', c1d, c1d)
c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d)
# some random values in [-1, 1)
x = np.random.random((3, 5))*2 - 1
y = poly.polyval(x, [1., 2., 3.])
def test_polyval(self) :
#check empty input
assert_equal(poly.polyval([], [1]).size, 0)
#check normal input)
x = np.linspace(-1,1)
y = [x**i for i in range(5)]
for i in range(5) :
tgt = y[i]
res = poly.polyval(x, [0]*i + [1])
assert_almost_equal(res, tgt)
tgt = x*(x**2 - 1)
res = poly.polyval(x, [0, -1, 0, 1])
assert_almost_equal(res, tgt)
#check that shape is preserved
for i in range(3) :
dims = [2]*i
x = np.zeros(dims)
assert_equal(poly.polyval(x, [1]).shape, dims)
assert_equal(poly.polyval(x, [1,0]).shape, dims)
assert_equal(poly.polyval(x, [1,0,0]).shape, dims)
def test_polyval2d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
#test exceptions
assert_raises(ValueError, poly.polyval2d, x1, x2[:2], self.c2d)
#test values
tgt = y1*y2
res = poly.polyval2d(x1, x2, self.c2d)
assert_almost_equal(res, tgt)
#test shape
z = np.ones((2,3))
res = poly.polyval2d(z, z, self.c2d)
assert_(res.shape == (2,3))
def test_polyval3d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
#test exceptions
assert_raises(ValueError, poly.polyval3d, x1, x2, x3[:2], self.c3d)
#test values
tgt = y1*y2*y3
res = poly.polyval3d(x1, x2, x3, self.c3d)
assert_almost_equal(res, tgt)
#test shape
z = np.ones((2,3))
res = poly.polyval3d(z, z, z, self.c3d)
assert_(res.shape == (2, 3))
def test_polygrid2d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
#test values
tgt = np.einsum('i,j->ij', y1, y2)
res = poly.polygrid2d(x1, x2, self.c2d)
assert_almost_equal(res, tgt)
#test shape
z = np.ones((2,3))
res = poly.polygrid2d(z, z, self.c2d)
assert_(res.shape == (2, 3)*2)
def test_polygrid3d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
#test values
tgt = np.einsum('i,j,k->ijk', y1, y2, y3)
res = poly.polygrid3d(x1, x2, x3, self.c3d)
assert_almost_equal(res, tgt)
#test shape
z = np.ones((2,3))
res = poly.polygrid3d(z, z, z, self.c3d)
assert_(res.shape == (2, 3)*3)
class TestIntegral(TestCase):
def test_polyint(self) :
# check exceptions
assert_raises(ValueError, poly.polyint, [0], .5)
assert_raises(ValueError, poly.polyint, [0], -1)
assert_raises(ValueError, poly.polyint, [0], 1, [0,0])
# test integration of zero polynomial
for i in range(2, 5):
k = [0]*(i - 2) + [1]
res = poly.polyint([0], m=i, k=k)
assert_almost_equal(res, [0, 1])
# check single integration with integration constant
for i in range(5) :
scl = i + 1
pol = [0]*i + [1]
tgt = [i] + [0]*i + [1/scl]
res = poly.polyint(pol, m=1, k=[i])
assert_almost_equal(trim(res), trim(tgt))
# check single integration with integration constant and lbnd
for i in range(5) :
scl = i + 1
pol = [0]*i + [1]
res = poly.polyint(pol, m=1, k=[i], lbnd=-1)
assert_almost_equal(poly.polyval(-1, res), i)
# check single integration with integration constant and scaling
for i in range(5) :
scl = i + 1
pol = [0]*i + [1]
tgt = [i] + [0]*i + [2/scl]
res = poly.polyint(pol, m=1, k=[i], scl=2)
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with default k
for i in range(5) :
for j in range(2,5) :
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j) :
tgt = poly.polyint(tgt, m=1)
res = poly.polyint(pol, m=j)
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with defined k
for i in range(5) :
for j in range(2,5) :
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j) :
tgt = poly.polyint(tgt, m=1, k=[k])
res = poly.polyint(pol, m=j, k=range(j))
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with lbnd
for i in range(5) :
for j in range(2,5) :
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j) :
tgt = poly.polyint(tgt, m=1, k=[k], lbnd=-1)
res = poly.polyint(pol, m=j, k=range(j), lbnd=-1)
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with scaling
for i in range(5) :
for j in range(2,5) :
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j) :
tgt = poly.polyint(tgt, m=1, k=[k], scl=2)
res = poly.polyint(pol, m=j, k=range(j), scl=2)
assert_almost_equal(trim(res), trim(tgt))
def test_polyint_axis(self):
# check that axis keyword works
c2d = np.random.random((3, 4))
tgt = np.vstack([poly.polyint(c) for c in c2d.T]).T
res = poly.polyint(c2d, axis=0)
assert_almost_equal(res, tgt)
tgt = np.vstack([poly.polyint(c) for c in c2d])
res = poly.polyint(c2d, axis=1)
assert_almost_equal(res, tgt)
tgt = np.vstack([poly.polyint(c, k=3) for c in c2d])
res = poly.polyint(c2d, k=3, axis=1)
assert_almost_equal(res, tgt)
class TestDerivative(TestCase) :
def test_polyder(self) :
# check exceptions
assert_raises(ValueError, poly.polyder, [0], .5)
assert_raises(ValueError, poly.polyder, [0], -1)
        # check that zeroth derivative does nothing
for i in range(5) :
tgt = [0]*i + [1]
res = poly.polyder(tgt, m=0)
assert_equal(trim(res), trim(tgt))
# check that derivation is the inverse of integration
for i in range(5) :
for j in range(2,5) :
tgt = [0]*i + [1]
res = poly.polyder(poly.polyint(tgt, m=j), m=j)
assert_almost_equal(trim(res), trim(tgt))
# check derivation with scaling
for i in range(5) :
for j in range(2,5) :
tgt = [0]*i + [1]
res = poly.polyder(poly.polyint(tgt, m=j, scl=2), m=j, scl=.5)
assert_almost_equal(trim(res), trim(tgt))
def test_polyder_axis(self):
# check that axis keyword works
c2d = np.random.random((3, 4))
tgt = np.vstack([poly.polyder(c) for c in c2d.T]).T
res = poly.polyder(c2d, axis=0)
assert_almost_equal(res, tgt)
tgt = np.vstack([poly.polyder(c) for c in c2d])
res = poly.polyder(c2d, axis=1)
assert_almost_equal(res, tgt)
class TestVander(TestCase):
# some random values in [-1, 1)
x = np.random.random((3, 5))*2 - 1
def test_polyvander(self) :
# check for 1d x
x = np.arange(3)
v = poly.polyvander(x, 3)
assert_(v.shape == (3, 4))
for i in range(4) :
coef = [0]*i + [1]
assert_almost_equal(v[..., i], poly.polyval(x, coef))
# check for 2d x
x = np.array([[1, 2], [3, 4], [5, 6]])
v = poly.polyvander(x, 3)
assert_(v.shape == (3, 2, 4))
for i in range(4) :
coef = [0]*i + [1]
assert_almost_equal(v[..., i], poly.polyval(x, coef))
def test_polyvander2d(self) :
# also tests polyval2d for non-square coefficient array
x1, x2, x3 = self.x
c = np.random.random((2, 3))
van = poly.polyvander2d(x1, x2, [1, 2])
tgt = poly.polyval2d(x1, x2, c)
res = np.dot(van, c.flat)
assert_almost_equal(res, tgt)
# check shape
van = poly.polyvander2d([x1], [x2], [1, 2])
assert_(van.shape == (1, 5, 6))
def test_polyvander3d(self) :
# also tests polyval3d for non-square coefficient array
x1, x2, x3 = self.x
c = np.random.random((2, 3, 4))
van = poly.polyvander3d(x1, x2, x3, [1, 2, 3])
tgt = poly.polyval3d(x1, x2, x3, c)
res = np.dot(van, c.flat)
assert_almost_equal(res, tgt)
# check shape
van = poly.polyvander3d([x1], [x2], [x3], [1, 2, 3])
assert_(van.shape == (1, 5, 24))
class TestCompanion(TestCase):
def test_raises(self):
assert_raises(ValueError, poly.polycompanion, [])
assert_raises(ValueError, poly.polycompanion, [1])
def test_dimensions(self):
for i in range(1, 5):
coef = [0]*i + [1]
assert_(poly.polycompanion(coef).shape == (i, i))
def test_linear_root(self):
assert_(poly.polycompanion([1, 2])[0, 0] == -.5)
class TestMisc(TestCase) :
def test_polyfromroots(self) :
res = poly.polyfromroots([])
assert_almost_equal(trim(res), [1])
for i in range(1,5) :
roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2])
tgt = Tlist[i]
res = poly.polyfromroots(roots)*2**(i-1)
assert_almost_equal(trim(res),trim(tgt))
def test_polyroots(self) :
assert_almost_equal(poly.polyroots([1]), [])
assert_almost_equal(poly.polyroots([1, 2]), [-.5])
for i in range(2,5) :
tgt = np.linspace(-1, 1, i)
res = poly.polyroots(poly.polyfromroots(tgt))
assert_almost_equal(trim(res), trim(tgt))
def test_polyfit(self) :
def f(x) :
return x*(x - 1)*(x - 2)
# Test exceptions
assert_raises(ValueError, poly.polyfit, [1], [1], -1)
assert_raises(TypeError, poly.polyfit, [[1]], [1], 0)
assert_raises(TypeError, poly.polyfit, [], [1], 0)
assert_raises(TypeError, poly.polyfit, [1], [[[1]]], 0)
assert_raises(TypeError, poly.polyfit, [1, 2], [1], 0)
assert_raises(TypeError, poly.polyfit, [1], [1, 2], 0)
assert_raises(TypeError, poly.polyfit, [1], [1], 0, w=[[1]])
assert_raises(TypeError, poly.polyfit, [1], [1], 0, w=[1,1])
# Test fit
x = np.linspace(0,2)
y = f(x)
#
coef3 = poly.polyfit(x, y, 3)
assert_equal(len(coef3), 4)
assert_almost_equal(poly.polyval(x, coef3), y)
#
coef4 = poly.polyfit(x, y, 4)
assert_equal(len(coef4), 5)
assert_almost_equal(poly.polyval(x, coef4), y)
#
coef2d = poly.polyfit(x, np.array([y,y]).T, 3)
assert_almost_equal(coef2d, np.array([coef3,coef3]).T)
# test weighting
w = np.zeros_like(x)
yw = y.copy()
w[1::2] = 1
yw[0::2] = 0
wcoef3 = poly.polyfit(x, yw, 3, w=w)
assert_almost_equal(wcoef3, coef3)
#
wcoef2d = poly.polyfit(x, np.array([yw,yw]).T, 3, w=w)
assert_almost_equal(wcoef2d, np.array([coef3,coef3]).T)
        # test scaling with complex values: x points whose squares
        # sum to zero.
x = [1, 1j, -1, -1j]
assert_almost_equal(poly.polyfit(x, x, 1), [0, 1])
def test_polytrim(self) :
coef = [2, -1, 1, 0]
# Test exceptions
assert_raises(ValueError, poly.polytrim, coef, -1)
# Test results
assert_equal(poly.polytrim(coef), coef[:-1])
assert_equal(poly.polytrim(coef, 1), coef[:-3])
assert_equal(poly.polytrim(coef, 2), [0])
def test_polyline(self) :
assert_equal(poly.polyline(3,4), [3, 4])
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause | 5,020,763,907,547,989,000 | 31.14375 | 78 | 0.494977 | false |
tleonhardt/CodingPlayground | python/crypto/nacl_sign.py | 1 | 1736 | #!/usr/bin/env python3
# coding=utf-8
"""
Uses PyNaCl to sign a message using ed25519 digital signature algorithm
"""
import sys
import colorama
from colorama import Fore
from nacl.encoding import HexEncoder, RawEncoder
from nacl.signing import SigningKey
if __name__ == '__main__':
colorama.init(autoreset=True)
expected_args = 3
received_args = len(sys.argv) - 1
if received_args != expected_args:
print(Fore.RED + 'require {} arguments, but received {}'.format(expected_args, received_args))
print(Fore.CYAN + 'USAGE: {} <private_keyfile> <file_to_sign> <signature_file>'.format(sys.argv[0]))
sys.exit(1)
key_filename = sys.argv[1]
input_filename = sys.argv[2]
output_filename = sys.argv[3]
# Open the private key file and read in the signing key bytes
with open(key_filename, 'rb') as key_file:
keydata_bytes = key_file.read()
# Reconstruct the SigningKey instance from the serialized form
signing_key = SigningKey(keydata_bytes, encoder=RawEncoder)
# Print out the private Signing key
signing_hex = signing_key.encode(encoder=HexEncoder)
print(Fore.LIGHTBLUE_EX + 'the private key is {}'.format(signing_hex))
# Open the input file and read its data in as a message that we wish to sign
with open(input_filename, 'rb') as msg_file:
msg = msg_file.read()
    # Sign a message with the signing key - this also contains the original message at the end
sig = signing_key.sign(msg)
# Save the signature to an output file
with open(output_filename, 'wb') as sig_file:
sig_file.write(sig)
print(Fore.GREEN + 'Saved signature to {!r} for message file {!r}'.format(output_filename, input_filename))
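    # Illustrative extra check (not part of the original script): the matching
    # public key can verify the signed message; verify() raises BadSignatureError
    # if the signature or message was tampered with.
    verify_key = signing_key.verify_key
    assert verify_key.verify(sig) == msg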
| mit | 6,721,731,631,513,710,000 | 33.72 | 111 | 0.684332 | false |
carmenfdezb/osmscout-server | scripts/import/poly.py | 1 | 1635 | from shapely.geometry import MultiPolygon, Polygon
# taken from http://wiki.openstreetmap.org/wiki/Osmosis/Polygon_Filter_File_Python_Parsing
def parse_poly(fname):
""" Parse an Osmosis polygon filter file.
    Accept the path of an Osmosis polygon filter file, return a shapely.geometry.MultiPolygon object.
http://wiki.openstreetmap.org/wiki/Osmosis/Polygon_Filter_File_Format
"""
in_ring = False
coords = []
lines = open(fname, 'r')
for (index, line) in enumerate(lines):
if index == 0:
# first line is junk.
continue
elif index == 1:
# second line is the first polygon ring.
coords.append([[], []])
ring = coords[-1][0]
in_ring = True
elif in_ring and line.strip() == 'END':
# we are at the end of a ring, perhaps with more to come.
in_ring = False
elif in_ring:
# we are in a ring and picking up new coordinates.
ring.append(map(float, line.split()))
elif not in_ring and line.strip() == 'END':
# we are at the end of the whole polygon.
break
elif not in_ring and line.startswith('!'):
# we are at the start of a polygon part hole.
coords[-1][1].append([])
ring = coords[-1][1][-1]
in_ring = True
elif not in_ring:
# we are at the start of a polygon part.
coords.append([[], []])
ring = coords[-1][0]
in_ring = True
return MultiPolygon(coords)
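if __name__ == '__main__':
    # Minimal usage sketch (assumes the path of an Osmosis .poly file is passed
    # as the first command-line argument); prints the bounding box of the area.
    import sys
    area = parse_poly(sys.argv[1])
    print(area.bounds)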
| gpl-3.0 | -8,498,851,842,667,847,000 | 30.442308 | 102 | 0.540061 | false |
sekikn/ambari | ambari-agent/src/test/python/ambari_agent/examples/ControllerTester.py | 2 | 6310 | #!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from ambari_agent import Controller
import pprint, json, os, time, sys
import tempfile
from urllib2 import Request, urlopen, URLError
from mock.mock import patch, MagicMock, call
from ambari_agent.AmbariConfig import AmbariConfig
import Queue
import logging
from ambari_agent import PythonExecutor
logger=logging.getLogger()
queue = Queue.Queue()
# Set to True to replace python calls with mockups
disable_python = True
agent_version = "1.3.0"
# Values from the list below are returned in response to agent requests (one per
# request). When every value has been returned, the last element of the list is
# returned on every subsequent request.
responces = [
"""{"responseId":"n",
"response":"OK"}""",
"""
{
"responseId":"n",
"restartAgent": false,
"executionCommands":
[{
"commandId": "31-1",
"role" : "DATANODE",
"taskId" : 2,
"clusterName" : "clusterName",
"serviceName" : "HDFS",
"roleCommand" : "UPGRADE",
"hostname" : "localhost.localdomain",
"hostLevelParams": {},
"clusterHostInfo": "clusterHostInfo",
"configurations": {},
"commandType": "EXECUTION_COMMAND",
"configurations": {"global" : {}},
"roleParams": {},
"commandParams" : {
"source_stack_version": "{\\"stackName\\":\\"HDP\\",\\"stackVersion\\":\\"1.2.2\\"}",
"target_stack_version": "{\\"stackName\\":\\"HDP\\",\\"stackVersion\\":\\"1.3.0\\"}"
},
"clusterHostInfo": {
"ambari_db_server_host": [
"dev.hortonworks.com"
],
"ganglia_server_host": [
"dev.hortonworks.com"
],
"namenode_host": [
"dev.hortonworks.com"
],
"slave_hosts": [
"dev.hortonworks.com"
]
}
}],
"statusCommands":[]
}
""",
"""
{
"responseId":"n",
"restartAgent": false,
"executionCommands": [],
"statusCommands":[]
}
"""
]
class Int(object):
def __init__(self, value):
self.value = value
def inc(self):
self.value += 1
def val(self):
return self.value
responseId = Int(0)
def main():
if disable_python:
with patch.object(PythonExecutor.PythonExecutor, 'run_file') \
as run_file_py_method:
run_file_py_method.side_effect = \
lambda command, file, tmpoutfile, tmperrfile: {
'exitcode' : 0,
'stdout' : "Simulated run of py %s" % file,
'stderr' : 'None'
}
run_simulation()
else:
run_simulation()
def run_simulation():
Controller.logger = MagicMock()
sendRequest_method = MagicMock()
tmpfile = tempfile.gettempdir()
config = AmbariConfig().getConfig()
config.set('agent', 'prefix', tmpfile)
ver_file = os.path.join(tmpfile, "version")
with open(ver_file, "w") as text_file:
text_file.write(agent_version)
controller = Controller.Controller(config)
controller.sendRequest = sendRequest_method
controller.netutil.HEARTBEAT_IDLE_INTERVAL_DEFAULT_MAX_SEC = 0.1
controller.netutil.HEARTBEAT_NOT_IDDLE_INTERVAL_SEC = 0.1
controller.range = 1
for responce in responces:
queue.put(responce)
def send_stub(url, data):
logger.info("Controller sends data to %s :" % url)
logger.info(pprint.pformat(data))
if not queue.empty():
responce = queue.get()
else:
responce = responces[-1]
logger.info("There is no predefined responce available, sleeping for 30 sec")
time.sleep(30)
responce = json.loads(responce)
responseId.inc()
responce["responseId"] = responseId.val()
responce = json.dumps(responce)
logger.info("Returning data to Controller:" + responce)
return responce
sendRequest_method.side_effect = send_stub
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(asctime)s %(filename)s:%(lineno)d - \
%(message)s")
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)
logger.info("Starting")
controller.start()
controller.actionQueue.IDLE_SLEEP_TIME = 0.1
controller.run()
if __name__ == '__main__':
# s = """
# {
# "responseId":"n",
# "restartAgent": false,
# "executionCommands":
# [{
# "commandId": "31-1",
# "role" : "DATANODE",
# "taskId" : 2,
# "clusterName" : "clusterName",
# "serviceName" : "HDFS",
# "roleCommand" : "UPGRADE",
# "hostname" : "localhost.localdomain",
# "hostLevelParams": {},
# "clusterHostInfo": "clusterHostInfo",
# "configurations": {},
# "commandType": "EXECUTION_COMMAND",
# "configurations": {"global" : {}},
# "roleParams": {},
# "commandParams" : {
# "commandParams": {"source_stack_version": "{\\"stackName\\":\\"HDP\\",\\"stackVersion\\":\\"1.2.0\\"}", "target_stack_version": "{\\"stackName\\":\\"HDP\\",\\"stackVersion\\":\\"1.2.2\\"}"}
# },
# "clusterHostInfo": {
# "ambari_db_server_host": [
# "dev.hortonworks.com"
# ],
# "ganglia_server_host": [
# "dev.hortonworks.com"
# ],
# "namenode_host": [
# "dev.hortonworks.com"
# ],
# "slave_hosts": [
# "dev.hortonworks.com"
# ]
# }
# }],
# "statusCommands":[]
# }
# """
# t = json.loads(s)
# pprint.pprint(t)
main()
| apache-2.0 | -7,774,303,345,511,106,000 | 26.554585 | 200 | 0.598732 | false |
google/ml-compiler-opt | compiler_opt/rl/trainer.py | 1 | 6179 | # coding=utf-8
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""LLVM Policy Trainer."""
import time
from absl import logging
import gin
import tensorflow as tf
from tf_agents.policies import policy_loader
from tf_agents.utils import common as common_utils
_INLINING_DEFAULT_KEY = 'inlining_default'
@gin.configurable
class Trainer(object):
"""Object that trains LLVM policy.
After initialization, the function 'train' can be called multiple times to
train on different datasets. An example usage:
```python
trainer = Trainer(root_dir, agent)
trainer.train(data_iter_1, num_iterations_1)
trainer.train(data_iter_2, num_iterations_2)
```
"""
def __init__(
self,
root_dir,
agent,
warmstart_policy_dir=None,
# Params for summaries and logging
checkpoint_interval=10000,
log_interval=100,
summary_interval=1000,
summaries_flush_secs=10):
"""Initialize the Trainer object.
Args:
root_dir: str, the root directory to host all required sub-directories.
agent: a tf_agents.agents.TFAgent object.
warmstart_policy_dir: the directory to warmstart the policy if given.
checkpoint_interval: int, the training step interval for saving
checkpoint.
log_interval: int, the training step interval for logging.
summary_interval: int, the training step interval for exporting to
tensorboard.
summaries_flush_secs: int, the seconds for flushing to tensorboard.
"""
self._root_dir = root_dir
self._agent = agent
self._checkpoint_interval = checkpoint_interval
self._log_interval = log_interval
self._summary_interval = summary_interval
self._summary_writer = tf.summary.create_file_writer(
self._root_dir, flush_millis=summaries_flush_secs * 1000)
self._summary_writer.set_as_default()
self._global_step = tf.compat.v1.train.get_or_create_global_step()
# Initialize agent and trajectory replay.
# Wrap training and trajectory replay in a tf.function to make it much
# faster.
self._agent.initialize()
self._agent.train = common_utils.function(self._agent.train)
self._initialize_metrics()
# Load warmstart policy before restoring from checkpoint.
if warmstart_policy_dir:
warmstart_policy = policy_loader.load(warmstart_policy_dir)
self._agent.policy.update(
policy=warmstart_policy,
tau=1.0,
tau_non_trainable=None,
sort_variables_by_name=False)
self._checkpointer = common_utils.Checkpointer(
ckpt_dir=self._root_dir,
agent=self._agent,
global_step=self._global_step)
self._checkpointer.initialize_or_restore()
self._start_time = time.time()
self._last_checkpoint_step = 0
self._last_log_step = 0
def _initialize_metrics(self):
"""Initializes metrics."""
self._data_action_mean = tf.keras.metrics.Mean()
self._data_reward_mean = tf.keras.metrics.Mean()
self._num_trajectories = tf.keras.metrics.Sum()
def _update_metrics(self, experience, monitor_dict):
"""Updates metrics and exports to Tensorboard."""
is_action = ~experience.is_boundary()
self._data_action_mean.update_state(
experience.action, sample_weight=is_action)
self._data_reward_mean.update_state(
experience.reward, sample_weight=is_action)
self._num_trajectories.update_state(experience.is_first())
with tf.name_scope('Monitor/'):
tf.summary.scalar(
name='data_action_mean',
data=self._data_action_mean.result(),
step=self._global_step)
tf.summary.scalar(
name='data_reward_mean',
data=self._data_reward_mean.result(),
step=self._global_step)
tf.summary.scalar(
name='num_trajectories',
data=self._num_trajectories.result(),
step=self._global_step)
for key, value in monitor_dict.items():
tf.summary.scalar(name=key, data=value, step=self._global_step)
tf.summary.histogram(
name='reward', data=experience.reward, step=self._global_step)
def _reset_metrics(self):
"""Reset all metrics."""
self._data_action_mean.reset_states()
self._data_reward_mean.reset_states()
self._num_trajectories.reset_states()
def _log_experiment(self, loss):
"""Log training info."""
global_step_val = self._global_step.numpy()
if global_step_val - self._last_log_step >= self._log_interval:
logging.info('step = %d, loss = %g', global_step_val, loss)
time_acc = time.time() - self._start_time
steps_per_sec = (global_step_val - self._last_log_step) / time_acc
logging.info('%.3f steps/sec', steps_per_sec)
self._last_log_step = global_step_val
self._start_time = time.time()
def _save_checkpoint(self):
if (self._global_step.numpy() - self._last_checkpoint_step >=
self._checkpoint_interval):
self._checkpointer.save(global_step=self._global_step)
self._last_checkpoint_step = self._global_step.numpy()
def global_step_numpy(self):
return self._global_step.numpy()
def train(self, dataset_iter, monitor_dict, num_iterations):
"""Trains policy with data from dataset_iter for num_iterations steps."""
self._reset_metrics()
with tf.summary.record_if(
lambda: tf.math.equal(self._global_step % self._summary_interval, 0)):
for _ in range(num_iterations):
experience = next(dataset_iter)
loss = self._agent.train(experience)
self._update_metrics(experience, monitor_dict)
self._log_experiment(loss.loss)
self._save_checkpoint()
| apache-2.0 | 8,987,349,337,808,561,000 | 33.519553 | 78 | 0.674381 | false |
torchingloom/edx-platform | lms/djangoapps/instructor/views/instructor_dashboard.py | 1 | 11063 | """
Instructor Dashboard Views
"""
from django.utils.translation import ugettext as _
from django_future.csrf import ensure_csrf_cookie
from django.views.decorators.cache import cache_control
from edxmako.shortcuts import render_to_response
from django.core.urlresolvers import reverse
from django.utils.html import escape
from django.http import Http404
from django.conf import settings
from xmodule_modifiers import wrap_xblock
from xmodule.html_module import HtmlDescriptor
from xmodule.modulestore import XML_MODULESTORE_TYPE, Location
from xmodule.modulestore.django import modulestore
from xblock.field_data import DictFieldData
from xblock.fields import ScopeIds
from courseware.access import has_access
from courseware.courses import get_course_by_id, get_cms_course_link
from django_comment_client.utils import has_forum_access
from django_comment_common.models import FORUM_ROLE_ADMINISTRATOR
from student.models import CourseEnrollment
from bulk_email.models import CourseAuthorization
from class_dashboard.dashboard_data import get_section_display_name, get_array_section_has_problem
from .tools import get_units_with_due_date, title_or_url
from student.roles import CourseTeacherRole
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
def instructor_dashboard_2(request, course_id):
""" Display the instructor dashboard for a course. """
course = get_course_by_id(course_id, depth=None)
is_studio_course = (modulestore().get_modulestore_type(course_id) != XML_MODULESTORE_TYPE)
access = {
'admin': request.user.is_staff,
'instructor': has_access(request.user, course, 'instructor'),
'staff': has_access(request.user, course, 'staff'),
'forum_admin': has_forum_access(
request.user, course_id, FORUM_ROLE_ADMINISTRATOR
),
}
if not access['staff']:
raise Http404()
sections = [
_section_course_info(course_id, access),
_section_membership(course_id, access),
_section_student_admin(course_id, access),
_section_data_download(course_id, access),
_section_analytics(course_id, access),
]
if (settings.FEATURES.get('INDIVIDUAL_DUE_DATES') and access['instructor']):
sections.insert(3, _section_extensions(course))
# Gate access to course email by feature flag & by course-specific authorization
if settings.FEATURES['ENABLE_INSTRUCTOR_EMAIL'] and is_studio_course and CourseAuthorization.instructor_email_enabled(course_id):
sections.append(_section_send_email(course_id, access, course))
    # Gate access to Metrics tab by feature flag and staff authorization
if settings.FEATURES['CLASS_DASHBOARD'] and access['staff']:
sections.append(_section_metrics(course_id, access))
studio_url = None
if is_studio_course:
studio_url = get_cms_course_link(course)
enrollment_count = sections[0]['enrollment_count']
disable_buttons = False
max_enrollment_for_buttons = settings.FEATURES.get("MAX_ENROLLMENT_INSTR_BUTTONS")
if max_enrollment_for_buttons is not None:
disable_buttons = enrollment_count > max_enrollment_for_buttons
teacher_role = (
CourseTeacherRole(course.location, None).has_user(request.user)
)
context = {
'course': course,
'old_dashboard_url': reverse('instructor_dashboard', kwargs={'course_id': course_id}),
'studio_url': studio_url,
'sections': sections,
'disable_buttons': disable_buttons,
'teacher_role': teacher_role,
}
return render_to_response('instructor/instructor_dashboard_2/instructor_dashboard_2.html', context)
"""
Section functions starting with _section return a dictionary of section data.
The dictionary must include at least {
'section_key': 'circus_expo'
'section_display_name': 'Circus Expo'
}
section_key will be used as a css attribute, javascript tie-in, and template import filename.
section_display_name will be used to generate link titles in the nav bar.
""" # pylint: disable=W0105
def _section_course_info(course_id, access):
""" Provide data for the corresponding dashboard section """
course = get_course_by_id(course_id, depth=None)
course_id_dict = Location.parse_course_id(course_id)
section_data = {
'section_key': 'course_info',
'section_display_name': _('Course Info'),
'access': access,
'course_id': course_id,
'course_org': course_id_dict['org'],
'course_num': course_id_dict['course'],
'course_name': course_id_dict['name'],
'course_display_name': course.display_name,
'enrollment_count': CourseEnrollment.num_enrolled_in(course_id),
'has_started': course.has_started(),
'has_ended': course.has_ended(),
'list_instructor_tasks_url': reverse('list_instructor_tasks', kwargs={'course_id': course_id}),
}
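    # Fold grade_cutoffs into a display string such as 'B: 0.7, A: 0.9'; the
    # trailing ', ' is stripped by the [:-2] slice, with a fallback value set below on error.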
try:
advance = lambda memo, (letter, score): "{}: {}, ".format(letter, score) + memo
section_data['grade_cutoffs'] = reduce(advance, course.grade_cutoffs.items(), "")[:-2]
except Exception:
section_data['grade_cutoffs'] = "Not Available"
# section_data['offline_grades'] = offline_grades_available(course_id)
try:
section_data['course_errors'] = [(escape(a), '') for (a, _unused) in modulestore().get_item_errors(course.location)]
except Exception:
section_data['course_errors'] = [('Error fetching errors', '')]
return section_data
def _section_membership(course_id, access):
""" Provide data for the corresponding dashboard section """
section_data = {
'section_key': 'membership',
'section_display_name': _('Membership'),
'access': access,
'enroll_button_url': reverse('students_update_enrollment', kwargs={'course_id': course_id}),
'unenroll_button_url': reverse('students_update_enrollment', kwargs={'course_id': course_id}),
'modify_beta_testers_button_url': reverse('bulk_beta_modify_access', kwargs={'course_id': course_id}),
'list_course_role_members_url': reverse('list_course_role_members', kwargs={'course_id': course_id}),
'modify_access_url': reverse('modify_access', kwargs={'course_id': course_id}),
'list_forum_members_url': reverse('list_forum_members', kwargs={'course_id': course_id}),
'update_forum_role_membership_url': reverse('update_forum_role_membership', kwargs={'course_id': course_id}),
}
return section_data
def _section_student_admin(course_id, access):
""" Provide data for the corresponding dashboard section """
section_data = {
'section_key': 'student_admin',
'section_display_name': _('Student Admin'),
'access': access,
'get_student_progress_url_url': reverse('get_student_progress_url', kwargs={'course_id': course_id}),
'enrollment_url': reverse('students_update_enrollment', kwargs={'course_id': course_id}),
'reset_student_attempts_url': reverse('reset_student_attempts', kwargs={'course_id': course_id}),
'rescore_problem_url': reverse('rescore_problem', kwargs={'course_id': course_id}),
'list_instructor_tasks_url': reverse('list_instructor_tasks', kwargs={'course_id': course_id}),
}
return section_data
def _section_extensions(course):
""" Provide data for the corresponding dashboard section """
section_data = {
'section_key': 'extensions',
'section_display_name': _('Extensions'),
'units_with_due_dates': [(title_or_url(unit), unit.location.url())
for unit in get_units_with_due_date(course)],
'change_due_date_url': reverse('change_due_date', kwargs={'course_id': course.id}),
'reset_due_date_url': reverse('reset_due_date', kwargs={'course_id': course.id}),
'show_unit_extensions_url': reverse('show_unit_extensions', kwargs={'course_id': course.id}),
'show_student_extensions_url': reverse('show_student_extensions', kwargs={'course_id': course.id}),
}
return section_data
def _section_data_download(course_id, access):
""" Provide data for the corresponding dashboard section """
section_data = {
'section_key': 'data_download',
'section_display_name': _('Data Download'),
'access': access,
'get_grading_config_url': reverse('get_grading_config', kwargs={'course_id': course_id}),
'get_students_features_url': reverse('get_students_features', kwargs={'course_id': course_id}),
'get_anon_ids_url': reverse('get_anon_ids', kwargs={'course_id': course_id}),
'list_instructor_tasks_url': reverse('list_instructor_tasks', kwargs={'course_id': course_id}),
'list_report_downloads_url': reverse('list_report_downloads', kwargs={'course_id': course_id}),
'calculate_grades_csv_url': reverse('calculate_grades_csv', kwargs={'course_id': course_id}),
}
return section_data
def _section_send_email(course_id, access, course):
""" Provide data for the corresponding bulk email section """
html_module = HtmlDescriptor(
course.system,
DictFieldData({'data': ''}),
ScopeIds(None, None, None, 'i4x://dummy_org/dummy_course/html/dummy_name')
)
fragment = course.system.render(html_module, 'studio_view')
fragment = wrap_xblock('LmsRuntime', html_module, 'studio_view', fragment, None, extra_data={"course-id": course_id})
email_editor = fragment.content
section_data = {
'section_key': 'send_email',
'section_display_name': _('Email'),
'access': access,
'send_email': reverse('send_email', kwargs={'course_id': course_id}),
'editor': email_editor,
'list_instructor_tasks_url': reverse('list_instructor_tasks', kwargs={'course_id': course_id}),
'email_background_tasks_url': reverse('list_background_email_tasks', kwargs={'course_id': course_id}),
}
return section_data
def _section_analytics(course_id, access):
""" Provide data for the corresponding dashboard section """
section_data = {
'section_key': 'analytics',
'section_display_name': _('Analytics'),
'access': access,
'get_distribution_url': reverse('get_distribution', kwargs={'course_id': course_id}),
'proxy_legacy_analytics_url': reverse('proxy_legacy_analytics', kwargs={'course_id': course_id}),
}
return section_data
def _section_metrics(course_id, access):
"""Provide data for the corresponding dashboard section """
section_data = {
'section_key': 'metrics',
        'section_display_name': _('Metrics'),
'access': access,
'sub_section_display_name': get_section_display_name(course_id),
'section_has_problem': get_array_section_has_problem(course_id),
'get_students_opened_subsection_url': reverse('get_students_opened_subsection'),
'get_students_problem_grades_url': reverse('get_students_problem_grades'),
}
return section_data
| agpl-3.0 | -1,831,634,395,016,032,000 | 42.727273 | 133 | 0.670614 | false |
ligregni/coursera | designalgo2/week5/tsp_bottom_up.py | 1 | 1459 | # Design and Analysis of Algorithms II
# Week 5
# Traveling Salesman Problem
# Bottom up approach
import math
import sys
def distance(a,b):
return math.sqrt( (a[0]-b[0])**2 + (a[1]-b[1])**2 )
def constructsets(m, n, i):
if m == 0:
return [0]
r = []
r += map(lambda x : x | i, constructsets(m-1, n-1, i*2))
if m < n:
r += constructsets(m, n-1, i*2)
return r
def getsets(m, n):
r = constructsets(m-1, n-1, 1)
return r
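# For illustration (traced from the helpers above): with n = 4 cities,
# getsets(2, 4) yields [1, 2, 4], i.e. every bitmask with exactly m-1 = 1 of the
# n-1 = 3 bits set, where bit j-1 stands for city j.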
def getelem(s, exclude=None):
if exclude and exclude != 0:
s = remove_from_set(s, exclude)
r = []
if exclude != 0:
r.append(0)
i = 1
while s:
if s & 0x1:
r.append(i)
i += 1
s >>= 1
return r
def remove_from_set(s, j):
return s & ~(1 << (j-1))
n = int(raw_input())
mapa = list()
for i in xrange(n):
mapa.append(tuple(map(float, raw_input().split())))
A = [ [ sys.maxint ] * n for i in range(2**(n-1)) ]
A[1][0] = 0
for m in xrange(2,n+1):
print 'm', m
for s in getsets(m, n):
for j in getelem(s, 0):
for k in getelem(s, j):
if k == 0:
A[s][j] = min(A[s][j], A[s][k] + distance(mapa[j], mapa[k]))
else:
A[s][j] = min(A[s][j], A[remove_from_set(s,j)][k] + distance(mapa[j], mapa[k]))
r = sys.maxint
for j in xrange(1,n):
r = min(r, A[getsets(n, n)[0]][j] + distance(mapa[j], mapa[0]))
print int(r)
| bsd-3-clause | -6,299,900,798,455,404,000 | 21.106061 | 99 | 0.494859 | false |
leanix/leanix-sdk-python | src/leanix/models/Provider.py | 1 | 4357 | #!/usr/bin/env python
"""
The MIT License (MIT)
Copyright (c) 2017 LeanIX GmbH
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually.
"""
class Provider:
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
self.swaggerTypes = {
'ID': 'str',
'displayName': 'str',
'parentID': 'str',
'level': 'long',
'name': 'str',
'reference': 'str',
'alias': 'str',
'description': 'str',
'providerCriticalityID': 'str',
'providerCriticalityDescription': 'str',
'providerQualityID': 'str',
'providerQualityDescription': 'str',
'objectStatusID': 'str',
'tags': 'list[str]',
'fullName': 'str',
'resourceType': 'str',
'completion': 'str',
'qualitySealExpiry': 'str',
'modificationTime': 'str',
'factSheetHasParents': 'list[FactSheetHasParent]',
'factSheetHasChildren': 'list[FactSheetHasChild]',
'factSheetHasDocuments': 'list[FactSheetHasDocument]',
'factSheetHasLifecycles': 'list[FactSheetHasLifecycle]',
'userSubscriptions': 'list[UserSubscription]',
'factSheetHasPredecessors': 'list[FactSheetHasPredecessor]',
'factSheetHasSuccessors': 'list[FactSheetHasSuccessor]',
'factSheetHasRequires': 'list[FactSheetHasRequires]',
'factSheetHasRequiredby': 'list[FactSheetHasRequiredby]',
'resourceHasProviders': 'list[ResourceHasProvider]',
'projectHasProviders': 'list[ProjectHasProvider]'
}
self.ID = None # str
self.displayName = None # str
self.parentID = None # str
self.level = None # long
self.name = None # str
self.reference = None # str
self.alias = None # str
self.description = None # str
self.providerCriticalityID = None # str
self.providerCriticalityDescription = None # str
self.providerQualityID = None # str
self.providerQualityDescription = None # str
self.objectStatusID = None # str
self.tags = None # list[str]
self.fullName = None # str
self.resourceType = None # str
self.completion = None # str
self.qualitySealExpiry = None # str
self.modificationTime = None # str
self.factSheetHasParents = None # list[FactSheetHasParent]
self.factSheetHasChildren = None # list[FactSheetHasChild]
self.factSheetHasDocuments = None # list[FactSheetHasDocument]
self.factSheetHasLifecycles = None # list[FactSheetHasLifecycle]
self.userSubscriptions = None # list[UserSubscription]
self.factSheetHasPredecessors = None # list[FactSheetHasPredecessor]
self.factSheetHasSuccessors = None # list[FactSheetHasSuccessor]
self.factSheetHasRequires = None # list[FactSheetHasRequires]
self.factSheetHasRequiredby = None # list[FactSheetHasRequiredby]
self.resourceHasProviders = None # list[ResourceHasProvider]
self.projectHasProviders = None # list[ProjectHasProvider]
| mit | -3,824,801,298,034,065,000 | 44.385417 | 105 | 0.662153 | false |
becm/meson | mesonbuild/cmake/interpreter.py | 1 | 56165 | # Copyright 2019 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This class contains the basic functionality needed to run any interpreter
# or an interpreter-based tool.
import pkg_resources
from .common import CMakeException, CMakeTarget, TargetOptions
from .client import CMakeClient, RequestCMakeInputs, RequestConfigure, RequestCompute, RequestCodeModel
from .fileapi import CMakeFileAPI
from .executor import CMakeExecutor
from .traceparser import CMakeTraceParser, CMakeGeneratorTarget
from .. import mlog, mesonlib
from ..environment import Environment
from ..mesonlib import MachineChoice, OrderedSet, version_compare
from ..compilers.compilers import lang_suffixes, header_suffixes, obj_suffixes, lib_suffixes, is_header
from enum import Enum
from functools import lru_cache
from pathlib import Path
import typing as T
import os, re
from ..mparser import (
Token,
BaseNode,
CodeBlockNode,
FunctionNode,
ArrayNode,
ArgumentNode,
AssignmentNode,
BooleanNode,
StringNode,
IdNode,
IndexNode,
MethodNode,
NumberNode,
)
if T.TYPE_CHECKING:
from ..build import Build
from ..backend.backends import Backend
# Disable all warnings automatically enabled with --trace and friends
# See https://cmake.org/cmake/help/latest/variable/CMAKE_POLICY_WARNING_CMPNNNN.html
disable_policy_warnings = [
'CMP0025',
'CMP0047',
'CMP0056',
'CMP0060',
'CMP0065',
'CMP0066',
'CMP0067',
'CMP0082',
'CMP0089',
]
backend_generator_map = {
'ninja': 'Ninja',
'xcode': 'Xcode',
'vs2010': 'Visual Studio 10 2010',
'vs2015': 'Visual Studio 15 2017',
'vs2017': 'Visual Studio 15 2017',
'vs2019': 'Visual Studio 16 2019',
}
language_map = {
'c': 'C',
'cpp': 'CXX',
'cuda': 'CUDA',
'objc': 'OBJC',
'objcpp': 'OBJCXX',
'cs': 'CSharp',
'java': 'Java',
'fortran': 'Fortran',
'swift': 'Swift',
}
target_type_map = {
'STATIC_LIBRARY': 'static_library',
'MODULE_LIBRARY': 'shared_module',
'SHARED_LIBRARY': 'shared_library',
'EXECUTABLE': 'executable',
'OBJECT_LIBRARY': 'static_library',
'INTERFACE_LIBRARY': 'header_only'
}
skip_targets = ['UTILITY']
blacklist_compiler_flags = [
'-Wall', '-Wextra', '-Weverything', '-Werror', '-Wpedantic', '-pedantic', '-w',
'/W1', '/W2', '/W3', '/W4', '/Wall', '/WX', '/w',
'/O1', '/O2', '/Ob', '/Od', '/Og', '/Oi', '/Os', '/Ot', '/Ox', '/Oy', '/Ob0',
'/RTC1', '/RTCc', '/RTCs', '/RTCu',
'/Z7', '/Zi', '/ZI',
]
blacklist_link_flags = [
'/machine:x64', '/machine:x86', '/machine:arm', '/machine:ebc',
'/debug', '/debug:fastlink', '/debug:full', '/debug:none',
'/incremental',
]
blacklist_clang_cl_link_flags = ['/GR', '/EHsc', '/MDd', '/Zi', '/RTC1']
blacklist_link_libs = [
'kernel32.lib',
'user32.lib',
'gdi32.lib',
'winspool.lib',
'shell32.lib',
'ole32.lib',
'oleaut32.lib',
'uuid.lib',
'comdlg32.lib',
'advapi32.lib'
]
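# Dependencies attached to targets of these kinds are hoisted into the targets
# that depend on them (see ConverterTarget.process_inter_target_dependencies).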
transfer_dependencies_from = ['header_only']
_cmake_name_regex = re.compile(r'[^_a-zA-Z0-9]')
def _sanitize_cmake_name(name: str) -> str:
name = _cmake_name_regex.sub('_', name)
return 'cm_' + name
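# For illustration: _sanitize_cmake_name('foo-bar++') returns 'cm_foo_bar__'.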
class OutputTargetMap:
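    """Best-effort map from CMake target names, artifact paths and generated
    files to the ConverterTarget / ConverterCustomTarget objects behind them."""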
rm_so_version = re.compile(r'(\.[0-9]+)+$')
def __init__(self, build_dir: str):
self.tgt_map = {}
self.build_dir = build_dir
def add(self, tgt: T.Union['ConverterTarget', 'ConverterCustomTarget']) -> None:
def assign_keys(keys: T.List[str]) -> None:
for i in [x for x in keys if x]:
self.tgt_map[i] = tgt
keys = [self._target_key(tgt.cmake_name)]
if isinstance(tgt, ConverterTarget):
keys += [tgt.full_name]
keys += [self._rel_artifact_key(x) for x in tgt.artifacts]
keys += [self._base_artifact_key(x) for x in tgt.artifacts]
if isinstance(tgt, ConverterCustomTarget):
keys += [self._rel_generated_file_key(x) for x in tgt.original_outputs]
keys += [self._base_generated_file_key(x) for x in tgt.original_outputs]
assign_keys(keys)
def _return_first_valid_key(self, keys: T.List[str]) -> T.Optional[T.Union['ConverterTarget', 'ConverterCustomTarget']]:
for i in keys:
if i and i in self.tgt_map:
return self.tgt_map[i]
return None
def target(self, name: str) -> T.Optional[T.Union['ConverterTarget', 'ConverterCustomTarget']]:
return self._return_first_valid_key([self._target_key(name)])
def executable(self, name: str) -> T.Optional['ConverterTarget']:
tgt = self.target(name)
if tgt is None or not isinstance(tgt, ConverterTarget):
return None
if tgt.meson_func() != 'executable':
return None
return tgt
def artifact(self, name: str) -> T.Optional[T.Union['ConverterTarget', 'ConverterCustomTarget']]:
keys = []
candidates = [name, OutputTargetMap.rm_so_version.sub('', name)]
for i in lib_suffixes:
if not name.endswith('.' + i):
continue
new_name = name[:-len(i) - 1]
new_name = OutputTargetMap.rm_so_version.sub('', new_name)
candidates += ['{}.{}'.format(new_name, i)]
for i in candidates:
keys += [self._rel_artifact_key(i), os.path.basename(i), self._base_artifact_key(i)]
return self._return_first_valid_key(keys)
def generated(self, name: str) -> T.Optional[T.Union['ConverterTarget', 'ConverterCustomTarget']]:
return self._return_first_valid_key([self._rel_generated_file_key(name), self._base_generated_file_key(name)])
# Utility functions to generate local keys
def _rel_path(self, fname: str) -> T.Optional[str]:
fname = os.path.normpath(os.path.join(self.build_dir, fname))
if os.path.commonpath([self.build_dir, fname]) != self.build_dir:
return None
return os.path.relpath(fname, self.build_dir)
def _target_key(self, tgt_name: str) -> str:
return '__tgt_{}__'.format(tgt_name)
def _rel_generated_file_key(self, fname: str) -> T.Optional[str]:
path = self._rel_path(fname)
return '__relgen_{}__'.format(path) if path else None
def _base_generated_file_key(self, fname: str) -> str:
return '__gen_{}__'.format(os.path.basename(fname))
def _rel_artifact_key(self, fname: str) -> T.Optional[str]:
path = self._rel_path(fname)
return '__relart_{}__'.format(path) if path else None
def _base_artifact_key(self, fname: str) -> str:
return '__art_{}__'.format(os.path.basename(fname))
class ConverterTarget:
def __init__(self, target: CMakeTarget, env: Environment):
self.env = env
self.artifacts = target.artifacts
self.src_dir = target.src_dir
self.build_dir = target.build_dir
self.name = target.name
self.cmake_name = target.name
self.full_name = target.full_name
self.type = target.type
self.install = target.install
self.install_dir = ''
self.link_libraries = target.link_libraries
self.link_flags = target.link_flags + target.link_lang_flags
self.depends_raw = []
self.depends = []
if target.install_paths:
self.install_dir = target.install_paths[0]
self.languages = []
self.sources = []
self.generated = []
self.includes = []
self.sys_includes = []
self.link_with = []
self.object_libs = []
self.compile_opts = {}
self.public_compile_opts = []
self.pie = False
# Project default override options (c_std, cpp_std, etc.)
self.override_options = []
# Convert the target name to a valid meson target name
self.name = _sanitize_cmake_name(self.name)
for i in target.files:
# Determine the meson language
lang_cmake_to_meson = {val.lower(): key for key, val in language_map.items()}
lang = lang_cmake_to_meson.get(i.language.lower(), 'c')
if lang not in self.languages:
self.languages += [lang]
if lang not in self.compile_opts:
self.compile_opts[lang] = []
# Add arguments, but avoid duplicates
args = i.flags
args += ['-D{}'.format(x) for x in i.defines]
self.compile_opts[lang] += [x for x in args if x not in self.compile_opts[lang]]
# Handle include directories
self.includes += [x['path'] for x in i.includes if x not in self.includes and not x['isSystem']]
self.sys_includes += [x['path'] for x in i.includes if x not in self.sys_includes and x['isSystem']]
# Add sources to the right array
if i.is_generated:
self.generated += i.sources
else:
self.sources += i.sources
def __repr__(self) -> str:
return '<{}: {}>'.format(self.__class__.__name__, self.name)
std_regex = re.compile(r'([-]{1,2}std=|/std:v?|[-]{1,2}std:)(.*)')
def postprocess(self, output_target_map: OutputTargetMap, root_src_dir: str, subdir: str, install_prefix: str, trace: CMakeTraceParser) -> None:
# Detect setting the C and C++ standard
for i in ['c', 'cpp']:
if i not in self.compile_opts:
continue
temp = []
for j in self.compile_opts[i]:
m = ConverterTarget.std_regex.match(j)
if m:
std = m.group(2)
supported = self._all_lang_stds(i)
if std not in supported:
mlog.warning(
'Unknown {0}_std "{1}" -> Ignoring. Try setting the project-'
'level {0}_std if build errors occur. Known '
'{0}_stds are: {2}'.format(i, std, ' '.join(supported)),
once=True
)
continue
self.override_options += ['{}_std={}'.format(i, std)]
elif j in ['-fPIC', '-fpic', '-fPIE', '-fpie']:
self.pie = True
elif j in blacklist_compiler_flags:
pass
else:
temp += [j]
self.compile_opts[i] = temp
# Make sure to force enable -fPIC for OBJECT libraries
if self.type.upper() == 'OBJECT_LIBRARY':
self.pie = True
# Use the CMake trace, if required
tgt = trace.targets.get(self.cmake_name)
if tgt:
self.depends_raw = trace.targets[self.cmake_name].depends
# TODO refactor this copy paste from CMakeDependency for future releases
reg_is_lib = re.compile(r'^(-l[a-zA-Z0-9_]+|-l?pthread)$')
to_process = [self.cmake_name]
processed = []
while len(to_process) > 0:
curr = to_process.pop(0)
if curr in processed or curr not in trace.targets:
continue
tgt = trace.targets[curr]
cfgs = []
cfg = ''
otherDeps = []
libraries = []
mlog.debug(tgt)
if 'INTERFACE_INCLUDE_DIRECTORIES' in tgt.properties:
self.includes += [x for x in tgt.properties['INTERFACE_INCLUDE_DIRECTORIES'] if x]
if 'INTERFACE_LINK_OPTIONS' in tgt.properties:
self.link_flags += [x for x in tgt.properties['INTERFACE_LINK_OPTIONS'] if x]
if 'INTERFACE_COMPILE_DEFINITIONS' in tgt.properties:
self.public_compile_opts += ['-D' + re.sub('^-D', '', x) for x in tgt.properties['INTERFACE_COMPILE_DEFINITIONS'] if x]
if 'INTERFACE_COMPILE_OPTIONS' in tgt.properties:
self.public_compile_opts += [x for x in tgt.properties['INTERFACE_COMPILE_OPTIONS'] if x]
if 'IMPORTED_CONFIGURATIONS' in tgt.properties:
cfgs += [x for x in tgt.properties['IMPORTED_CONFIGURATIONS'] if x]
cfg = cfgs[0]
if 'CONFIGURATIONS' in tgt.properties:
cfgs += [x for x in tgt.properties['CONFIGURATIONS'] if x]
cfg = cfgs[0]
                is_debug = self.env.coredata.get_builtin_option('debug')
if is_debug:
if 'DEBUG' in cfgs:
cfg = 'DEBUG'
elif 'RELEASE' in cfgs:
cfg = 'RELEASE'
else:
if 'RELEASE' in cfgs:
cfg = 'RELEASE'
if 'IMPORTED_IMPLIB_{}'.format(cfg) in tgt.properties:
libraries += [x for x in tgt.properties['IMPORTED_IMPLIB_{}'.format(cfg)] if x]
elif 'IMPORTED_IMPLIB' in tgt.properties:
libraries += [x for x in tgt.properties['IMPORTED_IMPLIB'] if x]
elif 'IMPORTED_LOCATION_{}'.format(cfg) in tgt.properties:
libraries += [x for x in tgt.properties['IMPORTED_LOCATION_{}'.format(cfg)] if x]
elif 'IMPORTED_LOCATION' in tgt.properties:
libraries += [x for x in tgt.properties['IMPORTED_LOCATION'] if x]
if 'LINK_LIBRARIES' in tgt.properties:
otherDeps += [x for x in tgt.properties['LINK_LIBRARIES'] if x]
if 'INTERFACE_LINK_LIBRARIES' in tgt.properties:
otherDeps += [x for x in tgt.properties['INTERFACE_LINK_LIBRARIES'] if x]
if 'IMPORTED_LINK_DEPENDENT_LIBRARIES_{}'.format(cfg) in tgt.properties:
otherDeps += [x for x in tgt.properties['IMPORTED_LINK_DEPENDENT_LIBRARIES_{}'.format(cfg)] if x]
elif 'IMPORTED_LINK_DEPENDENT_LIBRARIES' in tgt.properties:
otherDeps += [x for x in tgt.properties['IMPORTED_LINK_DEPENDENT_LIBRARIES'] if x]
for j in otherDeps:
if j in trace.targets:
to_process += [j]
elif reg_is_lib.match(j) or os.path.exists(j):
libraries += [j]
for j in libraries:
if j not in self.link_libraries:
self.link_libraries += [j]
processed += [curr]
elif self.type.upper() not in ['EXECUTABLE', 'OBJECT_LIBRARY']:
mlog.warning('CMake: Target', mlog.bold(self.cmake_name), 'not found in CMake trace. This can lead to build errors')
temp = []
for i in self.link_libraries:
# Let meson handle this arcane magic
if ',-rpath,' in i:
continue
if not os.path.isabs(i):
link_with = output_target_map.artifact(i)
if link_with:
self.link_with += [link_with]
continue
temp += [i]
self.link_libraries = temp
# Filter out files that are not supported by the language
supported = list(header_suffixes) + list(obj_suffixes)
for i in self.languages:
supported += list(lang_suffixes[i])
supported = ['.{}'.format(x) for x in supported]
self.sources = [x for x in self.sources if any([x.endswith(y) for y in supported])]
self.generated = [x for x in self.generated if any([x.endswith(y) for y in supported])]
# Make paths relative
def rel_path(x: str, is_header: bool, is_generated: bool) -> T.Optional[str]:
if not os.path.isabs(x):
x = os.path.normpath(os.path.join(self.src_dir, x))
if not os.path.exists(x) and not any([x.endswith(y) for y in obj_suffixes]) and not is_generated:
mlog.warning('CMake: path', mlog.bold(x), 'does not exist.')
mlog.warning(' --> Ignoring. This can lead to build errors.')
return None
if Path(x) in trace.explicit_headers:
return None
if (
os.path.isabs(x)
and os.path.commonpath([x, self.env.get_source_dir()]) == self.env.get_source_dir()
and not (
os.path.commonpath([x, root_src_dir]) == root_src_dir or
os.path.commonpath([x, self.env.get_build_dir()]) == self.env.get_build_dir()
)
):
mlog.warning('CMake: path', mlog.bold(x), 'is inside the root project but', mlog.bold('not'), 'inside the subproject.')
mlog.warning(' --> Ignoring. This can lead to build errors.')
return None
if os.path.isabs(x) and os.path.commonpath([x, self.env.get_build_dir()]) == self.env.get_build_dir():
if is_header:
return os.path.relpath(x, os.path.join(self.env.get_build_dir(), subdir))
else:
return os.path.relpath(x, root_src_dir)
if os.path.isabs(x) and os.path.commonpath([x, root_src_dir]) == root_src_dir:
return os.path.relpath(x, root_src_dir)
return x
def custom_target(x: str):
ctgt = output_target_map.generated(x)
if ctgt:
assert(isinstance(ctgt, ConverterCustomTarget))
ref = ctgt.get_ref(x)
assert(isinstance(ref, CustomTargetReference) and ref.valid())
return ref
return x
build_dir_rel = os.path.relpath(self.build_dir, os.path.join(self.env.get_build_dir(), subdir))
self.includes = list(OrderedSet([rel_path(x, True, False) for x in OrderedSet(self.includes)] + [build_dir_rel]))
self.sys_includes = list(OrderedSet([rel_path(x, True, False) for x in OrderedSet(self.sys_includes)]))
self.sources = [rel_path(x, False, False) for x in self.sources]
self.generated = [rel_path(x, False, True) for x in self.generated]
# Resolve custom targets
self.generated = [custom_target(x) for x in self.generated]
        # Remove deleted entries
self.includes = [x for x in self.includes if x is not None]
self.sys_includes = [x for x in self.sys_includes if x is not None]
self.sources = [x for x in self.sources if x is not None]
self.generated = [x for x in self.generated if x is not None]
# Make sure '.' is always in the include directories
if '.' not in self.includes:
self.includes += ['.']
# make install dir relative to the install prefix
if self.install_dir and os.path.isabs(self.install_dir):
if os.path.commonpath([self.install_dir, install_prefix]) == install_prefix:
self.install_dir = os.path.relpath(self.install_dir, install_prefix)
# Remove blacklisted options and libs
def check_flag(flag: str) -> bool:
if flag.lower() in blacklist_link_flags or flag in blacklist_compiler_flags + blacklist_clang_cl_link_flags:
return False
if flag.startswith('/D'):
return False
return True
self.link_libraries = [x for x in self.link_libraries if x.lower() not in blacklist_link_libs]
self.link_flags = [x for x in self.link_flags if check_flag(x)]
        # Handle explicit CMake add_dependencies() calls
for i in self.depends_raw:
tgt = output_target_map.target(i)
if tgt:
self.depends.append(tgt)
def process_object_libs(self, obj_target_list: T.List['ConverterTarget'], linker_workaround: bool):
# Try to detect the object library(s) from the generated input sources
temp = [x for x in self.generated if isinstance(x, str)]
temp = [os.path.basename(x) for x in temp]
temp = [x for x in temp if any([x.endswith('.' + y) for y in obj_suffixes])]
temp = [os.path.splitext(x)[0] for x in temp]
exts = self._all_source_suffixes()
# Temp now stores the source filenames of the object files
for i in obj_target_list:
source_files = [x for x in i.sources + i.generated if isinstance(x, str)]
source_files = [os.path.basename(x) for x in source_files]
for j in temp:
# On some platforms (specifically looking at you Windows with vs20xy backend) CMake does
                # not produce object files with the format `foo.cpp.obj`, instead it skips the language
# suffix and just produces object files like `foo.obj`. Thus we have to do our best to
# undo this step and guess the correct language suffix of the object file. This is done
# by trying all language suffixes meson knows and checking if one of them fits.
candidates = [j] # type: T.List[str]
if not any([j.endswith('.' + x) for x in exts]):
mlog.warning('Object files do not contain source file extensions, thus falling back to guessing them.', once=True)
candidates += ['{}.{}'.format(j, x) for x in exts]
if any([x in source_files for x in candidates]):
if linker_workaround:
self._append_objlib_sources(i)
else:
self.includes += i.includes
self.includes = list(OrderedSet(self.includes))
self.object_libs += [i]
break
# Filter out object files from the sources
self.generated = [x for x in self.generated if not isinstance(x, str) or not any([x.endswith('.' + y) for y in obj_suffixes])]
def _append_objlib_sources(self, tgt: 'ConverterTarget') -> None:
self.includes += tgt.includes
self.sources += tgt.sources
self.generated += tgt.generated
self.sources = list(OrderedSet(self.sources))
self.generated = list(OrderedSet(self.generated))
self.includes = list(OrderedSet(self.includes))
# Inherit compiler arguments since they may be required for building
for lang, opts in tgt.compile_opts.items():
if lang not in self.compile_opts:
self.compile_opts[lang] = []
self.compile_opts[lang] += [x for x in opts if x not in self.compile_opts[lang]]
@lru_cache(maxsize=None)
def _all_source_suffixes(self) -> T.List[str]:
suffixes = [] # type: T.List[str]
for exts in lang_suffixes.values():
suffixes += [x for x in exts]
return suffixes
@lru_cache(maxsize=None)
def _all_lang_stds(self, lang: str) -> T.List[str]:
lang_opts = self.env.coredata.compiler_options.build.get(lang, None)
if not lang_opts or 'std' not in lang_opts:
return []
return lang_opts['std'].choices
def process_inter_target_dependencies(self):
# Move the dependencies from all transfer_dependencies_from to the target
to_process = list(self.depends)
processed = []
new_deps = []
for i in to_process:
processed += [i]
if isinstance(i, ConverterTarget) and i.meson_func() in transfer_dependencies_from:
to_process += [x for x in i.depends if x not in processed]
else:
new_deps += [i]
self.depends = list(OrderedSet(new_deps))
def cleanup_dependencies(self):
        # Clear the dependencies from targets that were moved from
if self.meson_func() in transfer_dependencies_from:
self.depends = []
def meson_func(self) -> str:
return target_type_map.get(self.type.upper())
def log(self) -> None:
mlog.log('Target', mlog.bold(self.name), '({})'.format(self.cmake_name))
mlog.log(' -- artifacts: ', mlog.bold(str(self.artifacts)))
mlog.log(' -- full_name: ', mlog.bold(self.full_name))
mlog.log(' -- type: ', mlog.bold(self.type))
mlog.log(' -- install: ', mlog.bold('true' if self.install else 'false'))
mlog.log(' -- install_dir: ', mlog.bold(self.install_dir))
mlog.log(' -- link_libraries: ', mlog.bold(str(self.link_libraries)))
mlog.log(' -- link_with: ', mlog.bold(str(self.link_with)))
mlog.log(' -- object_libs: ', mlog.bold(str(self.object_libs)))
mlog.log(' -- link_flags: ', mlog.bold(str(self.link_flags)))
mlog.log(' -- languages: ', mlog.bold(str(self.languages)))
mlog.log(' -- includes: ', mlog.bold(str(self.includes)))
mlog.log(' -- sys_includes: ', mlog.bold(str(self.sys_includes)))
mlog.log(' -- sources: ', mlog.bold(str(self.sources)))
mlog.log(' -- generated: ', mlog.bold(str(self.generated)))
mlog.log(' -- pie: ', mlog.bold('true' if self.pie else 'false'))
mlog.log(' -- override_opts: ', mlog.bold(str(self.override_options)))
mlog.log(' -- depends: ', mlog.bold(str(self.depends)))
mlog.log(' -- options:')
for key, val in self.compile_opts.items():
mlog.log(' -', key, '=', mlog.bold(str(val)))
class CustomTargetReference:
def __init__(self, ctgt: 'ConverterCustomTarget', index: int):
self.ctgt = ctgt # type: ConverterCustomTarget
self.index = index # type: int
def __repr__(self) -> str:
if self.valid():
return '<{}: {} [{}]>'.format(self.__class__.__name__, self.ctgt.name, self.ctgt.outputs[self.index])
else:
return '<{}: INVALID REFERENCE>'.format(self.__class__.__name__)
def valid(self) -> bool:
return self.ctgt is not None and self.index >= 0
def filename(self) -> str:
return self.ctgt.outputs[self.index]
class ConverterCustomTarget:
tgt_counter = 0 # type: int
out_counter = 0 # type: int
def __init__(self, target: CMakeGeneratorTarget):
assert(target.current_bin_dir is not None)
assert(target.current_src_dir is not None)
self.name = target.name
if not self.name:
self.name = 'custom_tgt_{}'.format(ConverterCustomTarget.tgt_counter)
ConverterCustomTarget.tgt_counter += 1
self.cmake_name = str(self.name)
self.original_outputs = list(target.outputs)
self.outputs = [os.path.basename(x) for x in self.original_outputs]
self.conflict_map = {}
self.command = target.command
self.working_dir = target.working_dir
self.depends_raw = target.depends
self.inputs = []
self.depends = []
self.current_bin_dir = Path(target.current_bin_dir)
self.current_src_dir = Path(target.current_src_dir)
# Convert the target name to a valid meson target name
self.name = _sanitize_cmake_name(self.name)
def __repr__(self) -> str:
return '<{}: {} {}>'.format(self.__class__.__name__, self.name, self.outputs)
def postprocess(self, output_target_map: OutputTargetMap, root_src_dir: str, subdir: str, all_outputs: T.List[str]) -> None:
# Default the working directory to ${CMAKE_CURRENT_BINARY_DIR}
if not self.working_dir:
self.working_dir = self.current_bin_dir.as_posix()
# relative paths in the working directory are always relative
# to ${CMAKE_CURRENT_BINARY_DIR}
if not os.path.isabs(self.working_dir):
self.working_dir = (self.current_bin_dir / self.working_dir).as_posix()
# Modify the original outputs if they are relative. Again,
# relative paths are relative to ${CMAKE_CURRENT_BINARY_DIR}
def ensure_absolute(x: Path) -> Path:
if x.is_absolute():
return x
else:
return self.current_bin_dir / x
self.original_outputs = [ensure_absolute(Path(x)).as_posix() for x in self.original_outputs]
# Ensure that there is no duplicate output in the project so
# that meson can handle cases where the same filename is
# generated in multiple directories
temp_outputs = [] # type: T.List[str]
for i in self.outputs:
if i in all_outputs:
old = str(i)
i = 'c{}_{}'.format(ConverterCustomTarget.out_counter, i)
ConverterCustomTarget.out_counter += 1
self.conflict_map[old] = i
all_outputs += [i]
temp_outputs += [i]
self.outputs = temp_outputs
# Check if the command is a build target
commands = []
for i in self.command:
assert(isinstance(i, list))
cmd = []
for j in i:
if not j:
continue
target = output_target_map.executable(j)
cmd += [target] if target else [j]
commands += [cmd]
self.command = commands
# If the custom target does not declare any output, create a dummy
# one that can be used as dependency.
if not self.outputs:
self.outputs = [self.name + '.h']
# Check dependencies and input files
root = Path(root_src_dir)
for i in self.depends_raw:
if not i:
continue
raw = Path(i)
art = output_target_map.artifact(i)
tgt = output_target_map.target(i)
gen = output_target_map.generated(i)
rel_to_root = None
try:
rel_to_root = raw.relative_to(root)
except ValueError:
rel_to_root = None
# First check for existing files. Only then check for existing
# targets, etc. This reduces the chance of misdetecting input files
# as outputs from other targets.
# See https://github.com/mesonbuild/meson/issues/6632
if not raw.is_absolute() and (self.current_src_dir / raw).exists():
self.inputs += [(self.current_src_dir / raw).relative_to(root).as_posix()]
elif raw.is_absolute() and raw.exists() and rel_to_root is not None:
self.inputs += [rel_to_root.as_posix()]
elif art:
self.depends += [art]
elif tgt:
self.depends += [tgt]
elif gen:
self.inputs += [gen.get_ref(i)]
def process_inter_target_dependencies(self):
# Move the dependencies from all transfer_dependencies_from to the target
to_process = list(self.depends)
processed = []
new_deps = []
for i in to_process:
processed += [i]
if isinstance(i, ConverterTarget) and i.meson_func() in transfer_dependencies_from:
to_process += [x for x in i.depends if x not in processed]
else:
new_deps += [i]
self.depends = list(OrderedSet(new_deps))
def get_ref(self, fname: str) -> T.Optional[CustomTargetReference]:
fname = os.path.basename(fname)
try:
if fname in self.conflict_map:
fname = self.conflict_map[fname]
idx = self.outputs.index(fname)
return CustomTargetReference(self, idx)
except ValueError:
return None
def log(self) -> None:
mlog.log('Custom Target', mlog.bold(self.name), '({})'.format(self.cmake_name))
mlog.log(' -- command: ', mlog.bold(str(self.command)))
mlog.log(' -- outputs: ', mlog.bold(str(self.outputs)))
mlog.log(' -- conflict_map: ', mlog.bold(str(self.conflict_map)))
mlog.log(' -- working_dir: ', mlog.bold(str(self.working_dir)))
mlog.log(' -- depends_raw: ', mlog.bold(str(self.depends_raw)))
mlog.log(' -- inputs: ', mlog.bold(str(self.inputs)))
mlog.log(' -- depends: ', mlog.bold(str(self.depends)))
class CMakeAPI(Enum):
SERVER = 1
FILE = 2
class CMakeInterpreter:
def __init__(self, build: 'Build', subdir: str, src_dir: str, install_prefix: str, env: Environment, backend: 'Backend'):
assert(hasattr(backend, 'name'))
self.build = build
self.subdir = subdir
self.src_dir = src_dir
self.build_dir_rel = os.path.join(subdir, '__CMake_build')
self.build_dir = os.path.join(env.get_build_dir(), self.build_dir_rel)
self.install_prefix = install_prefix
self.env = env
self.backend_name = backend.name
self.linkers = set() # type: T.Set[str]
self.cmake_api = CMakeAPI.SERVER
self.client = CMakeClient(self.env)
self.fileapi = CMakeFileAPI(self.build_dir)
# Raw CMake results
self.bs_files = []
self.codemodel_configs = None
self.raw_trace = None
# Analysed data
self.project_name = ''
self.languages = []
self.targets = []
self.custom_targets = [] # type: T.List[ConverterCustomTarget]
self.trace = CMakeTraceParser('', '') # Will be replaced in analyse
self.output_target_map = OutputTargetMap(self.build_dir)
# Generated meson data
self.generated_targets = {}
self.internal_name_map = {}
def configure(self, extra_cmake_options: T.List[str]) -> None:
for_machine = MachineChoice.HOST # TODO make parameter
# Find CMake
cmake_exe = CMakeExecutor(self.env, '>=3.7', for_machine)
if not cmake_exe.found():
raise CMakeException('Unable to find CMake')
self.trace = CMakeTraceParser(cmake_exe.version(), self.build_dir, permissive=True)
preload_file = pkg_resources.resource_filename('mesonbuild', 'cmake/data/preload.cmake')
        # Prefer CMAKE_PROJECT_INCLUDE over CMAKE_TOOLCHAIN_FILE if possible,
# since CMAKE_PROJECT_INCLUDE was actually designed for code injection.
preload_var = 'CMAKE_PROJECT_INCLUDE'
if version_compare(cmake_exe.version(), '<3.15'):
preload_var = 'CMAKE_TOOLCHAIN_FILE'
generator = backend_generator_map[self.backend_name]
cmake_args = []
trace_args = self.trace.trace_args()
cmcmp_args = ['-DCMAKE_POLICY_WARNING_{}=OFF'.format(x) for x in disable_policy_warnings]
pload_args = ['-D{}={}'.format(preload_var, str(preload_file))]
if version_compare(cmake_exe.version(), '>=3.14'):
self.cmake_api = CMakeAPI.FILE
self.fileapi.setup_request()
# Map meson compiler to CMake variables
for lang, comp in self.env.coredata.compilers[for_machine].items():
if lang not in language_map:
continue
self.linkers.add(comp.get_linker_id())
cmake_lang = language_map[lang]
exelist = comp.get_exelist()
if len(exelist) == 1:
cmake_args += ['-DCMAKE_{}_COMPILER={}'.format(cmake_lang, exelist[0])]
elif len(exelist) == 2:
cmake_args += ['-DCMAKE_{}_COMPILER_LAUNCHER={}'.format(cmake_lang, exelist[0]),
'-DCMAKE_{}_COMPILER={}'.format(cmake_lang, exelist[1])]
if hasattr(comp, 'get_linker_exelist') and comp.get_id() == 'clang-cl':
cmake_args += ['-DCMAKE_LINKER={}'.format(comp.get_linker_exelist()[0])]
cmake_args += ['-G', generator]
cmake_args += ['-DCMAKE_INSTALL_PREFIX={}'.format(self.install_prefix)]
cmake_args += extra_cmake_options
# Run CMake
mlog.log()
with mlog.nested():
mlog.log('Configuring the build directory with', mlog.bold('CMake'), 'version', mlog.cyan(cmake_exe.version()))
mlog.log(mlog.bold('Running:'), ' '.join(cmake_args))
mlog.log(mlog.bold(' - build directory: '), self.build_dir)
mlog.log(mlog.bold(' - source directory: '), self.src_dir)
mlog.log(mlog.bold(' - trace args: '), ' '.join(trace_args))
mlog.log(mlog.bold(' - preload file: '), str(preload_file))
mlog.log(mlog.bold(' - disabled policy warnings:'), '[{}]'.format(', '.join(disable_policy_warnings)))
mlog.log()
os.makedirs(self.build_dir, exist_ok=True)
os_env = os.environ.copy()
os_env['LC_ALL'] = 'C'
final_args = cmake_args + trace_args + cmcmp_args + pload_args + [self.src_dir]
cmake_exe.set_exec_mode(print_cmout=True, always_capture_stderr=self.trace.requires_stderr())
rc, _, self.raw_trace = cmake_exe.call(final_args, self.build_dir, env=os_env, disable_cache=True)
mlog.log()
h = mlog.green('SUCCEEDED') if rc == 0 else mlog.red('FAILED')
mlog.log('CMake configuration:', h)
if rc != 0:
raise CMakeException('Failed to configure the CMake subproject')
def initialise(self, extra_cmake_options: T.List[str]) -> None:
# Run configure the old way because doing it
# with the server doesn't work for some reason
# Additionally, the File API requires a configure anyway
self.configure(extra_cmake_options)
        # Continue with the file API if supported
if self.cmake_api is CMakeAPI.FILE:
# Parse the result
self.fileapi.load_reply()
# Load the buildsystem file list
cmake_files = self.fileapi.get_cmake_sources()
self.bs_files = [x.file for x in cmake_files if not x.is_cmake and not x.is_temp]
self.bs_files = [os.path.relpath(x, self.env.get_source_dir()) for x in self.bs_files]
self.bs_files = list(OrderedSet(self.bs_files))
# Load the codemodel configurations
self.codemodel_configs = self.fileapi.get_cmake_configurations()
return
with self.client.connect():
generator = backend_generator_map[self.backend_name]
self.client.do_handshake(self.src_dir, self.build_dir, generator, 1)
# Do a second configure to initialise the server
self.client.query_checked(RequestConfigure(), 'CMake server configure')
# Generate the build system files
self.client.query_checked(RequestCompute(), 'Generating build system files')
# Get CMake build system files
bs_reply = self.client.query_checked(RequestCMakeInputs(), 'Querying build system files')
# Now get the CMake code model
cm_reply = self.client.query_checked(RequestCodeModel(), 'Querying the CMake code model')
src_dir = bs_reply.src_dir
self.bs_files = [x.file for x in bs_reply.build_files if not x.is_cmake and not x.is_temp]
self.bs_files = [os.path.relpath(os.path.join(src_dir, x), self.env.get_source_dir()) for x in self.bs_files]
self.bs_files = list(OrderedSet(self.bs_files))
self.codemodel_configs = cm_reply.configs
def analyse(self) -> None:
if self.codemodel_configs is None:
raise CMakeException('CMakeInterpreter was not initialized')
# Clear analyser data
self.project_name = ''
self.languages = []
self.targets = []
self.custom_targets = []
# Parse the trace
self.trace.parse(self.raw_trace)
# Find all targets
added_target_names = [] # type: T.List[str]
for i in self.codemodel_configs:
for j in i.projects:
if not self.project_name:
self.project_name = j.name
for k in j.targets:
# Avoid duplicate targets from different configurations and known
# dummy CMake internal target types
if k.type not in skip_targets and k.name not in added_target_names:
added_target_names += [k.name]
self.targets += [ConverterTarget(k, self.env)]
# Add interface targets from trace, if not already present.
# This step is required because interface targets were removed from
# the CMake file API output.
api_target_name_list = [x.name for x in self.targets]
for i in self.trace.targets.values():
if i.type != 'INTERFACE' or i.name in api_target_name_list or i.imported:
continue
dummy = CMakeTarget({
'name': i.name,
'type': 'INTERFACE_LIBRARY',
'sourceDirectory': self.src_dir,
'buildDirectory': self.build_dir,
})
self.targets += [ConverterTarget(dummy, self.env)]
for i in self.trace.custom_targets:
self.custom_targets += [ConverterCustomTarget(i)]
# generate the output_target_map
for i in [*self.targets, *self.custom_targets]:
self.output_target_map.add(i)
# First pass: Basic target cleanup
object_libs = []
custom_target_outputs = [] # type: T.List[str]
for i in self.custom_targets:
i.postprocess(self.output_target_map, self.src_dir, self.subdir, custom_target_outputs)
for i in self.targets:
i.postprocess(self.output_target_map, self.src_dir, self.subdir, self.install_prefix, self.trace)
if i.type == 'OBJECT_LIBRARY':
object_libs += [i]
self.languages += [x for x in i.languages if x not in self.languages]
# Second pass: Detect object library dependencies
for i in self.targets:
i.process_object_libs(object_libs, self._object_lib_workaround())
# Third pass: Reassign dependencies to avoid some loops
for i in self.targets:
i.process_inter_target_dependencies()
for i in self.custom_targets:
i.process_inter_target_dependencies()
        # Fourth pass: Remove reassigned dependencies
for i in self.targets:
i.cleanup_dependencies()
mlog.log('CMake project', mlog.bold(self.project_name), 'has', mlog.bold(str(len(self.targets) + len(self.custom_targets))), 'build targets.')
def pretend_to_be_meson(self, options: TargetOptions) -> CodeBlockNode:
if not self.project_name:
raise CMakeException('CMakeInterpreter was not analysed')
def token(tid: str = 'string', val='') -> Token:
return Token(tid, self.subdir, 0, 0, 0, None, val)
def string(value: str) -> StringNode:
return StringNode(token(val=value))
def id_node(value: str) -> IdNode:
return IdNode(token(val=value))
def number(value: int) -> NumberNode:
return NumberNode(token(val=value))
def nodeify(value):
if isinstance(value, str):
return string(value)
elif isinstance(value, bool):
return BooleanNode(token(val=value))
elif isinstance(value, int):
return number(value)
elif isinstance(value, list):
return array(value)
return value
def indexed(node: BaseNode, index: int) -> IndexNode:
return IndexNode(node, nodeify(index))
def array(elements) -> ArrayNode:
args = ArgumentNode(token())
if not isinstance(elements, list):
elements = [args]
args.arguments += [nodeify(x) for x in elements if x is not None]
return ArrayNode(args, 0, 0, 0, 0)
def function(name: str, args=None, kwargs=None) -> FunctionNode:
args = [] if args is None else args
kwargs = {} if kwargs is None else kwargs
args_n = ArgumentNode(token())
if not isinstance(args, list):
args = [args]
args_n.arguments = [nodeify(x) for x in args if x is not None]
args_n.kwargs = {id_node(k): nodeify(v) for k, v in kwargs.items() if v is not None}
func_n = FunctionNode(self.subdir, 0, 0, 0, 0, name, args_n)
return func_n
def method(obj: BaseNode, name: str, args=None, kwargs=None) -> MethodNode:
args = [] if args is None else args
kwargs = {} if kwargs is None else kwargs
args_n = ArgumentNode(token())
if not isinstance(args, list):
args = [args]
args_n.arguments = [nodeify(x) for x in args if x is not None]
args_n.kwargs = {id_node(k): nodeify(v) for k, v in kwargs.items() if v is not None}
return MethodNode(self.subdir, 0, 0, obj, name, args_n)
def assign(var_name: str, value: BaseNode) -> AssignmentNode:
return AssignmentNode(self.subdir, 0, 0, var_name, value)
# Generate the root code block and the project function call
root_cb = CodeBlockNode(token())
root_cb.lines += [function('project', [self.project_name] + self.languages)]
# Add the run script for custom commands
# Add the targets
processing = []
processed = {}
name_map = {}
def extract_tgt(tgt: T.Union[ConverterTarget, ConverterCustomTarget, CustomTargetReference]) -> IdNode:
tgt_name = None
if isinstance(tgt, (ConverterTarget, ConverterCustomTarget)):
tgt_name = tgt.name
elif isinstance(tgt, CustomTargetReference):
tgt_name = tgt.ctgt.name
assert(tgt_name is not None and tgt_name in processed)
res_var = processed[tgt_name]['tgt']
return id_node(res_var) if res_var else None
def detect_cycle(tgt: T.Union[ConverterTarget, ConverterCustomTarget]) -> None:
if tgt.name in processing:
raise CMakeException('Cycle in CMake inputs/dependencies detected')
processing.append(tgt.name)
def resolve_ctgt_ref(ref: CustomTargetReference) -> BaseNode:
tgt_var = extract_tgt(ref)
if len(ref.ctgt.outputs) == 1:
return tgt_var
else:
return indexed(tgt_var, ref.index)
def process_target(tgt: ConverterTarget):
detect_cycle(tgt)
# First handle inter target dependencies
link_with = []
objec_libs = [] # type: T.List[IdNode]
sources = []
generated = []
generated_filenames = []
custom_targets = []
dependencies = []
for i in tgt.link_with:
assert(isinstance(i, ConverterTarget))
if i.name not in processed:
process_target(i)
link_with += [extract_tgt(i)]
for i in tgt.object_libs:
assert(isinstance(i, ConverterTarget))
if i.name not in processed:
process_target(i)
objec_libs += [extract_tgt(i)]
for i in tgt.depends:
if not isinstance(i, ConverterCustomTarget):
continue
if i.name not in processed:
process_custom_target(i)
dependencies += [extract_tgt(i)]
# Generate the source list and handle generated sources
for i in tgt.sources + tgt.generated:
if isinstance(i, CustomTargetReference):
if i.ctgt.name not in processed:
process_custom_target(i.ctgt)
generated += [resolve_ctgt_ref(i)]
generated_filenames += [i.filename()]
if i.ctgt not in custom_targets:
custom_targets += [i.ctgt]
else:
sources += [i]
# Add all header files from all used custom targets. This
# ensures that all custom targets are built before any
# sources of the current target are compiled and thus all
# header files are present. This step is necessary because
# CMake always ensures that a custom target is executed
# before another target if at least one output is used.
for i in custom_targets:
for j in i.outputs:
if not is_header(j) or j in generated_filenames:
continue
generated += [resolve_ctgt_ref(i.get_ref(j))]
generated_filenames += [j]
# Determine the meson function to use for the build target
tgt_func = tgt.meson_func()
if not tgt_func:
raise CMakeException('Unknown target type "{}"'.format(tgt.type))
# Determine the variable names
inc_var = '{}_inc'.format(tgt.name)
dir_var = '{}_dir'.format(tgt.name)
sys_var = '{}_sys'.format(tgt.name)
src_var = '{}_src'.format(tgt.name)
dep_var = '{}_dep'.format(tgt.name)
tgt_var = tgt.name
install_tgt = options.get_install(tgt.cmake_name, tgt.install)
# Generate target kwargs
tgt_kwargs = {
'build_by_default': install_tgt,
'link_args': options.get_link_args(tgt.cmake_name, tgt.link_flags + tgt.link_libraries),
'link_with': link_with,
'include_directories': id_node(inc_var),
'install': install_tgt,
'override_options': options.get_override_options(tgt.cmake_name, tgt.override_options),
'objects': [method(x, 'extract_all_objects') for x in objec_libs],
}
# Only set if installed and only override if it is set
if install_tgt and tgt.install_dir:
tgt_kwargs['install_dir'] = tgt.install_dir
# Handle compiler args
for key, val in tgt.compile_opts.items():
tgt_kwargs['{}_args'.format(key)] = options.get_compile_args(tgt.cmake_name, key, val)
            # Handle -fPIC, etc
if tgt_func == 'executable':
tgt_kwargs['pie'] = tgt.pie
elif tgt_func == 'static_library':
tgt_kwargs['pic'] = tgt.pie
# declare_dependency kwargs
dep_kwargs = {
'link_args': tgt.link_flags + tgt.link_libraries,
'link_with': id_node(tgt_var),
'compile_args': tgt.public_compile_opts,
'include_directories': id_node(inc_var),
}
if dependencies:
generated += dependencies
# Generate the function nodes
dir_node = assign(dir_var, function('include_directories', tgt.includes))
sys_node = assign(sys_var, function('include_directories', tgt.sys_includes, {'is_system': True}))
inc_node = assign(inc_var, array([id_node(dir_var), id_node(sys_var)]))
node_list = [dir_node, sys_node, inc_node]
if tgt_func == 'header_only':
del dep_kwargs['link_with']
dep_node = assign(dep_var, function('declare_dependency', kwargs=dep_kwargs))
node_list += [dep_node]
src_var = None
tgt_var = None
else:
src_node = assign(src_var, function('files', sources))
tgt_node = assign(tgt_var, function(tgt_func, [tgt_var, [id_node(src_var)] + generated], tgt_kwargs))
node_list += [src_node, tgt_node]
if tgt_func in ['static_library', 'shared_library']:
dep_node = assign(dep_var, function('declare_dependency', kwargs=dep_kwargs))
node_list += [dep_node]
else:
dep_var = None
# Add the nodes to the ast
root_cb.lines += node_list
processed[tgt.name] = {'inc': inc_var, 'src': src_var, 'dep': dep_var, 'tgt': tgt_var, 'func': tgt_func}
name_map[tgt.cmake_name] = tgt.name
def process_custom_target(tgt: ConverterCustomTarget) -> None:
            # CMake allows specifying multiple commands in a custom target.
# To map this to meson, a helper script is used to execute all
# commands in order. This additionally allows setting the working
# directory.
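            # For example (hypothetical subcommands, shown only to illustrate the
            # shape of the generated call): two subcommands ['gen.py'] and
            # ['post.py'] end up as
            #   [*meson_command, '--internal', 'cmake_run_ctgt',
            #    '-o', '@OUTPUT@', ('-O', *original_outputs if any),
            #    '-d', <working_dir>, 'gen.py', ';;;', 'post.py', ';;;']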
detect_cycle(tgt)
tgt_var = tgt.name # type: str
def resolve_source(x: T.Any) -> T.Any:
if isinstance(x, ConverterTarget):
if x.name not in processed:
process_target(x)
return extract_tgt(x)
if isinstance(x, ConverterCustomTarget):
if x.name not in processed:
process_custom_target(x)
return extract_tgt(x)
elif isinstance(x, CustomTargetReference):
if x.ctgt.name not in processed:
process_custom_target(x.ctgt)
return resolve_ctgt_ref(x)
else:
return x
# Generate the command list
command = []
command += mesonlib.meson_command
command += ['--internal', 'cmake_run_ctgt']
command += ['-o', '@OUTPUT@']
if tgt.original_outputs:
command += ['-O'] + tgt.original_outputs
command += ['-d', tgt.working_dir]
# Generate the commands. Subcommands are separated by ';;;'
for cmd in tgt.command:
command += [resolve_source(x) for x in cmd] + [';;;']
tgt_kwargs = {
'input': [resolve_source(x) for x in tgt.inputs],
'output': tgt.outputs,
'command': command,
'depends': [resolve_source(x) for x in tgt.depends],
}
root_cb.lines += [assign(tgt_var, function('custom_target', [tgt.name], tgt_kwargs))]
processed[tgt.name] = {'inc': None, 'src': None, 'dep': None, 'tgt': tgt_var, 'func': 'custom_target'}
name_map[tgt.cmake_name] = tgt.name
# Now generate the target function calls
for i in self.custom_targets:
if i.name not in processed:
process_custom_target(i)
for i in self.targets:
if i.name not in processed:
process_target(i)
self.generated_targets = processed
self.internal_name_map = name_map
return root_cb
def target_info(self, target: str) -> T.Optional[T.Dict[str, str]]:
# Try resolving the target name
# start by checking if there is a 100% match (excluding the name prefix)
prx_tgt = _sanitize_cmake_name(target)
if prx_tgt in self.generated_targets:
return self.generated_targets[prx_tgt]
# check if there exists a name mapping
if target in self.internal_name_map:
target = self.internal_name_map[target]
assert(target in self.generated_targets)
return self.generated_targets[target]
return None
def target_list(self) -> T.List[str]:
return list(self.internal_name_map.keys())
def _object_lib_workaround(self) -> bool:
return 'link' in self.linkers and self.backend_name.startswith('vs')
| apache-2.0 | -7,696,294,225,233,389,000 | 42.237105 | 150 | 0.565673 | false |
calispac/digicampipe | digicampipe/scripts/spe.py | 1 | 14553 | #!/usr/bin/env python
"""
Do the Single Photoelectron analysis
Usage:
digicam-spe [options] [--] <INPUT>...
Options:
-h --help Show this screen.
--max_events=N Maximum number of events to analyse.
--max_histo_filename=FILE File path of the max histogram.
[Default: ./max_histo.pk]
--charge_histo_filename=FILE File path of the charge histogram
[Default: ./charge_histo.pk]
--raw_histo_filename=FILE File path of the raw histogram
[Default: ./raw_histo.pk]
-o OUTPUT --output=OUTPUT Output file path to store the results.
[Default: ./results.npz]
-c --compute Compute the data.
-f --fit Fit.
-d --display Display.
-v --debug Enter the debug mode.
-p --pixel=<PIXEL> Give a list of pixel IDs.
--shift=N Number of bins to shift before integrating
[default: 0].
--integral_width=N Number of bins to integrate over
[default: 7].
--pulse_finder_threshold=F Threshold of pulse finder in arbitrary units
[default: 2.0].
--save_figures=PATH Save the plots to the indicated folder.
                              Figures are not saved if set to none
[default: none]
--ncall=N Number of calls for the fit [default: 10000]
--n_samples=N Number of samples per waveform
"""
import os
import matplotlib.pyplot as plt
import numpy as np
from docopt import docopt
from histogram.histogram import Histogram1D
from tqdm import tqdm
from digicampipe.calib.baseline import fill_baseline, subtract_baseline
from digicampipe.calib.charge import compute_charge
from digicampipe.calib.peak import find_pulse_with_max, \
find_pulse_fast
from digicampipe.io.event_stream import calibration_event_stream
from digicampipe.scripts import raw
from digicampipe.scripts.fmpe import FMPEFitter
from digicampipe.utils.docopt import convert_pixel_args, \
convert_int, convert_text
from digicampipe.utils.pdf import fmpe_pdf_10
class MaxHistoFitter(FMPEFitter):
def __init__(self, histogram, estimated_gain, **kwargs):
n_peaks = 2
super(MaxHistoFitter, self).__init__(histogram, estimated_gain,
n_peaks, **kwargs)
self.parameters_plot_name = {'baseline': '$B$', 'gain': 'G',
'sigma_e': '$\sigma_e$',
'sigma_s': '$\sigma_s$',
'a_0': None, 'a_1': None}
def pdf(self, x, baseline, gain, sigma_e, sigma_s, a_0, a_1):
params = {'baseline': baseline, 'gain': gain, 'sigma_e': sigma_e,
'sigma_s': sigma_s, 'a_0': a_0, 'a_1': a_1, 'bin_width': 0}
return fmpe_pdf_10(x, **params)
class SPEFitter(FMPEFitter):
def __init__(self, histogram, estimated_gain, **kwargs):
n_peaks = 4
super(SPEFitter, self).__init__(histogram, estimated_gain, n_peaks,
**kwargs)
self.parameters_plot_name = {'baseline': '$B$', 'gain': 'G',
'sigma_e': '$\sigma_e$',
'sigma_s': '$\sigma_s$',
'a_1': None, 'a_2': None, 'a_3': None,
'a_4': None}
def pdf(self, x, baseline, gain, sigma_e, sigma_s, a_1, a_2, a_3, a_4):
params = {'baseline': baseline, 'gain': gain, 'sigma_e': sigma_e,
'sigma_s': sigma_s, 'a_0': 0, 'a_1': a_1, 'a_2': a_2,
'a_3': a_3, 'a_4': a_4, 'bin_width': 0}
return fmpe_pdf_10(x, **params)
def initialize_fit(self):
init_params = super(SPEFitter, self).initialize_fit()
init_params['a_4'] = init_params['a_3']
init_params['a_3'] = init_params['a_2']
init_params['a_2'] = init_params['a_1']
init_params['a_1'] = init_params['a_0']
init_params['baseline'] = init_params['baseline'] - init_params['gain']
del init_params['a_0']
self.initial_parameters = init_params
return init_params
def compute_dark_rate(number_of_zeros, total_number_of_events, time):
p_0 = number_of_zeros / total_number_of_events
rate = - np.log(p_0)
rate /= time
return rate
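# Why this works (sketch): if dark pulses are Poisson-distributed, the chance of
# seeing zero pulses in a window of length |time| is P(0) = exp(-rate * time),
# hence rate = -ln(P(0)) / time.  Illustrative numbers (made up): with 8000
# empty windows out of 10000 and a 200 ns window, rate = -ln(0.8) / 200e-9,
# which is roughly 1.1e6 Hz, i.e. about 1.1 MHz of dark counts.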
def compute_max_histo(files, histo_filename, pixel_id, max_events,
integral_width, shift, baseline):
n_pixels = len(pixel_id)
if not os.path.exists(histo_filename):
events = calibration_event_stream(files, pixel_id=pixel_id,
max_events=max_events)
# events = compute_baseline_with_min(events)
events = fill_baseline(events, baseline)
events = subtract_baseline(events)
events = find_pulse_with_max(events)
events = compute_charge(events, integral_width, shift)
max_histo = Histogram1D(
data_shape=(n_pixels,),
bin_edges=np.arange(-4095 * integral_width,
4095 * integral_width),
)
for event in events:
max_histo.fill(event.data.reconstructed_charge)
max_histo.save(histo_filename)
return max_histo
else:
max_histo = Histogram1D.load(histo_filename)
return max_histo
def compute_spe(files, histo_filename, pixel_id, baseline, max_events,
integral_width, shift, pulse_finder_threshold, debug=False):
if not os.path.exists(histo_filename):
n_pixels = len(pixel_id)
events = calibration_event_stream(files,
max_events=max_events,
pixel_id=pixel_id)
events = fill_baseline(events, baseline)
events = subtract_baseline(events)
# events = find_pulse_1(events, 0.5, 20)
# events = find_pulse_2(events, widths=[5, 6], threshold_sigma=2)
events = find_pulse_fast(events, threshold=pulse_finder_threshold)
# events = find_pulse_fast_2(events, threshold=pulse_finder_threshold,
# min_dist=3)
# events = find_pulse_correlate(events,
# threshold=pulse_finder_threshold)
# events = find_pulse_gaussian_filter(events,
# threshold=pulse_finder_threshold)
# events = find_pulse_wavelets(events, widths=[4, 5, 6],
# threshold_sigma=2)
events = compute_charge(events, integral_width=integral_width,
shift=shift)
# events = compute_amplitude(events)
# events = fit_template(events)
# events = compute_full_waveform_charge(events)
spe_histo = Histogram1D(
data_shape=(n_pixels,),
bin_edges=np.arange(-4095 * 50, 4095 * 50)
)
for event in events:
spe_histo.fill(event.data.reconstructed_charge)
spe_histo.save(histo_filename)
return spe_histo
else:
spe_histo = Histogram1D.load(histo_filename)
return spe_histo
def entry():
args = docopt(__doc__)
files = args['<INPUT>']
debug = args['--debug']
max_events = convert_int(args['--max_events'])
raw_histo_filename = args['--raw_histo_filename']
charge_histo_filename = args['--charge_histo_filename']
max_histo_filename = args['--max_histo_filename']
results_filename = args['--output']
pixel_id = convert_pixel_args(args['--pixel'])
n_pixels = len(pixel_id)
integral_width = int(args['--integral_width'])
shift = int(args['--shift'])
pulse_finder_threshold = float(args['--pulse_finder_threshold'])
n_samples = int(args['--n_samples']) # TODO access this in a better way !
estimated_gain = 20
ncall = int(args['--ncall'])
if args['--compute']:
raw_histo = raw.compute(files, max_events=max_events,
pixel_id=pixel_id, filename=raw_histo_filename)
baseline = raw_histo.mode()
compute_max_histo(files, max_histo_filename, pixel_id, max_events,
integral_width, shift, baseline)
compute_spe(files, charge_histo_filename, pixel_id, baseline,
max_events, integral_width, shift, pulse_finder_threshold,
debug=debug)
if args['--fit']:
spe_histo = Histogram1D.load(charge_histo_filename)
max_histo = Histogram1D.load(max_histo_filename)
dark_count_rate = np.zeros(n_pixels) * np.nan
electronic_noise = np.zeros(n_pixels) * np.nan
crosstalk = np.zeros(n_pixels) * np.nan
gain = np.zeros(n_pixels) * np.nan
for i, pixel in tqdm(enumerate(pixel_id), total=n_pixels,
desc='Pixel'):
histo = max_histo[i]
fitter = MaxHistoFitter(histo, estimated_gain, throw_nan=True)
try:
fitter.fit(ncall=100)
fitter.fit(ncall=ncall)
n_entries = histo.data.sum()
number_of_zeros = fitter.parameters['a_0']
window_length = 4 * n_samples
rate = compute_dark_rate(number_of_zeros,
n_entries,
window_length)
electronic_noise[i] = fitter.parameters['sigma_e']
dark_count_rate[i] = rate
if debug:
fitter.draw()
fitter.draw_init(x_label='[LSB]')
fitter.draw_fit(x_label='[LSB]')
plt.show()
except Exception as e:
print('Could not compute dark count rate'
' in pixel {}'.format(pixel))
print(e)
np.savez(results_filename, dcr=dark_count_rate,
sigma_e=electronic_noise, pixel_id=pixel_id)
for i, pixel in tqdm(enumerate(pixel_id), total=n_pixels,
desc='Pixel'):
histo = spe_histo[i]
fitter = SPEFitter(histo, estimated_gain, throw_nan=True)
try:
fitter.fit(ncall=100)
fitter.fit(ncall=ncall)
params = fitter.parameters
n_entries = params['a_1']
n_entries += params['a_2']
n_entries += params['a_3']
n_entries += params['a_4']
crosstalk[i] = (n_entries - params['a_1']) / n_entries
gain[i] = params['gain']
if debug:
fitter.draw()
fitter.draw_init(x_label='[LSB]')
fitter.draw_fit(x_label='[LSB]')
plt.show()
except Exception as e:
print('Could not compute gain and crosstalk'
' in pixel {}'.format(pixel))
print(e)
data = dict(np.load(results_filename))
data['crosstalk'] = crosstalk
data['gain'] = gain
np.savez(results_filename, **data)
save_figure = convert_text(args['--save_figures'])
if save_figure is not None:
output_path = save_figure
spe_histo = Histogram1D.load(charge_histo_filename)
spe_amplitude = Histogram1D.load(charge_histo_filename)
raw_histo = Histogram1D.load(raw_histo_filename)
max_histo = Histogram1D.load(max_histo_filename)
figure_directory = output_path + 'figures/'
if not os.path.exists(figure_directory):
os.makedirs(figure_directory)
histograms = [spe_histo, spe_amplitude, raw_histo, max_histo]
names = ['histogram_charge/', 'histogram_amplitude/', 'histogram_raw/',
'histo_max/']
for i, histo in enumerate(histograms):
figure = plt.figure()
histogram_figure_directory = figure_directory + names[i]
if not os.path.exists(histogram_figure_directory):
os.makedirs(histogram_figure_directory)
for j, pixel in enumerate(pixel_id):
axis = figure.add_subplot(111)
figure_path = histogram_figure_directory + 'pixel_{}'. \
format(pixel)
try:
histo.draw(index=(j,), axis=axis, log=True, legend=False)
figure.savefig(figure_path)
except Exception as e:
print('Could not save pixel {} to : {} \n'.
format(pixel, figure_path))
print(e)
axis.remove()
if args['--display']:
spe_histo = Histogram1D.load(charge_histo_filename)
        raw_histo = Histogram1D.load(raw_histo_filename)
max_histo = Histogram1D.load(max_histo_filename)
spe_histo.draw(index=(0,), log=True, legend=False)
raw_histo.draw(index=(0,), log=True, legend=False)
max_histo.draw(index=(0,), log=True, legend=False)
try:
data = np.load(results_filename)
dark_count_rate = data['dcr']
electronic_noise = data['sigma_e']
crosstalk = data['crosstalk']
gain = data['gain']
except IOError as e:
print(e)
print('Could not find the analysis files !')
plt.figure()
plt.hist(dark_count_rate[np.isfinite(dark_count_rate)],
bins='auto')
plt.xlabel('dark count rate [GHz]')
plt.legend(loc='best')
plt.figure()
plt.hist(crosstalk[np.isfinite(crosstalk)],
bins='auto')
plt.xlabel('Crosstalk []')
plt.legend(loc='best')
plt.figure()
plt.hist(gain[np.isfinite(gain)],
bins='auto')
plt.xlabel('Gain [LSB/p.e.]')
plt.legend(loc='best')
plt.figure()
plt.hist(electronic_noise[np.isfinite(electronic_noise)],
bins='auto')
plt.xlabel('$\sigma_e$ [LSB]')
plt.legend(loc='best')
plt.show()
return
if __name__ == '__main__':
entry()
| gpl-3.0 | 8,734,357,305,808,476,000 | 34.322816 | 79 | 0.5302 | false |
mikand/pySmartMirror | modes/clock.py | 1 | 2299 | import pygame
import time
import math
import os.path
class ClockMode(object):
def __init__(self, assets_path):
self.clockfont = pygame.font.Font(os.path.join(assets_path, "Helvetica.ttf"), 70)
self.datefont = pygame.font.Font(os.path.join(assets_path, "Arimo-Regular.ttf"), 20)
self.white = (255, 255, 255)
self.black = (0, 0, 0)
def loop(self, screen):
now = time.localtime()
        # Pygame angles are measured from the positive x-axis (the 3 o'clock
        # direction), so subtracting 3 hours / 15 minutes rotates each hand's
        # zero reading up to the 12 o'clock position.
        h = now.tm_hour
        h_angle = ((h - 3) * math.pi) / 6.0    # 2*pi radians per 12 hours
        m = now.tm_min
        m_angle = ((m - 15) * math.pi) / 30.0  # 2*pi radians per 60 minutes
        s = now.tm_sec
        s_angle = ((s - 15) * math.pi) / 30.0  # 2*pi radians per 60 seconds
t = self.clockfont.render(time.strftime("%H:%M:%S"), True, self.white)
d = self.datefont.render(time.strftime("%A %d %B %Y"), True, self.white)
space = 10
big_space = 30
radius = 100
center = (screen.get_width() // 2,
(screen.get_height() // 2) - (t.get_height() // 2) - (space//2) - (big_space // 2) - (d.get_height() // 2))
clock_thickness = 10
# Clock panel
pygame.draw.circle(screen, self.white, center, radius, 0)
pygame.draw.circle(screen, self.black, center, radius-clock_thickness, 0)
pygame.draw.line(screen, self.white, center,
(center[0]+(((radius-clock_thickness)*0.5)*math.cos(h_angle)),
center[1]+(((radius-clock_thickness)*0.5)*math.sin(h_angle))), 4)
pygame.draw.line(screen, self.white, center,
(center[0]+(((radius-clock_thickness)*0.9)*math.cos(m_angle)),
center[1]+(((radius-clock_thickness)*0.9) *math.sin(m_angle))), 3)
pygame.draw.aaline(screen, self.white, center,
(center[0]+((radius - clock_thickness//2)*math.cos(s_angle)),
center[1]+((radius - clock_thickness//2)*math.sin(s_angle))))
screen.blit(t, (center[0] - (t.get_width() // 2), center[1] + radius + big_space))
screen.blit(d, (center[0] - (d.get_width() // 2),
center[1] + radius + big_space + t.get_height() + space))
def process_event(self, event):
pass
def preferred_fps(self):
return 10
def deinit(self):
pass
| apache-2.0 | 6,010,482,935,936,599,000 | 36.080645 | 125 | 0.530666 | false |
amandapersampa/MicroGerencia | app/main/controllers/Unidade_medida_controller.py | 1 | 1550 | # coding=utf-8
from flask import jsonify
from flask import render_template,flash,redirect,url_for
from app import app
from app.main.forms.Unidade_medida_forms import Unidade_medida_forms
from app.main.models.Unidade_medida import Unidade_medida_dao
from app.main.service.Unidade_medida_service import Unidade_medida_service
from app.main.util import to_string
service = Unidade_medida_service()
@app.route("/unidadeMedida")
def salva_unidade_medida():
unidade = Unidade_medida_dao("M")
return jsonify(service.salvar(unidade))
@app.route("/unidadeMedida/list")
def findAll_unidade():
resultados = create_cols(service.findAll())
page = {
"titles": ["Código", "Unidade"],
"header": "Unidade de Medida",
"table": "Unidades de Medidas Cadastradas"
}
return render_template("listar.html", page=page, resultados=resultados)
@app.route("/unidadeMedida/<id>")
def findById_unidade(id):
service.findById(id)
return 'ok'
@app.route("/unidadeMedida/cadastro", methods=["GET", "POST"])
def cadastro_unidade_medida():
form = Unidade_medida_forms()
if form.is_submitted():
unidade = Unidade_medida_dao(form.nome.data)
service.salvar(unidade)
return render_template('cadastro_unidade_medida.html', form=form)
def create_cols(list):
lista = []
for i in range(len(list)):
resultado = dict()
resultado['col1'] = to_string(list[i].id_unidade_medida)
resultado['col2'] = to_string(list[i].nome)
lista.append(resultado)
return lista | mit | 8,792,491,212,698,203,000 | 28.807692 | 75 | 0.692705 | false |
AoiKuiyuyou/AoikProducerConsumerRunner | src/aoikproducerconsumerrunner/dep/aoikimportutil.py | 1 | 15431 | # coding: utf-8
"""
File ID: 3ngd7IH
"""
from __future__ import absolute_import
import imp
import os.path
import sys
try:
from urllib.request import urlopen ## Py3
except ImportError:
from urllib2 import urlopen ## Py2
#/
__version__ = '0.2.2'
#/ define |exec_| and |raise_| that are 2*3 compatible.
##
## Modified from |six|:
## https://bitbucket.org/gutworth/six/src/cc9fce6016db076497454f9352e55b4758ccc07c/six.py?at=default#cl-632
##
## ---BEG
if sys.version_info[0] == 2:
#/
def exec_(_code_, _globs_=None, _locs_=None):
"""Execute code in a namespace."""
if _globs_ is None:
frame = sys._getframe(1)
_globs_ = frame.f_globals
if _locs_ is None:
_locs_ = frame.f_locals
del frame
elif _locs_ is None:
_locs_ = _globs_
exec("""exec _code_ in _globs_, _locs_""")
#/
exec_("""def raise_(exc, tb=None):
raise exc, None, tb
""")
else:
#/
exec_ = eval('exec')
#/
def raise_(exc, tb=None):
if tb is not None and exc.__traceback__ is not tb:
raise exc.with_traceback(tb)
else:
raise exc
## ---END
#/
def add_to_sys_modules(mod_name, mod_obj=None):
"""Add a module object to |sys.modules|.
@param mod_name: module name, used as key to |sys.modules|.
        If |mod_name| is |a.b.c| while modules |a| and |a.b| do not exist yet,
empty modules will be created for |a| and |a.b| as well.
@param mod_obj: a module object.
If None, an empty module object will be created.
"""
#/
mod_sname_s = mod_name.split('.')
#/
parent_mod_name = ''
parent_mod_obj = None
for mod_sname in mod_sname_s:
#/
if parent_mod_name == '':
cur_mod_name = mod_sname
else:
cur_mod_name = parent_mod_name + '.' + mod_sname
#/
if cur_mod_name == mod_name:
#/
cur_mod_obj = mod_obj
else:
#/
cur_mod_obj = sys.modules.get(cur_mod_name, None)
#/
if cur_mod_obj is None:
#/ create an empty module
cur_mod_obj = imp.new_module(cur_mod_name)
#/
sys.modules[cur_mod_name] = cur_mod_obj
#/
if parent_mod_obj is not None:
setattr(parent_mod_obj, mod_sname, cur_mod_obj)
#/
parent_mod_name = cur_mod_name
parent_mod_obj = cur_mod_obj
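#/ Illustrative effect (module names made up): add_to_sys_modules('a.b.c')
## creates empty modules for |a| and |a.b| if they are missing, registers all
## three in |sys.modules|, and sets each child as an attribute of its parent,
## so |a.b.c| is reachable afterwards.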
#/
def import_module_by_code(mod_code, mod_name, sys_add=True, sys_use=True):
"""Create a module object by code.
@param mod_code: the code that the module contains.
@param mod_name: module name.
    @param sys_use: whether to use an existing module with the same name in |sys.modules|,
        instead of creating a new one.
    @param sys_add: whether to add the module object to |sys.modules|.
If |sys_add| is on, |mod_name| is used as key to |sys.modules|.
If |sys_add| is on, and if |mod_name| is |a.b.c| while modules
        |a| and |a.b| do not exist yet, empty modules will be created
for |a| and |a.b| as well.
"""
#/
mod_obj_old = sys.modules.get(mod_name, None)
#/
if mod_obj_old is not None and sys_use:
return mod_obj_old
#/
mod_obj = imp.new_module(mod_name)
#/ 3plQeic
exec_(mod_code, mod_obj.__dict__, mod_obj.__dict__)
#/
if sys_add:
#/
add_to_sys_modules(mod_name=mod_name, mod_obj=mod_obj)
#/
return mod_obj
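#/ Illustrative usage (the module name |demo_mod| is made up):
##   mod = import_module_by_code("ANSWER = 42", 'demo_mod')
##   mod.ANSWER                   ## -> 42
##   'demo_mod' in sys.modules    ## -> True, since |sys_add| defaults to True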
#/
def import_module_by_name(mod_name, ns_dir=None):
"""Import a module by module name.
@param mod_name: module name in Python namespace.
@param ns_dir: load from which namespace dir.
Namespace dir means the dir is considered as if it's in |sys.path|.
If |ns_dir| is specified, only load from that dir.
Otherwise load from any namespace dirs in |sys.path|.
"""
#/
if ns_dir is None:
#/
try:
return sys.modules[mod_name]
except KeyError:
pass
#/
__import__(mod_name)
        ## raise ImportError if the module does not exist.
## raise any error from the imported module.
#/
return sys.modules[mod_name]
#/
#assert ns_dir is not None
#/
mod_file_name_s = mod_name.split('.')
## |file_name| means the bare name, without extension.
##
## E.g. 'a.b.c' to ['a', 'b', 'c']
#/
parent_mod_name = '' ## change in each iteration below
mod_file_dir = ns_dir ## change in each iteration below
for mod_file_name in mod_file_name_s:
#/
if parent_mod_name == '':
parent_mod_obj = None
mod_name = mod_file_name
else:
parent_mod_obj = sys.modules[parent_mod_name]
mod_name = parent_mod_name + '.' + mod_file_name
#/
if parent_mod_obj:
__import__(mod_name)
mod_obj = sys.modules[mod_name]
else:
file_handle = None
try:
#/
tup = imp.find_module(mod_file_name, [mod_file_dir])
## raise ImportError
#/
mod_obj = imp.load_module(mod_name, *tup)
## raise any error from the imported module.
#/
file_handle = tup[0]
finally:
if file_handle is not None:
file_handle.close()
#/
parent_mod_name = mod_name
mod_file_dir = os.path.join(mod_file_dir, mod_file_name)
#/
return mod_obj
#/
def import_module_by_path(mod_path, mod_name, sys_add=True, sys_use=True):
"""Import a module by module file path.
@param mod_path: module file path.
@param mod_name: module name to be imported as.
@param sys_use: see func |import_module_by_code|'s same name arg.
@param sys_add: see func |import_module_by_code|'s same name arg.
"""
#/
mod_code = open(mod_path).read()
## raise error
#/
mod_obj = import_module_by_code(
mod_code=mod_code,
mod_name=mod_name,
sys_use=sys_use,
sys_add=sys_add,
)
## raise error
#/
return mod_obj
#/
def import_module_by_http(uri, mod_name, sys_use=True, sys_add=True):
"""Download module code via HTTP and create the module object from the code.
@param uri: HTTP URI of the module file.
@param mod_name: module name to be imported as.
@param sys_use: see func |import_module_by_code|'s same name arg.
@param sys_add: see func |import_module_by_code|'s same name arg.
"""
#/
resp = urlopen(uri)
## raise error
#/
mod_code = resp.read()
## raise error
#/
mod_obj = import_module_by_code(
mod_code=mod_code,
mod_name=mod_name,
sys_use=sys_use,
sys_add=sys_add,
)
## raise error
#/
return mod_obj
#/
def uri_split(uri, mod_attr_sep='::'):
#/
uri_part_s = uri.split(mod_attr_sep, 2)
## use |split| instead of |partition| to be compatible with Python 2.4-
if len(uri_part_s) == 2:
mod_uri, attr_chain = uri_part_s
else:
mod_uri = uri_part_s[0]
attr_chain = None
#/
if uri.startswith('http://'):
#/
prot = 'http'
#/ mod_uri is file url
#mod_uri = mod_uri
#/
elif uri.startswith('https://'):
prot = 'https'
#/ mod_uri is file url
#mod_uri = mod_uri
#/
elif mod_uri.startswith('py://'):
#/
prot = 'py'
#/ mod_uri is module name
mod_uri = mod_uri[5:]
#/
elif mod_uri.startswith('file://'):
#/
prot = 'file'
#/ mod_uri is file path
mod_uri = mod_uri[7:]
#/
elif mod_uri.endswith('.py'):
## This means if no protocol prefix is present, and the uri ends with |.py|,
## then consider the uri as module file path instead of module name.
#/
prot = 'file'
#/ mod_uri is file path
#mod_uri = mod_uri
else:
#/
prot = 'py'
#/ mod_uri is module name
#mod_uri = mod_uri
#/
res = (prot, mod_uri, attr_chain)
return res
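#/ Illustrative splits (values made up, behaviour follows the branches above):
##   uri_split('a.b.c::x.y')            -> ('py',    'a.b.c',             'x.y')
##   uri_split('pkg/mod.py::func')      -> ('file',  'pkg/mod.py',        'func')
##   uri_split('file:///tmp/m.py')      -> ('file',  '/tmp/m.py',         None)
##   uri_split('https://host/m.py::f')  -> ('https', 'https://host/m.py', 'f')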
#/
def getattr_chain(obj, attr_chain, sep='.'):
"""Get the last attribute of a specified chain of attributes from a specified object.
E.g. |getattr_chain(x, 'a.b.c')| is equivalent to |x.a.b.c|.
@param obj: an object
@param attr_chain: a chain of attribute names
@param sep: separator for the chain of attribute names
"""
#/
if sep is None:
sep = '.'
#/
attr_name_s = attr_chain.split(sep)
#/
new_obj = obj
for attr_name in attr_name_s:
new_obj = getattr(new_obj, attr_name)
#/
return new_obj
#/
def load_obj(
uri,
mod_name=None,
sys_use=True,
sys_add=True,
mod_attr_sep='::',
attr_chain_sep='.',
retn_mod=False,
uri_parts=None,
):
"""Load an object from a module (specified by module name in Python namespace)
or from a module file (specified by module file path).
@param uri: an uri specifying which object to load.
An |uri| consists of two parts: |module uri| and |attr chain|,
e.g. |a/b/c.py::x.y.z| or |a.b.c::x.y.z|
#/ module uri
|a/b/c.py| or |a.b.c| is the |module uri| part.
Can be either a file path or a module name in Python namespace.
Whether it is a file path is determined by whether it ends with |.py|.
#/ attr chain
|x.y.z| is attribute chain on the module object specified by module uri.
@param mod_name: module name to be imported as.
Only applies when |uri| specifies a module file path, not a module name.
If None, the module file's name is used.
E.g. |path/to/hello.py| gets module name |hello|.
@param sys_use: see func |import_module_by_code|'s same name arg.
@param sys_add: see func |import_module_by_code|'s same name arg.
@param mod_attr_sep: see func |load_obj|'s same name arg.
@param attr_chain_sep: see func |load_obj|'s same name arg.
@retn_mod: see func |load_obj|'s same name arg.
"""
#/
if uri_parts is None:
uri_parts = uri_split(uri=uri, mod_attr_sep=mod_attr_sep)
prot, mod_uri, attr_chain = uri_parts
#/
if prot == 'py':
## This means the uri specifies a module name, e.g. |a.b.c|
#/
mod_name_to_load = mod_uri
## avoid naming collision with func arg |mod_name|.
##
## arg |mod_name| is not used when importing by module name.
## the name of the module to import is specified in arg |uri|.
#/
mod_obj = import_module_by_name(mod_name_to_load)
## raise error
else:
## This means the uri specifies a module file path, e.g. |/a/b/c.py|
#/
mod_file_path = mod_uri
#/
if not mod_name:
_, mod_file_name = os.path.split(mod_file_path)
mod_name, _ = os.path.splitext(mod_file_name)
#/
mod_obj = import_module_by_path(mod_file_path,
mod_name=mod_name,
sys_use=sys_use,
sys_add=sys_add,
)
## raise error
#/
if not attr_chain:
if retn_mod:
return mod_obj, None
else:
return mod_obj
#/
#assert attr_chain
attr_obj = getattr_chain(
obj=mod_obj,
attr_chain=attr_chain,
sep=attr_chain_sep,
)
## raise error
#/
if retn_mod:
return mod_obj, attr_obj
else:
return attr_obj
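#/ Illustrative usage (module names / file paths are examples only):
##   load_obj('os.path::join')          ## -> the function |os.path.join|
##   load_obj('pkg/mod.py::Cls.attr')   ## import |pkg/mod.py| as module |mod|,
##                                      ## then follow attributes |Cls| -> |attr|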
#/
def load_obj_http(
uri,
mod_name=None,
sys_use=True,
sys_add=True,
mod_attr_sep='::',
attr_chain_sep='.',
retn_mod=False,
uri_parts=None,
):
"""Load an object from a remote module file downloaded via HTTP.
@param uri: specify the remote module file's location and which attribute object to load.
#/ load the module object
https://localhost/aoikimportutil/aoikimportutil.py
#/ load the module object, get its attribute object |load_obj_http|.
https://localhost/aoikimportutil/aoikimportutil.py::load_obj_http
@param mod_name: module name to be imported as.
@param sys_use: see func |import_module_by_code|'s same name arg.
@param sys_add: see func |import_module_by_code|'s same name arg.
@param mod_attr_sep: see func |load_obj|'s same name arg.
@param attr_chain_sep: see func |load_obj|'s same name arg.
@retn_mod: see func |load_obj|'s same name arg.
"""
#/
if uri_parts is None:
uri_parts = uri_split(uri=uri, mod_attr_sep=mod_attr_sep)
_, file_url, attr_chain = uri_parts
#/
if not mod_name:
## |None| or |''|
#/ use file name as module name
_, file_name = os.path.split(file_url)
mod_name, _ = os.path.splitext(file_name)
#/ should not happen, but just in case
if not mod_name:
raise ValueError('Module name can not be inferred from the URI.\n URI is |%s|' % uri)
#/
#assert mod_name
mod_obj = import_module_by_http(
uri=file_url,
mod_name=mod_name,
sys_use=sys_use,
sys_add=sys_add,
)
#/
if not attr_chain:
if retn_mod:
return mod_obj, None
else:
return mod_obj
#/
#assert attr_chain
attr_obj = getattr_chain(
obj=mod_obj,
attr_chain=attr_chain,
sep=attr_chain_sep,
)
## raise error
#/
if retn_mod:
return mod_obj, attr_obj
else:
return attr_obj
#/
def load_obj_local_or_remote(
uri,
mod_name=None,
sys_use=True,
sys_add=True,
mod_attr_sep='::',
attr_chain_sep='.',
retn_mod=False,
):
"""Load an object from local or remote (using HTTP).
Whether it's local or remote depends on
whether the |uri| starts with |http://| or |https://|.
Local loading is done via func |load_obj|.
Remote loading is done via func |load_obj_http|.
@param uri: see func |load_obj| or |load_obj_http|'s same name arg.
@param mod_name: see func |load_obj| or |load_obj_http|'s same name arg.
@param sys_use: see func |import_module_by_code|'s same name arg.
@param sys_add: see func |import_module_by_code|'s same name arg.
@param mod_attr_sep: see func |load_obj| or |load_obj_http|'s same name arg.
@param attr_chain_sep: see func |load_obj| or |load_obj_http|'s same name arg.
@retn_mod: see func |load_obj| or |load_obj_http|'s same name arg.
"""
#/
uri_parts = uri_split(uri=uri, mod_attr_sep=mod_attr_sep)
prot = uri_parts[0]
#/
if prot in ('py', 'file'):
#/
return load_obj(
uri,
mod_name=mod_name,
sys_use=sys_use,
sys_add=sys_add,
mod_attr_sep=mod_attr_sep,
attr_chain_sep=attr_chain_sep,
retn_mod=retn_mod,
uri_parts=uri_parts,
)
#/
elif prot in ('http', 'https'):
#/
return load_obj_http(
uri,
mod_name=mod_name,
sys_use=sys_use,
sys_add=sys_add,
mod_attr_sep=mod_attr_sep,
attr_chain_sep=attr_chain_sep,
retn_mod=retn_mod,
uri_parts=uri_parts,
)
#/
else:
#/
assert 0, uri
| mit | 3,262,029,837,573,121,000 | 23.610845 | 108 | 0.554274 | false |
stoneflyop1/py_machine_learning | ch08/main.py | 1 | 1080 | import pandas as pd
df = pd.read_csv('../data/movie_data.csv')
import cleandata
df['review'] = df['review'].apply(cleandata.preprocessor)
# grid search, very time-consuming
#import gridlearn
#gridlearn.learn(df)
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.linear_model import SGDClassifier
import tokendata
vect = HashingVectorizer(
decode_error='ignore', n_features=(2 ** 21),
preprocessor=None, tokenizer=tokendata.tokenizer
)
clf = SGDClassifier(loss='log', random_state=1, n_iter=1)
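# A short note (interpretation of the intent here): SGDClassifier with
# loss='log' is logistic regression trained by stochastic gradient descent.
# Combined with the stateless HashingVectorizer above it enables out-of-core
# learning: the model is updated below with partial_fit() on one minibatch of
# documents at a time instead of holding the whole corpus in memory.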
import ooclearn
doc_stream = ooclearn.stream_docs(path='../data/movie_data.csv')
import pyprind  # progress bar
pbar = pyprind.ProgBar(45)
import numpy as np
classes = np.array([0, 1])
for _ in range(45):
X_train, y_train = ooclearn.get_minibatch(doc_stream, size=1000)
if not X_train: break
X_train = vect.transform(X_train)
clf.partial_fit(X_train, y_train, classes=classes)
pbar.update()
X_test, y_test = ooclearn.get_minibatch(doc_stream, size=5000)
X_test = vect.transform(X_test)
print('Accuracy: %.3f' % clf.score(X_test, y_test)) | mit | 611,696,991,609,127,800 | 27.837838 | 68 | 0.724203 | false |
joequant/Fudge-Python | setup.py | 1 | 1200 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from setuptools import setup, find_packages
setup(
name = 'fudgemsg',
version = '0.1',
description = 'Fudge is a hierarchical, typesafe, binary, self-describing message encoding system.',
license = 'APL2',
url = 'http://github.com/joequant/Fudge-Python',
author = 'Joseph C Wang',
author_email = '[email protected]',
packages = find_packages(),
test_suite = 'nose.collector'
)
| apache-2.0 | -5,373,877,245,192,901,000 | 36.5 | 104 | 0.735 | false |
hodger/regolith | regolith/main.py | 1 | 3514 | """The main CLI for regolith"""
from __future__ import print_function
import os
import json
from argparse import ArgumentParser
from regolith.runcontrol import RunControl, NotSpecified
from regolith.validators import DEFAULT_VALIDATORS
from regolith.database import connect
from regolith import commands
DEFAULT_RC = RunControl(
_validators=DEFAULT_VALIDATORS,
builddir='_build',
mongodbpath=property(lambda self: os.path.join(self.builddir, '_dbpath'))
)
DISCONNECTED_COMMANDS = {
'rc': lambda rc: print(rc._pformat()),
'deploy': commands.deploy,
}
CONNECTED_COMMANDS = {
'add': commands.add_cmd,
'ingest': commands.ingest,
'app': commands.app,
'build': commands.build,
}
def load_json_rcfile(fname):
"""Loads a JSON run control file."""
with open(fname, 'r') as f:
rc = json.load(f)
return rc
def load_rcfile(fname):
"""Loads a run control file."""
base, ext = os.path.splitext(fname)
if ext == '.json':
rc = load_json_rcfile(fname)
else:
        raise RuntimeError('could not determine run control file type from extension.')
return rc
def create_parser():
p = ArgumentParser()
subp = p.add_subparsers(title='cmd', dest='cmd')
# rc subparser
rcp = subp.add_parser('rc', help='prints run control')
# add subparser
addp = subp.add_parser('add', help='adds a record to a database and collection')
addp.add_argument('db', help='database name')
addp.add_argument('coll', help='collection name')
addp.add_argument('documents', nargs='+', help='documents, in JSON / mongodb format')
# ingest subparser
ingp = subp.add_parser('ingest', help='ingest many records from a foreign '
'resource into a database')
ingp.add_argument('db', help='database name')
ingp.add_argument('filename', help='file to ingest')
ingp.add_argument('--coll', dest='coll', default=None,
                      help='collection name, if this is not given it is inferred from the '
'file type or file name.')
# app subparser
appp = subp.add_parser('app', help='starts up a flask app for inspecting and '
'modifying regolith data.')
appp.add_argument('--debug', dest='debug', action='store_true', default=False,
help='starts server in debug mode')
# builder subparser
bldp = subp.add_parser('build', help='builds various available targets')
bldp.add_argument('build_targets', nargs='+', help='targets to build.')
# deploy subparser
depp = subp.add_parser('deploy', help='deploys what was built by regolith')
return p
def filter_databases(rc):
"""Filters the databases list down to only the ones we need, in place."""
dbs = rc.databases
public_only = rc._get('public_only', False)
if public_only:
dbs = [db for db in dbs if db['public']]
dbname = rc._get('db')
if dbname is not None:
dbs = [db for db in dbs if db['name'] == dbname]
rc.databases = dbs
def main(args=None):
rc = DEFAULT_RC
rc._update(load_rcfile('regolithrc.json'))
parser = create_parser()
ns = parser.parse_args(args)
rc._update(ns.__dict__)
filter_databases(rc)
if rc.cmd in DISCONNECTED_COMMANDS:
DISCONNECTED_COMMANDS[rc.cmd](rc)
else:
with connect(rc) as rc.client:
CONNECTED_COMMANDS[rc.cmd](rc)
if __name__ == '__main__':
main()
| cc0-1.0 | -3,644,257,841,367,243,000 | 32.466667 | 90 | 0.627775 | false |
shainer/matasano | set7/md4.py | 1 | 3296 | # Thanks to http://www.acooke.org/cute/PurePython0.html for
# this part.
#!/usr/bin/python3
import binascii
from array import array
from struct import pack, unpack
def _pad(msg, fakeLen):
n = len(msg) if fakeLen is None else fakeLen
bit_len = n * 8
index = (bit_len >> 3) & 0x3f
pad_len = 120 - index
if index < 56:
pad_len = 56 - index
padding = b'\x80' + b'\x00'*63
padded_msg = msg + padding[:pad_len] + pack('<Q', bit_len)
return padded_msg
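# e.g. _pad(b'abc', None) yields a single 64-byte block: the 3 message bytes,
# then 0x80, then 52 zero bytes, then pack('<Q', 24) -- the message bit
# length, little-endian -- as MD4 padding requires.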
def _left_rotate(n, b):
return ((n << b) | ((n & 0xffffffff) >> (32 - b))) & 0xffffffff
def _f(x, y, z): return x & y | ~x & z
def _g(x, y, z): return x & y | x & z | y & z
def _h(x, y, z): return x ^ y ^ z
def _f1(a, b, c, d, k, s, X): return _left_rotate(a + _f(b, c, d) + X[k], s)
def _f2(a, b, c, d, k, s, X): return _left_rotate(a + _g(b, c, d) + X[k] + 0x5a827999, s)
def _f3(a, b, c, d, k, s, X): return _left_rotate(a + _h(b, c, d) + X[k] + 0x6ed9eba1, s)
# Implementation of MD4 hashing.
class MD4:
def __init__(self, internalState=None):
if internalState is None:
self.A = 0x67452301
self.B = 0xefcdab89
self.C = 0x98badcfe
self.D = 0x10325476
else:
self.A = internalState[0]
self.B = internalState[1]
self.C = internalState[2]
self.D = internalState[3]
def update(self, message_string, fakeLen=None):
msg_bytes = _pad(message_string, fakeLen)
for i in range(0, len(msg_bytes), 64):
self._compress(msg_bytes[i:i+64])
def _compress(self, block):
a, b, c, d = self.A, self.B, self.C, self.D
x = []
for i in range(0, 64, 4):
x.append(unpack('<I', block[i:i+4])[0])
a = _f1(a,b,c,d, 0, 3, x)
d = _f1(d,a,b,c, 1, 7, x)
c = _f1(c,d,a,b, 2,11, x)
b = _f1(b,c,d,a, 3,19, x)
a = _f1(a,b,c,d, 4, 3, x)
d = _f1(d,a,b,c, 5, 7, x)
c = _f1(c,d,a,b, 6,11, x)
b = _f1(b,c,d,a, 7,19, x)
a = _f1(a,b,c,d, 8, 3, x)
d = _f1(d,a,b,c, 9, 7, x)
c = _f1(c,d,a,b,10,11, x)
b = _f1(b,c,d,a,11,19, x)
a = _f1(a,b,c,d,12, 3, x)
d = _f1(d,a,b,c,13, 7, x)
c = _f1(c,d,a,b,14,11, x)
b = _f1(b,c,d,a,15,19, x)
a = _f2(a,b,c,d, 0, 3, x)
d = _f2(d,a,b,c, 4, 5, x)
c = _f2(c,d,a,b, 8, 9, x)
b = _f2(b,c,d,a,12,13, x)
a = _f2(a,b,c,d, 1, 3, x)
d = _f2(d,a,b,c, 5, 5, x)
c = _f2(c,d,a,b, 9, 9, x)
b = _f2(b,c,d,a,13,13, x)
a = _f2(a,b,c,d, 2, 3, x)
d = _f2(d,a,b,c, 6, 5, x)
c = _f2(c,d,a,b,10, 9, x)
b = _f2(b,c,d,a,14,13, x)
a = _f2(a,b,c,d, 3, 3, x)
d = _f2(d,a,b,c, 7, 5, x)
c = _f2(c,d,a,b,11, 9, x)
b = _f2(b,c,d,a,15,13, x)
a = _f3(a,b,c,d, 0, 3, x)
d = _f3(d,a,b,c, 8, 9, x)
c = _f3(c,d,a,b, 4,11, x)
b = _f3(b,c,d,a,12,15, x)
a = _f3(a,b,c,d, 2, 3, x)
d = _f3(d,a,b,c,10, 9, x)
c = _f3(c,d,a,b, 6,11, x)
b = _f3(b,c,d,a,14,15, x)
a = _f3(a,b,c,d, 1, 3, x)
d = _f3(d,a,b,c, 9, 9, x)
c = _f3(c,d,a,b, 5,11, x)
b = _f3(b,c,d,a,13,15, x)
a = _f3(a,b,c,d, 3, 3, x)
d = _f3(d,a,b,c,11, 9, x)
c = _f3(c,d,a,b, 7,11, x)
b = _f3(b,c,d,a,15,15, x)
# update state
self.A = (self.A + a) & 0xffffffff
self.B = (self.B + b) & 0xffffffff
self.C = (self.C + c) & 0xffffffff
self.D = (self.D + d) & 0xffffffff
# print("Current state is ")
# print(self.A)
# print(self.B)
# print(self.C)
# print(self.D)
def digest(self):
return binascii.hexlify(pack('<IIII', self.A, self.B, self.C, self.D)) | gpl-3.0 | 5,463,946,068,264,438,000 | 25.804878 | 89 | 0.512136 | false |
terranodo/eventkit-cloud | scripts/ci_utils.py | 1 | 2122 | import json
import logging
import os
import subprocess
from concurrent.futures import ThreadPoolExecutor, wait, ALL_COMPLETED
import requests
import yaml
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def run_subprocess(command, shell=True, cwd=None):
current_working_directory = cwd or os.getcwd()
try:
result = subprocess.run(
command,
shell=shell,
cwd=current_working_directory,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
check=True,
)
return result
except subprocess.CalledProcessError as cpe:
logger.error("There was an error calling %s", command)
logger.error(f"Called from: {current_working_directory}")
logger.error(f"stdout: {cpe.stdout}")
logger.error(f"stderr: {cpe.stderr}")
raise cpe
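# Illustrative call (any shell command; output is captured rather than echoed):
#   result = run_subprocess("echo hello")
#   print(result.stdout)  # b'hello\n'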
def pull_and_rename_docker_image(image: str):
image_name, image_tag = image.split(":")
logger.info(f"Pulling image {image}")
run_subprocess(
f"docker login -u {os.getenv('CI_REGISTRY_USER')} -p {os.getenv('CI_REGISTRY_PASSWORD')} {os.getenv('CI_REGISTRY')} && docker pull {os.getenv('CI_REGISTRY_PATH').rstrip('/')}/{image_name}:{image_tag}"
)
run_subprocess(
f"docker tag {os.getenv('CI_REGISTRY_PATH').rstrip('/')}/{image_name}:{image_tag} {image_name}:{image_tag}"
)
logger.info(f"Locally tagged {image}")
def setup_eventkit():
logger.info("Pulling images...")
run_subprocess("cp /etc/ssl/certs/cacert.pem ./conda/cacert.pem")
with open("docker-compose.yml", "r") as _docker_compose_file:
docker_compose = yaml.safe_load(_docker_compose_file)
if not docker_compose:
raise Exception("Could not load docker-compose file.")
images = list(set([service['image'] for name, service in docker_compose["services"].items()]))
with ThreadPoolExecutor() as executor:
futures = [executor.submit(pull_and_rename_docker_image, image) for image in images]
wait(futures, return_when=ALL_COMPLETED)
if __name__ == "__main__":
setup_eventkit()
| bsd-3-clause | 1,915,902,245,713,058,800 | 34.366667 | 208 | 0.6541 | false |
edx/ecommerce | ecommerce/extensions/analytics/tests/test_middleware.py | 1 | 6904 |
from django.test.client import RequestFactory
from social_django.models import UserSocialAuth
from testfixtures import LogCapture
from waffle.testutils import override_switch
from ecommerce.core.constants import ALLOW_MISSING_LMS_USER_ID
from ecommerce.core.exceptions import MissingLmsUserIdException
from ecommerce.core.models import User
from ecommerce.extensions.analytics import middleware
from ecommerce.tests.testcases import TestCase
class TrackingMiddlewareTests(TestCase):
""" Test for TrackingMiddleware. """
TEST_CONTEXT = {'foo': 'bar', 'baz': None, 'lms_user_id': 12345}
MODEL_LOGGER_NAME = 'ecommerce.core.models'
def setUp(self):
super(TrackingMiddlewareTests, self).setUp()
self.middleware = middleware.TrackingMiddleware()
self.request_factory = RequestFactory()
self.user = self.create_user()
def _process_view(self, user):
request = self.request_factory.get('/')
request.user = user
self.middleware.process_view(request, None, None, None)
def _assert_ga_client_id(self, ga_client_id):
self.request_factory.cookies['_ga'] = 'GA1.2.{}'.format(ga_client_id)
self._process_view(self.user)
expected_client_id = self.user.tracking_context.get('ga_client_id')
self.assertEqual(ga_client_id, expected_client_id)
def test_save_ga_client_id(self):
""" Test that middleware save/update GA client id in user tracking context. """
self.assertIsNone(self.user.tracking_context)
self._assert_ga_client_id('test-client-id')
updated_client_id = 'updated-client-id'
self.assertNotEqual(updated_client_id, self.user.tracking_context.get('ga_client_id'))
self._assert_ga_client_id(updated_client_id)
def test_social_auth_lms_user_id(self):
""" Test that middleware saves the LMS user_id from the social auth. """
user = self.create_user(lms_user_id=None)
user.tracking_context = self.TEST_CONTEXT
user.save()
lms_user_id = 67890
UserSocialAuth.objects.create(user=user, provider='edx-oauth2', extra_data={'user_id': lms_user_id})
same_user = User.objects.get(id=user.id)
self.assertIsNone(same_user.lms_user_id)
self._process_view(same_user)
same_user = User.objects.get(id=user.id)
self.assertEqual(same_user.lms_user_id, lms_user_id)
def test_social_auth_multiple_entries_lms_user_id(self):
""" Test that middleware saves the LMS user_id from the social auth, when multiple social auth entries
exist for that user. """
user = self.create_user(lms_user_id=None)
user.tracking_context = self.TEST_CONTEXT
user.save()
lms_user_id = 91827
UserSocialAuth.objects.create(user=user, provider='edx-oidc', uid='older_45', extra_data={'user_id': 123})
UserSocialAuth.objects.create(user=user, provider='edx-oidc', extra_data={'user_id': 456})
social_auth = UserSocialAuth.objects.create(user=user, provider='edx-oauth2',
extra_data={'user_id': lms_user_id})
UserSocialAuth.objects.create(user=user, provider='edx-oauth2', uid='newer_45')
same_user = User.objects.get(id=user.id)
self.assertIsNone(same_user.lms_user_id)
self.assertEqual(same_user.social_auth.count(), 4)
expected = [
(
self.MODEL_LOGGER_NAME,
'INFO',
'Saving lms_user_id from social auth with id {} for user {}. Called from middleware with request '
'path: /, referrer: None'.format(social_auth.id, user.id)
),
]
same_user = User.objects.get(id=user.id)
with LogCapture(self.MODEL_LOGGER_NAME) as log:
self._process_view(same_user)
log.check_present(*expected)
same_user = User.objects.get(id=user.id)
self.assertEqual(same_user.lms_user_id, lms_user_id)
def test_does_not_overwrite_lms_user_id(self):
""" Test that middleware does not overwrite an existing LMS user_id. """
user = self.create_user()
user.tracking_context = self.TEST_CONTEXT
user.save()
initial_lms_user_id = user.lms_user_id
self.assertIsNotNone(initial_lms_user_id)
new_lms_user_id = 10293
UserSocialAuth.objects.create(user=user, provider='edx-oauth2', extra_data={'user_id': new_lms_user_id})
same_user = User.objects.get(id=user.id)
self.assertEqual(initial_lms_user_id, same_user.lms_user_id)
self._process_view(same_user)
same_user = User.objects.get(id=user.id)
self.assertEqual(initial_lms_user_id, same_user.lms_user_id)
def test_no_lms_user_id(self):
""" Test that middleware raises an exception for a missing LMS user_id. """
user = self.create_user(lms_user_id=None)
same_user = User.objects.get(id=user.id)
with self.assertRaises(MissingLmsUserIdException):
self._process_view(same_user)
same_user = User.objects.get(id=user.id)
self.assertIsNone(same_user.lms_user_id)
@override_switch(ALLOW_MISSING_LMS_USER_ID, active=True)
def test_no_lms_user_id_allow_missing(self):
""" Test that middleware logs a missing LMS user_id if the switch is on. """
user = self.create_user(lms_user_id=None)
expected = [
(
self.MODEL_LOGGER_NAME,
'INFO',
'Could not find lms_user_id for user {}. Missing lms_user_id is allowed. Called from middleware with '
'request path: /, referrer: None'
.format(user.id)
),
]
same_user = User.objects.get(id=user.id)
with LogCapture(self.MODEL_LOGGER_NAME) as log:
self._process_view(same_user)
log.check_present(*expected)
same_user = User.objects.get(id=user.id)
self.assertIsNone(same_user.lms_user_id)
@override_switch(ALLOW_MISSING_LMS_USER_ID, active=True)
def test_lms_user_id_exception_allow_missing(self):
""" Test that middleware logs an exception when looking for the LMS user_id. """
user = self.create_user(lms_user_id=None)
UserSocialAuth.objects.create(user=user, provider='edx-oauth2', extra_data=None)
expected = [
(
self.MODEL_LOGGER_NAME,
'WARNING',
'Exception retrieving lms_user_id from social_auth for user {}.'.format(user.id)
),
]
same_user = User.objects.get(id=user.id)
with LogCapture(self.MODEL_LOGGER_NAME) as log:
self._process_view(same_user)
log.check_present(*expected)
same_user = User.objects.get(id=user.id)
self.assertIsNone(same_user.lms_user_id)
| agpl-3.0 | 1,916,622,944,945,380,900 | 40.341317 | 118 | 0.632677 | false |
rookdev/pyes | pyes/connection.py | 1 | 6728 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import random
import socket
import threading
import time
from thrift import Thrift
from thrift.transport import TTransport
from thrift.transport import TSocket
from thrift.protocol import TBinaryProtocol
from .pyesthrift import Rest
from .exceptions import NoServerAvailable
from . import logger
__all__ = ['connect', 'connect_thread_local', 'NoServerAvailable']
"""
Work taken from pycassa.
You need installed "thrift" to use this.
Just do a "pip install thrift".
"""
DEFAULT_SERVER = ("thrift", "127.0.0.1", 9500)
class ClientTransport(object):
"""Encapsulation of a client session."""
def __init__(self, server, framed_transport, timeout, recycle):
socket = TSocket.TSocket(server.hostname, server.port)
if timeout is not None:
socket.setTimeout(timeout * 1000.0)
if framed_transport:
transport = TTransport.TFramedTransport(socket)
else:
transport = TTransport.TBufferedTransport(socket)
protocol = TBinaryProtocol.TBinaryProtocolAccelerated(transport)
client = Rest.Client(protocol)
transport.open()
# server_api_version = client.describe_version().split('.', 1)
# assert server_api_version[0] == API_VERSION[0], \
# "Thrift API version mismatch. " \
# "(Client: %s, Server: %s)" % (API_VERSION[0], server_api_version[0])
self.client = client
self.transport = transport
if recycle:
self.recycle = time.time() + recycle + random.uniform(0, recycle * 0.1)
else:
self.recycle = None
def connect(servers=None, framed_transport=False, timeout=None,
retry_time=60, recycle=None, round_robin=None, max_retries=3):
"""
Constructs a single ElasticSearch connection. Connects to a randomly chosen
server on the list.
If the connection fails, it will attempt to connect to each server on the
list in turn until one succeeds. If it is unable to find an active server,
it will throw a NoServerAvailable exception.
Failing servers are kept on a separate list and eventually retried, no
sooner than `retry_time` seconds after failure.
:keyword servers: [server]
List of ES servers with format: "hostname:port"
Default: [("127.0.0.1",9500)]
:keyword framed_transport: If True, use a TFramedTransport instead of a TBufferedTransport
:keyword timeout: Timeout in seconds (e.g. 0.5)
Default: None (it will stall forever)
:keyword retry_time: Minimum time in seconds until a failed server is reinstated. (e.g. 0.5)
Default: 60
:keyword recycle: Max time in seconds before an open connection is closed and returned to the pool.
Default: None (Never recycle)
:keyword max_retries: Max retry time on connection down
:keyword round_robin: *DEPRECATED*
:return ES client
"""
if servers is None:
servers = [DEFAULT_SERVER]
return ThreadLocalConnection(servers, framed_transport, timeout,
retry_time, recycle, max_retries=max_retries)
connect_thread_local = connect
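# Illustrative use, assuming a thrift-enabled ES node listens locally (the
# server tuple mirrors DEFAULT_SERVER; attribute access on the returned object
# is proxied to the generated Rest.Client, so the available calls depend on
# the thrift IDL):
#   conn = connect(servers=[("thrift", "127.0.0.1", 9500)], timeout=5)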
class ServerSet(object):
"""Automatically balanced set of servers.
Manages a separate stack of failed servers, and automatic
retrial."""
def __init__(self, servers, retry_time=10):
self._lock = threading.RLock()
self._servers = list(servers)
self._retry_time = retry_time
self._dead = []
def get(self):
with self._lock:
if self._dead:
ts, revived = self._dead.pop()
if ts > time.time(): # Not yet, put it back
self._dead.append((ts, revived))
else:
self._servers.append(revived)
logger.info('Server %r reinstated into working pool', revived)
if not self._servers:
logger.critical('No servers available')
raise NoServerAvailable()
return random.choice(self._servers)
def mark_dead(self, server):
with self._lock:
try:
self._servers.remove(server)
self._dead.insert(0, (time.time() + self._retry_time, server))
except ValueError:
pass
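# Illustrative behaviour: a server handed out by get() that later fails is
# quarantined via mark_dead() and retried no sooner than retry_time seconds
# later; get() raises NoServerAvailable once every server is on the dead list.
#   servers = ServerSet([("thrift", "es1", 9500), ("thrift", "es2", 9500)])
#   server = servers.get()
#   servers.mark_dead(server)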
class ThreadLocalConnection(object):
def __init__(self, servers, framed_transport=False, timeout=None,
retry_time=10, recycle=None, max_retries=3):
self._servers = ServerSet(servers, retry_time)
self._framed_transport = framed_transport
self._timeout = timeout
self._recycle = recycle
self._max_retries = max_retries
self._local = threading.local()
def __getattr__(self, attr):
def _client_call(*args, **kwargs):
for retry in xrange(self._max_retries + 1):
try:
conn = self._ensure_connection()
return getattr(conn.client, attr)(*args, **kwargs)
except (Thrift.TException, socket.timeout, socket.error), exc:
logger.exception('Client error: %s', exc)
self.close()
if retry < self._max_retries:
continue
raise NoServerAvailable(exc)
setattr(self, attr, _client_call)
return getattr(self, attr)
def _ensure_connection(self):
"""Make certain we have a valid connection and return it."""
conn = self.connect()
if conn.recycle and conn.recycle < time.time():
logger.debug('Client session expired after %is. Recycling.', self._recycle)
self.close()
conn = self.connect()
return conn
def connect(self):
"""Create new connection unless we already have one."""
if not getattr(self._local, 'conn', None):
try:
server = self._servers.get()
logger.debug('Connecting to %s', server)
self._local.conn = ClientTransport(server, self._framed_transport,
self._timeout, self._recycle)
except (Thrift.TException, socket.timeout, socket.error):
logger.warning('Connection to %s failed.', server)
self._servers.mark_dead(server)
return self.connect()
return self._local.conn
def close(self):
"""If a connection is open, close its transport."""
if self._local.conn:
self._local.conn.transport.close()
self._local.conn = None
| bsd-3-clause | -1,582,925,262,316,940,300 | 34.225131 | 104 | 0.598692 | false |
skyostil/tracy | src/generator/Cheetah/Filters.py | 1 | 9941 | #!/usr/bin/env python
# $Id: Filters.py,v 1.1 2006-09-06 09:50:08 skyostil Exp $
"""Filters for the #filter directive; output filters Cheetah's $placeholders .
Filters may now be used standalone, for debugging or for use outside Cheetah.
Class DummyTemplate, instance _dummyTemplateObj and class NoDefault exist only
for this, to provide a default argument for the filter constructors (which
would otherwise require a real template object).
The default filter is now RawOrEncodedUnicode. Please use this as a base class instead of Filter because it handles non-ASCII characters better.
Meta-Data
================================================================================
Author: Tavis Rudd <[email protected]>
Version: $Revision: 1.1 $
Start Date: 2001/08/01
Last Revision Date: $Date: 2006-09-06 09:50:08 $
"""
__author__ = "Tavis Rudd <[email protected]>"
__revision__ = "$Revision: 1.1 $"[11:-2]
from StringIO import StringIO # not cStringIO because of unicode support
# Additional entities WebSafe knows how to transform. No need to include
# '<', '>' or '&' since those will have been done already.
webSafeEntities = {' ': '&nbsp;', '"': '&quot;'}
class Error(Exception):
pass
class NoDefault:
pass
class DummyTemplate:
"""Fake template class to allow filters to be used standalone.
    This provides only the level of Template compatibility required by the
standard filters. Namely, the get-settings interface works but there are
no settings. Other aspects of Template are not implemented.
"""
def setting(self, name, default=NoDefault):
if default is NoDefault:
raise KeyError(name)
else:
return default
def settings(self):
return {}
_dummyTemplateObj = DummyTemplate()
##################################################
## BASE CLASS
class Filter(object):
"""A baseclass for the Cheetah Filters."""
def __init__(self, templateObj=_dummyTemplateObj):
"""Setup a ref to the templateObj. Subclasses should call this method.
"""
if hasattr(templateObj, 'setting'):
self.setting = templateObj.setting
else:
self.setting = lambda k: None
if hasattr(templateObj, 'settings'):
self.settings = templateObj.settings
else:
self.settings = lambda: {}
def generateAutoArgs(self):
"""This hook allows the filters to generate an arg-list that will be
appended to the arg-list of a $placeholder tag when it is being
translated into Python code during the template compilation process. See
the 'Pager' filter class for an example."""
return ''
def filter(self, val, **kw):
"""Reimplement this method if you want more advanced filterting."""
return str(val)
##################################################
## ENHANCED FILTERS
#####
class ReplaceNone(Filter):
def filter(self, val, **kw):
"""Replace None with an empty string. Reimplement this method if you
        want more advanced filtering."""
if val is None:
return ''
return str(val)
#####
class EncodeUnicode(Filter):
def filter(self, val,
encoding='utf8',
str=str, type=type, unicodeType=type(u''),
**kw):
"""Encode Unicode strings, by default in UTF-8.
>>> import Cheetah.Template
>>> t = Cheetah.Template.Template('''
... $myvar
... ${myvar, encoding='utf16'}
... ''', searchList=[{'myvar': u'Asni\xe8res'}],
... filter='EncodeUnicode')
>>> print t
"""
if type(val)==unicodeType:
filtered = val.encode(encoding)
elif val is None:
filtered = ''
else:
filtered = str(val)
return filtered
class RawOrEncodedUnicode(Filter):
def filter(self, val,
#encoding='utf8',
encoding=None,
str=str, type=type, unicodeType=type(u''),
**kw):
"""Pass Unicode strings through unmolested, unless an encoding is specified.
"""
if type(val)==unicodeType:
if encoding:
filtered = val.encode(encoding)
else:
filtered = val
elif val is None:
filtered = ''
else:
filtered = str(val)
return filtered
#####
class MaxLen(RawOrEncodedUnicode):
def filter(self, val, **kw):
"""Replace None with '' and cut off at maxlen."""
output = super(MaxLen, self).filter(val, **kw)
if kw.has_key('maxlen') and len(output) > kw['maxlen']:
return output[:kw['maxlen']]
return output
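# Illustrative call (maxlen is normally passed by the placeholder, e.g.
# ${myvar, maxlen=5}):
#   MaxLen().filter('abcdefgh', maxlen=5)  # -> 'abcde'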
#####
class Pager(RawOrEncodedUnicode):
def __init__(self, templateObj=_dummyTemplateObj):
Filter.__init__(self, templateObj)
self._IDcounter = 0
def buildQString(self,varsDict, updateDict):
finalDict = varsDict.copy()
finalDict.update(updateDict)
qString = '?'
for key, val in finalDict.items():
qString += str(key) + '=' + str(val) + '&'
return qString
def generateAutoArgs(self):
ID = str(self._IDcounter)
self._IDcounter += 1
return ', trans=trans, ID=' + ID
def filter(self, val, **kw):
"""Replace None with '' and cut off at maxlen."""
output = super(Pager, self).filter(val, **kw)
if kw.has_key('trans') and kw['trans']:
ID = kw['ID']
marker = kw.get('marker', '<split>')
req = kw['trans'].request()
URI = req.environ()['SCRIPT_NAME'] + req.environ()['PATH_INFO']
queryVar = 'pager' + str(ID) + '_page'
fields = req.fields()
page = int(fields.get( queryVar, 1))
pages = output.split(marker)
output = pages[page-1]
output += '<BR>'
if page > 1:
output +='<A HREF="' + URI + self.buildQString(fields, {queryVar:max(page-1,1)}) + \
'">Previous Page</A> '
if page < len(pages):
output += '<A HREF="' + URI + self.buildQString(
fields,
{queryVar:
min(page+1,len(pages))}) + \
'">Next Page</A>'
return output
return output
#####
class WebSafe(RawOrEncodedUnicode):
"""Escape HTML entities in $placeholders.
"""
def filter(self, val, **kw):
s = super(WebSafe, self).filter(val, **kw)
# These substitutions are copied from cgi.escape().
s = s.replace("&", "&") # Must be done first!
s = s.replace("<", "<")
s = s.replace(">", ">")
# Process the additional transformations if any.
if kw.has_key('also'):
also = kw['also']
entities = webSafeEntities # Global variable.
for k in also:
if entities.has_key(k):
v = entities[k]
else:
v = "&#%s;" % ord(k)
s = s.replace(k, v)
# Return the puppy.
return s
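# Illustrative calls:
#   WebSafe().filter('a < b & c')            # -> 'a &lt; b &amp; c'
#   WebSafe().filter('say "hi"', also='"')   # -> 'say &quot;hi&quot;'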
#####
class Strip(RawOrEncodedUnicode):
"""Strip leading/trailing whitespace but preserve newlines.
This filter goes through the value line by line, removing leading and
trailing whitespace on each line. It does not strip newlines, so every
input line corresponds to one output line, with its trailing newline intact.
We do not use val.split('\n') because that would squeeze out consecutive
blank lines. Instead, we search for each newline individually. This
makes us unable to use the fast C .split method, but it makes the filter
much more widely useful.
This filter is intended to be usable both with the #filter directive and
with the proposed #sed directive (which has not been ratified yet.)
"""
def filter(self, val, **kw):
s = super(Strip, self).filter(val, **kw)
result = []
start = 0 # The current line will be s[start:end].
while 1: # Loop through each line.
end = s.find('\n', start) # Find next newline.
if end == -1: # If no more newlines.
break
chunk = s[start:end].strip()
result.append(chunk)
result.append('\n')
start = end + 1
# Write the unfinished portion after the last newline, if any.
chunk = s[start:].strip()
result.append(chunk)
return "".join(result)
#####
class StripSqueeze(RawOrEncodedUnicode):
"""Canonicalizes every chunk of whitespace to a single space.
Strips leading/trailing whitespace. Removes all newlines, so multi-line
    input is joined into one long line with NO trailing newline.
"""
def filter(self, val, **kw):
s = super(StripSqueeze, self).filter(val, **kw)
s = s.split()
return " ".join(s)
##################################################
## MAIN ROUTINE -- testing
def test():
s1 = "abc <=> &"
s2 = " asdf \n\t 1 2 3\n"
print "WebSafe INPUT:", `s1`
print " WebSafe:", `WebSafe().filter(s1)`
print
print " Strip INPUT:", `s2`
print " Strip:", `Strip().filter(s2)`
print "StripSqueeze:", `StripSqueeze().filter(s2)`
print "Unicode:", `EncodeUnicode().filter(u'aoeu12345\u1234')`
if __name__ == "__main__": test()
# vim: shiftwidth=4 tabstop=4 expandtab
| mit | -7,940,394,277,864,934,000 | 32.27931 | 145 | 0.54411 | false |
skosukhin/spack | var/spack/repos/builtin/packages/r-base64/package.py | 1 | 1765 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RBase64(RPackage):
"""Compatibility wrapper to replace the orphaned package by Romain
Francois. New applications should use the 'openssl' or 'base64enc'
package instead."""
homepage = "https://cran.r-project.org/package=base64"
url = "https://cran.rstudio.com/src/contrib/base64_2.0.tar.gz"
list_url = "https://cran.r-project.org/src/contrib/Archive/base64"
version('2.0', 'f5a653842f75ad717ef6a00969868ae5')
depends_on('r-openssl', type=('build', 'run'))
| lgpl-2.1 | -1,151,951,975,833,587,200 | 44.25641 | 78 | 0.675921 | false |
Skylion007/popupcad | dev_tools/propertyeditor.py | 1 | 11614 | # -*- coding: utf-8 -*-
"""
Written by Daniel M. Aukes.
Email: danaukes<at>seas.harvard.edu.
Please see LICENSE.txt for full license.
"""
import PySide.QtCore as qc
import PySide.QtGui as qg
class Child(object):
# display = ['a','b','c','d','e']
editable = ['a', 'b', 'c']
new_attr = {'<new_string>': '', '<new_float>': 0.0}
deletable = ['*']
attr_names_editable = True
def __init__(self):
self.a = range(5)
self.b = 'text'
self.c = 4.5
self.d = False
self.e = 5
class Depth(object):
# hidden = ['depth']
depth = -1
def __init__(self):
if Depth.depth < 10:
Depth.depth += 1
self.a = Depth()
class Parent(object):
# display = ['child','tuple1','list1','x','a','dict1']
editable = ['child', 'tuple1', 'list1', 'x', 'a', 'dict1']
deletable = ['*']
attr_names_editable = True
def __init__(self):
self.child = Child()
self.tuple1 = (234.5, 1, Child())
self.list1 = [234.5, 1, Child()]
self.x = 123.4
self.a = 5
self.dict1 = {'a': 5, 'b': 6.6}
self.recursiveclass = Depth()
class ParentItem(object):
valuekeys = [int, float, bool, str]
listkeys = [tuple, list]
def __init__(self, value, treewidgetparent):
self.structureparent = treewidgetparent
self.value = value
self.valuetype = type(value)
self.children = []
self.level = 1
self.generatechildren(self.structureparent)
def addChild(self, item):
self.children.append(item)
self.structureparent.addTopLevelItem(item)
def removeattr(self, key):
try:
delattr(self.value, key)
except AttributeError:
pass
def removeAllChildren(self):
for ii in range(len(self.children))[::-1]:
self.structureparent.takeTopLevelItem(ii)
def setchild(self, key, value):
if self.valuetype in self.valuekeys:
raise Exception
elif self.valuetype == list:
index = int(key[1:-1])
self.value[index] = value
# self.updateparent()
elif self.valuetype == tuple:
index = int(key[1:-1])
oldtuple = list(self.value)
oldtuple[index] = value
self.value = tuple(oldtuple)
self.updateparent()
elif self.valuetype == dict:
self.value[key] = value
else:
setattr(self.value, key, value)
def updateparent(self):
pass
def parent(self):
return self.structureparent
def sortChildren(self, *args, **kwargs):
self.structureparent.sortItems(*args, **kwargs)
def generatechildren(self, parent):
if self.valuetype in self.valuekeys:
pass
elif (self.valuetype == tuple) or (self.valuetype == list):
for ii, item in enumerate(self.value):
self.addChild(
TreeWidgetItem(
parent,
self,
'[{index:0d}]'.format(
index=ii),
item,
self.level +
1))
elif self.valuetype == dict:
for key, value in self.value.items():
self.addChild(
TreeWidgetItem(
parent,
self,
key,
value,
self.level +
1))
else:
keys = dir(self.value)
class dummy(object):
pass
commonkeys = dir(dummy())
keys = list(set(keys) - set(commonkeys))
try:
keys = list(set(keys) - set(self.value.hidden))
except AttributeError:
pass
keys2 = []
for key in keys:
try:
if key[0] != '_':
keys2.append(key)
except IndexError:
keys2.append(key)
keys = keys2
try:
keys = list(set(keys).intersection(self.value.display))
except AttributeError:
pass
keys = list(set(keys) - set(['hidden',
'editable',
'deletable',
'new_attr',
'attr_names_editable']))
classkeys = []
for key in keys:
value = getattr(self.value, key)
if hasattr(value, '__call__'):
pass
else:
classkeys.append(key)
classkeys.sort()
for key in classkeys:
value = getattr(self.value, key)
self.addChild(
TreeWidgetItem(
parent,
self,
key,
value,
self.level +
1))
try:
for key, value in self.value.new_attr.items():
self.addChild(
InsertNewWidgetItem(
parent,
self,
self.level + 1,
key,
value))
except AttributeError:
pass
self.sortChildren(0, qc.Qt.SortOrder.AscendingOrder)
def refresh(self):
self.removeAllChildren()
self.generatechildren(self.structureparent)
class TreeWidgetItem(qg.QTreeWidgetItem, ParentItem):
depthlimit = 5
def __init__(self, parent, dataparent, key, value, level):
self.structureparent = self
self.dataparent = dataparent
self.key = key
self.value = value
self.valuetype = type(value)
self.level = level
qg.QTreeWidgetItem.__init__(self, parent, [key, str(value)])
self.setFlags(qc.Qt.ItemFlag.ItemIsEditable |
qc.Qt.ItemFlag.ItemIsEnabled | qc.Qt.ItemFlag.ItemIsSelectable)
if self.level < self.depthlimit:
self.generatechildren(self)
def updateparent(self):
self.dataparent.setchild(self.key, self.value)
def parent(self):
return qg.QTreeWidgetItem.parent(self)
def removeAllChildren(self):
for ii in range(self.childCount())[::-1]:
self.removeChild(self.child(ii))
def is_editable(self, column):
if column == 1:
try:
result = (
self.key in self.dataparent.value.editable) or (
'*' in self.dataparent.value.editable)
return result
except AttributeError:
if self.dataparent.valuetype in self.dataparent.listkeys:
return self.dataparent.is_editable(column)
return False
if column == 0:
try:
return self.dataparent.value.attr_names_editable
except AttributeError:
return False
else:
return False
def is_deletable(self):
try:
return self.key in self.dataparent.value.deletable or '*'in self.dataparent.value.deletable
except AttributeError:
if self.dataparent.valuetype in self.dataparent.listkeys:
return False
return False
def setData(self, column, role, value):
if role == qc.Qt.ItemDataRole.EditRole:
if self.is_editable(column):
if column == 1:
if self.valuetype == bool:
value = value.lower()
if (value == 'false') or (
value == '0') or (value == 'f'):
self.value = False
elif (value == 'true') or (value == '1') or (value == 't'):
self.value = True
self.updateparent()
elif self.valuetype in self.valuekeys:
self.value = self.valuetype(value)
self.updateparent()
if column == 0:
if not hasattr(self.dataparent.value, value):
self.dataparent.removeattr(value)
self.dataparent.removeattr(self.key)
self.key = value
self.updateparent()
self.dataparent.refresh()
def data(self, column, role):
if column == 0:
if role == qc.Qt.ItemDataRole.DisplayRole:
return self.key
if role == qc.Qt.ItemDataRole.EditRole:
return self.key
elif column == 1:
if role == qc.Qt.ItemDataRole.DisplayRole:
return str(self.value)
if role == qc.Qt.ItemDataRole.EditRole:
return str(self.value)
class InsertNewWidgetItem(TreeWidgetItem):
def __init__(
self,
parent,
dataparent,
level,
key='<new string>',
value=''):
self.dataparent = dataparent
self.key = key
self.value = value
self.valuetype = type(value)
self.level = level
qg.QTreeWidgetItem.__init__(self, parent, [key, str(value)])
self.setFlags(qc.Qt.ItemFlag.ItemIsEditable |
qc.Qt.ItemFlag.ItemIsEnabled | qc.Qt.ItemFlag.ItemIsSelectable)
if self.level < self.depthlimit:
self.generatechildren(self)
def setData(self, column, role, value):
if column == 0:
if role == qc.Qt.ItemDataRole.EditRole:
if isinstance(self.value, list):
newvalue = self.value[:]
elif isinstance(self.value, tuple):
newvalue = self.value[:]
elif type(self.value) in ParentItem.valuekeys:
newvalue = self.value
else:
newvalue = self.value.copy()
newtwi = TreeWidgetItem(
self.parent(),
self.dataparent,
value,
newvalue,
self.dataparent.level +
1)
self.dataparent.addChild(newtwi)
try:
self.dataparent.value.editable.append(value)
except AttributeError:
pass
newtwi.updateparent()
self.dataparent.sortChildren(0, qc.Qt.SortOrder.AscendingOrder)
class PropertyEditor(qg.QTreeWidget):
def __init__(self, data, *args, **kwargs):
super(PropertyEditor, self).__init__(*args, **kwargs)
self.setHeaderLabels(['key', 'value'])
self.parent = ParentItem(data, self)
self.setAlternatingRowColors(True)
def keyPressEvent(self, event):
if event.key() == qc.Qt.Key_Delete:
item = self.selectedItems()[0]
if item.is_deletable():
item.dataparent.removeattr(item.key)
item.dataparent.refresh()
if __name__ == '__main__':
import sys
app = qg.QApplication(sys.argv)
parent = Parent()
pv = PropertyEditor(parent)
pv.show()
# sys.exit(app.exec_())
| mit | 4,632,046,971,831,045,000 | 30.906593 | 103 | 0.482779 | false |
pjh/vm-analyze | analyze/RssEvent.py | 1 | 1166 | # Virtual memory analysis scripts.
# Developed 2012-2014 by Peter Hornyack, [email protected]
# Copyright (c) 2012-2014 Peter Hornyack and University of Washington
import trace.vm_common as vm
'''
Describes trace events that caused the rss page count (resident in physical
memory) to change.
'''
class RssEvent:
tag = "RssEvent"
rss_pages = None
timestamp = None
# rss_dict is a mapping from RSS_TYPES to page counts. After
# initialization, the RssEvent.rss_pages mapping is guaranteed to
# have an entry for each type in RSS_TYPES.
def __init__(self, rss_dict, timestamp):
tag = "{0}.__init__".format(self.tag)
if not rss_dict or not timestamp:
print_error_exit(tag, ("invalid arg: rss_dict={}, timestamp="
"{}").format(rss_dict, timestamp))
self.rss_pages = dict()
for rss_type in vm.RSS_TYPES:
# todo: make RSS_TYPES an enum, and just use a list here
# instead of creating yet another dict.
if rss_type in rss_dict:
self.rss_pages[rss_type] = rss_dict[rss_type]
else:
self.rss_pages[rss_type] = 0
self.timestamp = timestamp
return
if __name__ == '__main__':
print_error_exit("not an executable module")
| bsd-3-clause | 1,830,437,122,687,074,300 | 28.15 | 75 | 0.696398 | false |
wpoely86/easybuild-easyblocks | easybuild/easyblocks/t/tinker.py | 1 | 6440 | ##
# Copyright 2009-2016 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing TINKER, implemented as an easyblock
@author: Kenneth Hoste (Ghent University)
"""
import fileinput
import glob
import os
import re
import shutil
import sys
import tempfile
import easybuild.tools.toolchain as toolchain
from easybuild.framework.easyblock import EasyBlock
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.filetools import mkdir
from easybuild.tools.modules import get_software_root
from easybuild.tools.run import run_cmd
from easybuild.tools.systemtools import DARWIN, LINUX, get_os_type
class EB_TINKER(EasyBlock):
"""Support for building/installing TINKER."""
def __init__(self, *args, **kwargs):
"""Custom easyblock constructor for TINKER: initialise class variables."""
super(EB_TINKER, self).__init__(*args, **kwargs)
self.build_subdir = None
self.build_in_installdir = True
def configure_step(self):
"""Custom configuration procedure for TINKER."""
# make sure FFTW is available
if get_software_root('FFTW') is None:
raise EasyBuildError("FFTW dependency is not available.")
os_dirs = {
LINUX: 'linux',
DARWIN: 'macosx',
}
os_type = get_os_type()
os_dir = os_dirs.get(os_type)
if os_dir is None:
raise EasyBuildError("Failed to determine OS directory for %s (known: %s)", os_type, os_dirs)
comp_dirs = {
toolchain.INTELCOMP: 'intel',
toolchain.GCC: 'gfortran',
}
comp_fam = self.toolchain.comp_family()
comp_dir = comp_dirs.get(comp_fam)
if comp_dir is None:
raise EasyBuildError("Failed to determine compiler directory for %s (known: %s)", comp_fam, comp_dirs)
self.build_subdir = os.path.join(os_dir, comp_dir)
self.log.info("Using build scripts from %s subdirectory" % self.build_subdir)
# patch 'link.make' script to use FFTW provided via EasyBuild
link_make_fp = os.path.join(self.cfg['start_dir'], self.build_subdir, 'link.make')
for line in fileinput.input(link_make_fp, inplace=1, backup='.orig'):
line = re.sub(r"libfftw3_threads.a libfftw3.a", r"-L$EBROOTFFTW/lib -lfftw3_threads -lfftw3", line)
sys.stdout.write(line)
def build_step(self):
"""Custom build procedure for TINKER."""
source_dir = os.path.join(self.cfg['start_dir'], 'source')
try:
os.chdir(source_dir)
except OSError, err:
raise EasyBuildError("Failed to move to %s: %s", source_dir, err)
run_cmd(os.path.join(self.cfg['start_dir'], self.build_subdir, 'compile.make'))
run_cmd(os.path.join(self.cfg['start_dir'], self.build_subdir, 'library.make'))
run_cmd(os.path.join(self.cfg['start_dir'], self.build_subdir, 'link.make'))
def test_step(self):
"""Custom built-in test procedure for TINKER."""
if self.cfg['runtest']:
# copy tests, params and built binaries to temporary directory for testing
tmpdir = tempfile.mkdtemp()
testdir = os.path.join(tmpdir, 'test')
mkdir(os.path.join(tmpdir, 'bin'))
binaries = glob.glob(os.path.join(self.cfg['start_dir'], 'source', '*.x'))
try:
for binary in binaries:
shutil.copy2(binary, os.path.join(tmpdir, 'bin', os.path.basename(binary)[:-2]))
shutil.copytree(os.path.join(self.cfg['start_dir'], 'test'), testdir)
shutil.copytree(os.path.join(self.cfg['start_dir'], 'params'), os.path.join(tmpdir, 'params'))
except OSError, err:
raise EasyBuildError("Failed to copy binaries and tests to %s: %s", tmpdir, err)
try:
os.chdir(testdir)
except OSError, err:
raise EasyBuildError("Failed to move to %s to run tests: %s", testdir, err)
# run all tests via the provided 'run' scripts
tests = glob.glob(os.path.join(testdir, '*.run'))
            # gpcr takes too long (~1h), ifabp fails due to input issues (?)
tests = [t for t in tests if not (t.endswith('gpcr.run') or t.endswith('ifabp.run'))]
for test in tests:
run_cmd(test)
def install_step(self):
"""Custom install procedure for TINKER."""
source_dir = os.path.join(self.cfg['start_dir'], 'source')
try:
os.chdir(source_dir)
except OSError, err:
raise EasyBuildError("Failed to move to %s: %s", source_dir, err)
mkdir(os.path.join(self.cfg['start_dir'], 'bin'))
run_cmd(os.path.join(self.cfg['start_dir'], self.build_subdir, 'rename.make'))
def sanity_check_step(self):
"""Custom sanity check for TINKER."""
custom_paths = {
'files': ['tinker/source/libtinker.a'],
'dirs': ['tinker/bin'],
}
super(EB_TINKER, self).sanity_check_step(custom_paths=custom_paths)
def make_module_req_guess(self):
"""Custom guesses for module file prepend-path statements."""
guesses = super(EB_TINKER, self).make_module_req_guess()
guesses['PATH'].append(os.path.join('tinker', 'bin'))
guesses['LIBRARY_PATH'].append(os.path.join('tinker', 'source'))
return guesses
| gpl-2.0 | 8,215,122,396,294,133,000 | 40.548387 | 114 | 0.63323 | false |
jiaphuan/models | research/object_detection/models/faster_rcnn_resnet_v1_feature_extractor_test.py | 1 | 5797 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.models.faster_rcnn_resnet_v1_feature_extractor."""
import numpy as np
import tensorflow as tf
from object_detection.models import faster_rcnn_resnet_v1_feature_extractor as faster_rcnn_resnet_v1
class FasterRcnnResnetV1FeatureExtractorTest(tf.test.TestCase):
def _build_feature_extractor(self,
first_stage_features_stride,
architecture='resnet_v1_101'):
feature_extractor_map = {
'resnet_v1_50':
faster_rcnn_resnet_v1.FasterRCNNResnet50FeatureExtractor,
'resnet_v1_101':
faster_rcnn_resnet_v1.FasterRCNNResnet101FeatureExtractor,
'resnet_v1_152':
faster_rcnn_resnet_v1.FasterRCNNResnet152FeatureExtractor
}
return feature_extractor_map[architecture](
is_training=False,
first_stage_features_stride=first_stage_features_stride,
batch_norm_trainable=False,
reuse_weights=None,
weight_decay=0.0)
def test_extract_proposal_features_returns_expected_size(self):
for architecture in ['resnet_v1_50', 'resnet_v1_101', 'resnet_v1_152']:
feature_extractor = self._build_feature_extractor(
first_stage_features_stride=16, architecture=architecture)
preprocessed_inputs = tf.random_uniform(
[4, 224, 224, 3], maxval=255, dtype=tf.float32)
rpn_feature_map = feature_extractor.extract_proposal_features(
preprocessed_inputs, scope='TestScope')
features_shape = tf.shape(rpn_feature_map)
init_op = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
features_shape_out = sess.run(features_shape)
self.assertAllEqual(features_shape_out, [4, 14, 14, 1024])
def test_extract_proposal_features_stride_eight(self):
feature_extractor = self._build_feature_extractor(
first_stage_features_stride=8)
preprocessed_inputs = tf.random_uniform(
[4, 224, 224, 3], maxval=255, dtype=tf.float32)
rpn_feature_map = feature_extractor.extract_proposal_features(
preprocessed_inputs, scope='TestScope')
features_shape = tf.shape(rpn_feature_map)
init_op = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
features_shape_out = sess.run(features_shape)
self.assertAllEqual(features_shape_out, [4, 28, 28, 1024])
def test_extract_proposal_features_half_size_input(self):
feature_extractor = self._build_feature_extractor(
first_stage_features_stride=16)
preprocessed_inputs = tf.random_uniform(
[1, 112, 112, 3], maxval=255, dtype=tf.float32)
rpn_feature_map = feature_extractor.extract_proposal_features(
preprocessed_inputs, scope='TestScope')
features_shape = tf.shape(rpn_feature_map)
init_op = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
features_shape_out = sess.run(features_shape)
self.assertAllEqual(features_shape_out, [1, 7, 7, 1024])
def test_extract_proposal_features_dies_on_invalid_stride(self):
with self.assertRaises(ValueError):
self._build_feature_extractor(first_stage_features_stride=99)
def test_extract_proposal_features_dies_on_very_small_images(self):
feature_extractor = self._build_feature_extractor(
first_stage_features_stride=16)
preprocessed_inputs = tf.placeholder(tf.float32, (4, None, None, 3))
rpn_feature_map = feature_extractor.extract_proposal_features(
preprocessed_inputs, scope='TestScope')
features_shape = tf.shape(rpn_feature_map)
init_op = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
with self.assertRaises(tf.errors.InvalidArgumentError):
sess.run(
features_shape,
feed_dict={preprocessed_inputs: np.random.rand(4, 32, 32, 3)})
def test_extract_proposal_features_dies_with_incorrect_rank_inputs(self):
feature_extractor = self._build_feature_extractor(
first_stage_features_stride=16)
preprocessed_inputs = tf.random_uniform(
[224, 224, 3], maxval=255, dtype=tf.float32)
with self.assertRaises(ValueError):
feature_extractor.extract_proposal_features(
preprocessed_inputs, scope='TestScope')
def test_extract_box_classifier_features_returns_expected_size(self):
feature_extractor = self._build_feature_extractor(
first_stage_features_stride=16)
proposal_feature_maps = tf.random_uniform(
[3, 7, 7, 1024], maxval=255, dtype=tf.float32)
proposal_classifier_features = (
feature_extractor.extract_box_classifier_features(
proposal_feature_maps, scope='TestScope'))
features_shape = tf.shape(proposal_classifier_features)
init_op = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
features_shape_out = sess.run(features_shape)
self.assertAllEqual(features_shape_out, [3, 7, 7, 2048])
if __name__ == '__main__':
tf.test.main()
| apache-2.0 | -1,776,759,771,459,436,300 | 41.313869 | 100 | 0.683112 | false |
upconsulting/IsisCB | isiscb/isisdata/forms.py | 1 | 10686 | from __future__ import unicode_literals
from haystack.forms import FacetedSearchForm
from django import forms
from django.db import models
from django.apps import apps
from django.utils.text import capfirst
from django.utils.translation import ugettext_lazy as _
from haystack import connections
from haystack.constants import DEFAULT_ALIAS
from haystack.query import EmptySearchQuerySet, SearchQuerySet
from haystack.utils import get_model_ct
from haystack.inputs import Clean
from captcha.fields import CaptchaField
from allauth.account.forms import SignupForm
import time
from isisdata import helper_methods
from django.contrib.auth.models import User
from isisdata.models import Citation, Authority
from openurl.models import *
import re
try:
from django.utils.encoding import smart_text
except ImportError:
from django.utils.encoding import smart_unicode as smart_text
def model_choices(using=DEFAULT_ALIAS):
choices = [(get_model_ct(m), capfirst(smart_text(m._meta.verbose_name_plural)))
for m in connections[using].get_unified_index().get_indexed_models()]
return sorted(choices, key=lambda x: x[1])
class MyFacetedSearchForm(FacetedSearchForm):
sort_order_citation = forms.CharField(required=False, widget=forms.HiddenInput, initial='publication_date_for_sort')
sort_order_dir_citation = forms.CharField(required=False, widget=forms.HiddenInput, initial='descend')
sort_order_dir_authority = forms.CharField(required=False, widget=forms.HiddenInput, initial='ascend')
raw_search = forms.BooleanField(required=False, widget=forms.HiddenInput, initial='')
def __init__(self, *args, **kwargs):
super(MyFacetedSearchForm, self).__init__(*args, **kwargs)
self.excluded_facets = kwargs.get('data', {}).getlist('excluded_facets', [])
self.facet_operators = kwargs.get('data', {}).getlist('facet_operators', [])
def get_authority_model(self):
"""Return an alphabetical list of model classes in the index."""
search_models = []
if self.is_valid():
search_models.append(apps.get_model(*'isisdata.authority'.split('.')))
# search_models.append(models.get_model(*'isisdata.authority'.split('.')))
return search_models
def get_citation_model(self):
"""Return an alphabetical list of model classes in the index."""
search_models = []
if self.is_valid():
search_models.append(apps.get_model(*'isisdata.citation'.split('.')))
return search_models
def get_sort_order_citation(self):
sort_order = 'publication_date_for_sort'
if self.is_valid():
sort_order = self.cleaned_data.get('sort_order_citation', 'publication_date_for_sort')
if not sort_order:
sort_order = 'publication_date_for_sort'
#if not sort_order and self.cleaned_data['models'] == 'isisdata.authority':
# sort_order = 'name'
return sort_order
def get_sort_order_authority(self):
sort_order = 'name'
if self.is_valid():
sort_order = self.cleaned_data.get('sort_order_authority', 'name')
if not sort_order:
sort_order = 'name'
#if not sort_order and self.cleaned_data['models'] == 'isisdata.authority':
# sort_order = 'name'
return sort_order
def get_sort_order_direction_citation(self):
sort_order_dir = 'descend'
if self.is_valid():
sort_order_dir = self.cleaned_data.get('sort_order_dir_citation', 'ascend')
if not sort_order_dir:
sort_by = self.cleaned_data.get('sort_order_citation', 'publication_date_for_sort')
if (sort_by == 'publication_date_for_sort' or not sort_by):
sort_order_dir = 'descend'
else:
sort_order_dir = 'ascend'
return sort_order_dir
def get_sort_order_direction_authority(self):
sort_order_dir = 'ascend'
if self.is_valid():
sort_order_dir = self.cleaned_data.get('sort_order_dir_authority', 'ascend')
if not sort_order_dir:
sort_order_dir = 'ascend'
return sort_order_dir
def has_specified_field(self, query_string):
query_parameters = query_string.split(':')
# no field specified
if len(query_parameters) <= 1:
return (query_string, 'content')
# field might be specified but with preceeding blank
# so we ignore it
if query_parameters[1].startswith(' '):
return (query_string, 'content')
return (query_string[len(query_parameters[0]) + 1:], query_parameters[0])
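    # e.g. has_specified_field('title:darwin') returns ('darwin', 'title');
    # 'title: darwin' (blank after the colon) and plain 'darwin' both fall
    # back to the default field: ('title: darwin', 'content') and
    # ('darwin', 'content') respectively.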
def search(self):
if not self.is_valid():
#return self.no_query_found()
return {'authority' : self.no_query_found(),
'citation': self.no_query_found()}
if not self.cleaned_data.get('q'):
#return self.no_query_found()
return {'authority' : self.no_query_found(),
'citation': self.no_query_found()}
is_raw_search = self.cleaned_data['raw_search']
query_tuple = self.has_specified_field(self.cleaned_data['q'])
# Removed: query sanitization already occurs (by design) in the
# (haystack) Query used by the SearchEngine. We're clobbering wildcards
# here. We can add it back if there is a security issue, but it seems
# like this should all happen in the search backend. -EP
#
# if query_tuple[1] == 'content':
# qstring = helper_methods.normalize(qstring)
# if we want several fields specified etc, we need to set the raw_search flag
if not is_raw_search:
sqs = self.searchqueryset.auto_query(*query_tuple)
else:
sqs = self.searchqueryset.raw_search(self.cleaned_data['q'])
sqs_citation = sqs.load_all()
sqs_authority = sqs_citation
# We apply faceting ourselves.
sqs_citation = self.set_facets(self.selected_facets, sqs_citation, "citation_", self.facet_operators)
sqs_authority = self.set_facets(self.selected_facets, sqs_authority, "authority_", self.facet_operators)
# exclude facets
sqs_citation = self.exclude_facets(self.excluded_facets, sqs_citation, "citation_")
sqs_authority = self.exclude_facets(self.excluded_facets, sqs_authority, "authority_")
sort_order_citation = self.get_sort_order_citation()
sort_order_authority = self.get_sort_order_authority()
sort_order_dir_citation = self.get_sort_order_direction_citation()
sort_order_dir_authority = self.get_sort_order_direction_authority()
if sort_order_dir_citation == 'descend':
sort_order_citation = "-" + sort_order_citation
if sort_order_dir_authority == 'descend':
sort_order_authority = "-" + sort_order_authority
results_authority = sqs_authority.models(*self.get_authority_model()).filter(public=True).order_by(sort_order_authority)
results_citation = sqs_citation.models(*self.get_citation_model()).filter(public=True).order_by(sort_order_citation)
return {'authority' : results_authority,
'citation': results_citation}
def set_facets(self, selected_facets, sqs, type_string, facet_operators):
operators = {}
for op in facet_operators:
op_type, operator = op.split(":", 1)
operators[op_type] = operator
or_facets = {}
for facet in selected_facets:
if ":" not in facet:
continue
field, value = facet.split(":", 1)
field = field.strip()
value = value.strip()
if value and field.startswith(type_string):
field = field[len(type_string):]
# if facets should be connected with and just narrow query
# otherwise save value for combined query later
if operators.get(field, 'and') == 'or':
value_list = or_facets.setdefault(field, [])
value_list.append(value)
else:
sqs = sqs.narrow(u'%s:"%s"' % (field, Clean(value)))
        # combine the collected 'or'-facet values into a single OR query
for or_facet in list(or_facets.keys()):
query_str = ' OR '.join(or_facets[or_facet])
sqs = sqs.narrow(u'%s:%s' % (or_facet, Clean('(' + query_str + ')')))
return sqs
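    # Illustrative inputs (facet names are hypothetical): with
    # selected_facets=['citation_type:Book', 'citation_type:Article'] and
    # facet_operators=['type:or'], both values end up in a single
    # 'type:(Book OR Article)' narrow instead of two AND-ed narrows.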
    def exclude_facets(self, excluded_facets, sqs, type_string):
for facet in excluded_facets:
if ":" not in facet:
continue
field, value = facet.split(":", 1)
field = field.strip()
value = value.strip()
if value and field.startswith(type_string):
field = field[len(type_string):]
if field.endswith('_exact'):
field = field[0:(len(field) - 6)]
# Exclude facets
sqs = sqs.exclude(**{field + "__exact" : Clean(value)})
return sqs
class UserRegistrationForm(SignupForm):
username = forms.CharField()
email = forms.CharField(widget=forms.EmailInput())
password1 = forms.CharField(widget=forms.PasswordInput(), label='Password')
password2 = forms.CharField(widget=forms.PasswordInput(), label='Password (again)')
captcha = CaptchaField()
next = forms.CharField(widget=forms.HiddenInput(), required=False)
def clean_username(self):
username = self.cleaned_data['username']
try:
existing_user = User.objects.get(username=username)
except User.DoesNotExist:
return username
raise forms.ValidationError(u'Username "%s" is already in use.' % username)
class UserProfileForm(forms.Form):
email = forms.CharField(widget=forms.EmailInput(attrs={'class': 'form-control'}))
first_name = forms.CharField(widget=forms.TextInput(attrs={'class': 'form-control'}), required=False)
last_name = forms.CharField(widget=forms.TextInput(attrs={'class': 'form-control'}), required=False)
affiliation = forms.CharField(widget=forms.TextInput(attrs={'class': 'form-control'}), required=False)
location = forms.CharField(widget=forms.TextInput(attrs={'class': 'form-control'}), required=False)
bio = forms.CharField(widget=forms.Textarea(attrs={'class': 'form-control'}), required=False)
share_email = forms.BooleanField(required=False)
resolver_institution = forms.ModelChoiceField(queryset=Institution.objects.all(), widget=forms.Select(attrs={'class': 'form-control'}), required=False)
| mit | 5,529,970,389,522,010,000 | 39.022472 | 155 | 0.627363 | false |
floemker/django-wiki | tests/base.py | 1 | 3478 | import os
import unittest
import django_functest
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from django.template import Context, Template
from django.test import TestCase, override_settings
from django.urls import reverse
from wiki.models import URLPath
SUPERUSER1_USERNAME = 'admin'
SUPERUSER1_PASSWORD = 'secret'
class RequireSuperuserMixin:
def setUp(self):
super().setUp()
from django.contrib.auth import get_user_model
User = get_user_model()
self.superuser1 = User.objects.create_superuser(
SUPERUSER1_USERNAME,
'[email protected]',
SUPERUSER1_PASSWORD
)
class RequireBasicData(RequireSuperuserMixin):
"""
Mixin that creates common data required for all tests.
"""
pass
class TestBase(RequireBasicData, TestCase):
pass
class RequireRootArticleMixin:
def setUp(self):
super().setUp()
self.root = URLPath.create_root()
self.root_article = URLPath.root().article
rev = self.root_article.current_revision
rev.title = "Root Article"
rev.content = "root article content"
rev.save()
class ArticleTestBase(RequireRootArticleMixin, TestBase):
"""
Sets up basic data for testing with an article and some revisions
"""
pass
class DjangoClientTestBase(TestBase):
def setUp(self):
super().setUp()
self.client.login(username=SUPERUSER1_USERNAME, password=SUPERUSER1_PASSWORD)
class WebTestCommonMixin(RequireBasicData, django_functest.ShortcutLoginMixin):
"""
Common setup required for WebTest and Selenium tests
"""
def setUp(self):
super().setUp()
self.shortcut_login(username=SUPERUSER1_USERNAME, password=SUPERUSER1_PASSWORD)
class WebTestBase(WebTestCommonMixin, django_functest.FuncWebTestMixin, TestCase):
pass
INCLUDE_SELENIUM_TESTS = os.environ.get('INCLUDE_SELENIUM_TESTS', '0') == '1'
@unittest.skipUnless(INCLUDE_SELENIUM_TESTS, "Skipping Selenium tests")
class SeleniumBase(WebTestCommonMixin, django_functest.FuncSeleniumMixin, StaticLiveServerTestCase):
driver_name = "Chrome"
display = os.environ.get('SELENIUM_SHOW_BROWSER', '0') == '1'
if not INCLUDE_SELENIUM_TESTS:
# Don't call super() in setUpClass(), it will attempt to instantiate
# a browser instance which is slow and might fail
@classmethod
def setUpClass(cls):
pass
@classmethod
def tearDownClass(cls):
pass
class ArticleWebTestUtils:
def get_by_path(self, path):
"""
Get the article response for the path.
Example: self.get_by_path("Level1/Slug2/").title
"""
return self.client.get(reverse('wiki:get', kwargs={'path': path}))
class TemplateTestCase(TestCase):
@property
def template(self):
raise NotImplementedError("Subclasses must implement this")
def render(self, context):
return Template(self.template).render(Context(context))
# See
# https://github.com/django-wiki/django-wiki/pull/382
class wiki_override_settings(override_settings):
def enable(self):
super().enable()
self.reload_wiki_settings()
def disable(self):
super().disable()
self.reload_wiki_settings()
def reload_wiki_settings(self):
from importlib import reload
from wiki.conf import settings
reload(settings)
| gpl-3.0 | 8,459,457,111,415,504,000 | 24.573529 | 100 | 0.679126 | false |
gratipay/gratipay.com | tests/py/test_tip_migration.py | 1 | 3177 | from __future__ import absolute_import, division, print_function, unicode_literals
import pytest
from gratipay.testing import Harness
from gratipay.models.team.tip_migration import AlreadyMigrated, migrate_all_tips
class Tests(Harness):
def setUp(self):
self.admin = self.make_participant('admin', is_admin=True)
self.alice = self.make_participant('alice', claimed_time='now')
self.bob = self.make_participant('bob', claimed_time='now')
self.make_participant('old_team')
self.make_tip(self.alice, 'old_team', '1.00')
self.make_tip(self.bob, 'old_team', '2.00')
self.new_team = self.make_team('new_team', owner='old_team', is_approved=True)
def setTeamStatus(self, status):
self.client.POST('/new_team/set-status.json', data={'status': status}, auth_as='admin')
def capturer(self):
captured = []
def capture(line):
captured.append(line)
return capture, captured
# mt - migrate_tips
def test_mt_migrates_tips_to_payment_instructions(self):
assert self.new_team.migrate_tips() == 2
payment_instructions = self.db.all("SELECT * FROM payment_instructions "
"ORDER BY participant_id ASC")
assert len(payment_instructions) == 2
assert payment_instructions[0].participant_id == self.alice.id
assert payment_instructions[0].team_id == self.new_team.id
assert payment_instructions[0].amount == 1
assert payment_instructions[1].participant_id == self.bob.id
assert payment_instructions[1].team_id == self.new_team.id
assert payment_instructions[1].amount == 2
def test_mt_only_runs_once(self):
self.new_team.migrate_tips()
with pytest.raises(AlreadyMigrated):
self.new_team.migrate_tips()
assert len(self.db.all("SELECT * FROM payment_instructions")) == 2
def test_mt_checks_for_multiple_teams(self):
self.new_team.migrate_tips()
newer_team = self.make_team('newer_team', owner='old_team')
with pytest.raises(AlreadyMigrated):
newer_team.migrate_tips()
assert len(self.db.all("SELECT * FROM payment_instructions")) == 2
# mat - migrate_all_tips
def test_mat_migrates_all_tips(self):
capture, captured = self.capturer()
migrate_all_tips(self.db, capture)
assert captured == ["Migrated 2 tip(s) for 'new_team'", "Done."]
def test_mat_ignores_already_migrated_teams(self):
capture, captured = self.capturer()
migrate_all_tips(self.db, capture)
del captured[:] # clear first run output
migrate_all_tips(self.db, capture)
assert captured == ["Done."]
def test_mat_ignores_unreviewed_teams(self):
self.setTeamStatus('unreviewed')
capture, captured = self.capturer()
migrate_all_tips(self.db, capture)
assert captured == ["Done."]
def test_mat_ignores_rejected_teams(self):
self.setTeamStatus('rejected')
capture, captured = self.capturer()
migrate_all_tips(self.db, capture)
assert captured == ["Done."]
| mit | -5,699,981,286,030,426,000 | 37.743902 | 95 | 0.635505 | false |
abhishekraok/GraphMap | graphmap/perf_tester.py | 1 | 5189 | import cStringIO
import random
import time
import urllib2
from datetime import datetime
class TileTimeStat:
def __init__(self, x, y, z, time_taken_ms, http_code, time_of_request):
self.x = x
self.y = y
self.z = z
self.time_taken_ms = time_taken_ms
self.http_code = http_code
self.time_of_request = time_of_request
def __repr__(self):
return 'TileTimeStat({},{},{},{} ms. HTTP {})'.format(self.x, self.y, self.z, self.time_taken_ms,
self.http_code)
def to_tsv(self):
return '\t'.join(str(i) for i in [self.x, self.y, self.z, self.time_taken_ms, self.http_code,
self.time_of_request.strftime("%Y%m%d-%H%M%S")])
class AverageTimeStats:
def __init__(self, tile_time_stats_list, end_point):
"""
:type tile_time_stats_list: list of TileTimeStat
"""
self.tile_time_stats_list = tile_time_stats_list
self._average_time_ms = None
self.end_point = end_point
def calc_average_time_ms(self):
if self._average_time_ms is None:
self._average_time_ms = float(sum(i.time_taken_ms for i in self.tile_time_stats_list
                                              if i.http_code != 404)) \
/ len(self.tile_time_stats_list)
return self._average_time_ms
def __str__(self):
return 'Average time for {} tiles is {}'.format(len(self.tile_time_stats_list), self.calc_average_time_ms())
def serialize_to_string(self):
return (self.end_point + '\t' + i.to_tsv() + '\n' for i in self.tile_time_stats_list)
def append_to_file(self, filename=None):
if filename is None:
header = '\t'.join(['End Point', 'x', 'y', 'z', 'time taken in ms', 'http code', 'time of request']) + '\n'
final_filename = 'time_stats' + time.strftime("%Y%m%d-%H%M%S") + '.tsv'
else:
final_filename = filename
header = ''
print('Appending to file {} end point {}'.format(final_filename, self.end_point))
with open(final_filename, 'a') as f:
f.write(header)
f.writelines(self.serialize_to_string())
return final_filename
def generate_multiple_random_tiles(count, max_lod):
tile_list = []
for i in xrange(count):
z = random.randint(0, max_lod)
x = random.randint(0, 2 ** z)
y = random.randint(0, 2 ** z)
tile_list.append((x, y, z))
return tile_list
class PerfTester:
def __init__(self, endpoint=None, verbose=False, node_link=None):
self.end_point = endpoint or 'http://localhost:5555'
self.verbose = verbose
self.node_link = node_link
def tile_url(self, x, y, z):
tile_url = self.end_point + '/tile/{}/{}/{}'.format(z, x, y)
if self.node_link:
tile_url += '&node_link=' + self.node_link
return tile_url
def measure_xyz(self, x, y, z):
url = self.tile_url(x, y, z)
start_time = time.time()
http_code = 200
time_of_request = datetime.now()
try:
tile_image_io = cStringIO.StringIO(urllib2.urlopen(url).read())
except urllib2.HTTPError as e:
if e.code == 404:
http_code = 404
end_time = time.time()
time_taken_ms = round((end_time - start_time) * 10 ** 3, 2)
tile_stat = TileTimeStat(x, y, z, time_taken_ms, http_code, time_of_request)
if self.verbose:
print(tile_stat)
return tile_stat
def repeat_measure_xyz(self, x, y, z, repeat=3):
stats = []
for i in range(repeat):
ts = self.measure_xyz(x, y, z)
stats.append(ts)
if ts.http_code == 404:
return stats
time.sleep(0.1)
return stats
def perf_test_multiple_tiles(self, tile_list_to_hit):
all_stats = []
for tile in tile_list_to_hit:
all_stats.extend(self.repeat_measure_xyz(*tile))
return all_stats
def perf_test_random_tiles(self, count, max_lod):
print('Testing random tiles for {} count, with end point {} and node link {}'
.format(count, self.end_point, self.node_link))
tiles_to_hit = generate_multiple_random_tiles(count, max_lod)
all_stats = self.perf_test_multiple_tiles(tiles_to_hit)
return AverageTimeStats(all_stats, self.end_point)
def perf_local(count=100):
print('localhost')
local_performance_tester = PerfTester(verbose=True)
avg_local = local_performance_tester.perf_test_random_tiles(count, 20)
print(avg_local)
avg_local.append_to_file()
def perf_kaii(count=100):
print('kaiimap')
kaiimap_performance_tester = PerfTester(endpoint='http://kaiimap.org', verbose=True,
node_link='start@https://artmapstore.blob.core.windows.net/firstnodes/user/abhishek/start.ver_10.tsv')
avg_kaii = kaiimap_performance_tester.perf_test_random_tiles(count, max_lod=20)
    print(avg_kaii)
avg_kaii.append_to_file()
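

# Illustrative usage sketch (not part of the original module); the counts are
# arbitrary:
# if __name__ == '__main__':
#     perf_local(count=10)
#     perf_kaii(count=10)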
| apache-2.0 | 4,986,538,957,537,860,000 | 36.064286 | 146 | 0.56851 | false |
chris-martin/refund-calculation | python/refund_calculation/tests/test_history_properties.py | 1 | 2426 | from __future__ import absolute_import
import hypothesis
import hypothesis.searchstrategy
import hypothesis.testdecorators
import sure
from collections import namedtuple
from decimal import Decimal
from itertools import count
import sys
from .. import *
from ..util import zip
zero = Decimal('0')
@hypothesis.searchstrategy.strategy_for(Decimal)
class DecimalStrategy(hypothesis.searchstrategy.MappedSearchStrategy):
def __init__(self, strategies, descriptor, **kwargs):
hypothesis.searchstrategy.SearchStrategy.__init__(
self, strategies, descriptor, **kwargs)
self.mapped_strategy = strategies.strategy(int)
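    # hypothesis generates plain ints; pack() maps them to two-decimal-place
    # Decimal amounts and unpack() maps them back for shrinking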
def pack(self, x):
return Decimal(x) / 100
def unpack(self, x):
return int(x * 100)
EventSequence = namedtuple('EventSequence', ('events',))
@hypothesis.searchstrategy.strategy_for(EventSequence)
class EventSequenceStrategy(hypothesis.searchstrategy.MappedSearchStrategy):
def __init__(self, strategies, descriptor, **kwargs):
hypothesis.searchstrategy.SearchStrategy.__init__(
self, strategies, descriptor, **kwargs)
self.mapped_strategy = strategies.strategy([Decimal])
def pack(self, xs):
return EventSequence(make_event_sequence(xs, count(1)))
def unpack(self, xs):
return [x.delta for x in xs.events]
is_sorted = lambda xs: all(a <= b for a, b in zip(xs[:-1], xs[1:]))
@hypothesis.testdecorators.given(EventSequence)
def test_window_order(event_sequence):
"""Windows should be returned in chronological order."""
history = history_from_event_sequence(event_sequence.events)
assert is_sorted([
(w.start, w.end if w.end is not None else sys.maxsize)
for w in history.windows
]), unicode(history.windows)
@hypothesis.testdecorators.given(EventSequence)
def test_delta_sum(event_sequence):
"""The sum of open windows should equal the sum of the deltas."""
history = history_from_event_sequence(event_sequence.events)
sum_of_deltas = sum(e.delta for e in event_sequence.events)
if sum_of_deltas > zero:
sum(w.amount for w in history.open).should.equal(sum_of_deltas)
history.debt.should.equal(zero)
elif sum_of_deltas < zero:
len(history.open).should.equal(0)
history.debt.should.equal(-sum_of_deltas)
else:
len(history.open).should.equal(0)
history.debt.should.equal(zero)
| mit | -5,923,718,451,341,819,000 | 28.585366 | 76 | 0.699918 | false |
tehasdf/AdventOfCode2016 | p23.py | 1 | 1576 |
def p1(inp):
r = dict.fromkeys('abcd', 0)
r['a'] = 12
def getval(x):
return r[x] if x in r else int(x)
instrs = []
for line in inp:
parts = line.strip().split()
cmd = parts[0]
instrs.append([cmd, parts[1:]])
i = 0
c = 0
import pudb; pu.db # NOQA
while i < len(instrs):
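        # peephole optimization: this 5-instruction inc/dec/jnz pattern
        # amounts to a += c * d, so collapse it instead of simulating the
        # nested loop instruction by instruction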
if instrs[i:i + 5] == [
['inc', ['a']],
['dec', ['c']],
['jnz', ['c', '-2']],
['dec', ['d']],
['jnz', ['d', '-5']]
]:
r['a'] += getval('c') * getval('d')
i += 5
continue
cmd, args = instrs[i]
if cmd == 'cpy':
if args[1] in r:
r[args[1]] = getval(args[0])
elif cmd == 'jnz':
if getval(args[0]):
i += getval(args[1]) - 1
elif cmd == 'dec':
r[args[0]] -= 1
elif cmd == 'inc':
r[args[0]] += 1
elif cmd == 'tgl':
pos = i + getval(args[0])
if pos >= len(instrs):
i += 1
c += 1
if c % 100000 == 0:
print c, i, r
continue
tgt = instrs[pos]
if len(tgt[1]) == 2:
tgt[0] = 'cpy' if tgt[0] == 'jnz' else 'jnz'
elif len(tgt[1]) == 1:
tgt[0] = 'dec' if tgt[0] == 'inc' else 'inc'
i += 1
c += 1
if c % 100000 == 0:
print c, i, r
return r['a']
with open('input_23.txt') as f:
print p1(f)
| mit | 4,577,882,635,814,515,000 | 24.836066 | 60 | 0.350254 | false |
akvo/akvo-rsr | akvo/rsr/migrations/0014_auto_20150803_1017.py | 1 | 2541 | # -*- coding: utf-8 -*-
from django.db import models, migrations
import akvo.rsr.fields
def unique_organisation_names(apps, schema_editor):
"""Make sure that the organisation.name and organisation.long_name are unique."""
Organisation = apps.get_model('rsr', 'Organisation')
org_double_name = {}
org_double_long_name = {}
for organisation in Organisation.objects.all():
org_names = Organisation.objects.filter(name=organisation.name)
if org_names.count() > 1 and organisation.name not in org_double_name:
org_double_name[organisation.name] = org_names
org_long_names = Organisation.objects.filter(long_name=organisation.long_name)
if org_long_names.count() > 1 and organisation.long_name not in org_double_long_name:
org_double_long_name[organisation.long_name] = org_long_names
for double_name in org_double_name:
org_qs = org_double_name[double_name]
for i, org in enumerate(org_qs):
if not i == 0:
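                # make the name unique by appending ' (N)', truncating first
                # so the result stays within the 25-character limit enforced
                # by the AlterField operation below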
if len(org.name) > 21:
org.name = org.name[:-4] + ' (' + str(i + 1) + ')'
else:
org.name += ' (' + str(i + 1) + ')'
org.save()
for double_long_name in org_double_long_name:
org_ln_qs = org_double_long_name[double_long_name]
for j, org in enumerate(org_ln_qs):
if not j == 0:
if len(org.long_name) > 71:
org.long_name = org.long_name[:-4] + ' (' + str(j + 1) + ')'
else:
org.long_name += ' (' + str(j + 1) + ')'
org.save()
class Migration(migrations.Migration):
dependencies = [
('rsr', '0013_auto_20150803_0905'),
]
operations = [
migrations.RunPython(
unique_organisation_names
),
migrations.AlterField(
model_name='organisation',
name='long_name',
field=akvo.rsr.fields.ValidXMLCharField(help_text='Full name of organisation (75 characters).', unique=True, max_length=75, verbose_name='long name', blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='organisation',
name='name',
field=akvo.rsr.fields.ValidXMLCharField(help_text='Short name which will appear in organisation and partner listings (25 characters).', unique=True, max_length=25, verbose_name='name', db_index=True),
preserve_default=True,
),
]
| agpl-3.0 | -1,828,263,784,954,637,800 | 36.925373 | 212 | 0.576151 | false |
CLVsol/clvsol_odoo_addons | clv_family_aux/models/family.py | 1 | 3314 | # -*- coding: utf-8 -*-
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
import logging
from odoo import api, fields, models
_logger = logging.getLogger(__name__)
class FamilyAux(models.Model):
_inherit = 'clv.family_aux'
related_family_is_unavailable = fields.Boolean(
string='Related Family is unavailable',
default=False,
)
related_family_id = fields.Many2one(comodel_name='clv.family', string='Related Family', ondelete='restrict')
related_family_name = fields.Char(string='Related Family Name', related='related_family_id.name')
related_family_code = fields.Char(string='Related Family Code', related='related_family_id.code')
related_family_category_ids = fields.Many2many(
comodel_name='clv.family.category',
string='Related Family Categories',
related='related_family_id.category_ids'
)
related_family_ref_address_id = fields.Many2one(
comodel_name='clv.address',
string='Related Family Address',
related='related_family_id.ref_address_id'
)
related_family_ref_address_code = fields.Char(
string='Related Family Address Code',
related='related_family_id.ref_address_id.code'
)
related_family_ref_address_category_ids = fields.Many2many(
comodel_name='clv.address.category',
string='Related Family Address Categories',
related='related_family_id.ref_address_id.category_ids'
)
@api.multi
def do_family_aux_get_related_family_data(self):
for family_aux in self:
_logger.info(u'>>>>> %s', family_aux.related_family_id)
# if (family_aux.reg_state in ['draft', 'revised']) and \
# (family_aux.related_family_id.id is not False):
if (family_aux.related_family_id.id is not False):
data_values = {}
data_values['name'] = family_aux.related_family_id.name
data_values['code'] = family_aux.related_family_id.code
if self.related_family_id.ref_address_id.id is not False:
data_values['ref_address_id'] = family_aux.related_family_id.ref_address_id.id
data_values['street'] = family_aux.related_family_id.ref_address_id.street
data_values['street2'] = family_aux.related_family_id.ref_address_id.street2
data_values['zip'] = family_aux.related_family_id.ref_address_id.zip
data_values['city'] = family_aux.related_family_id.ref_address_id.city
data_values['state_id'] = family_aux.related_family_id.ref_address_id.state_id.id
data_values['country_id'] = family_aux.related_family_id.ref_address_id.country_id.id
data_values['phone'] = family_aux.related_family_id.ref_address_id.phone
data_values['mobile'] = family_aux.related_family_id.ref_address_id.mobile
if family_aux.related_family_id.family_id.id is not False:
data_values['family_id'] = family_aux.related_family_id.family_id.id
_logger.info(u'>>>>>>>>>> %s', data_values)
family_aux.write(data_values)
return True
| agpl-3.0 | 4,554,583,028,333,337,000 | 41.487179 | 112 | 0.626132 | false |
kaushik94/tardis | tardis/montecarlo/tests/test_base.py | 1 | 1152 | import os
import pandas as pd
import numpy as np
import pytest
from astropy import units as u
from numpy.testing import assert_almost_equal
###
# Save and Load
###
@pytest.fixture(scope="module", autouse=True)
def to_hdf_buffer(hdf_file_path, simulation_verysimple):
simulation_verysimple.runner.to_hdf(hdf_file_path, name='runner')
runner_properties = ['output_nu', 'output_energy', 'nu_bar_estimator',
'j_estimator', 'montecarlo_virtual_luminosity',
'last_interaction_in_nu',
'last_interaction_type',
'last_line_interaction_in_id',
'last_line_interaction_out_id',
'last_line_interaction_shell_id',
'packet_luminosity']
@pytest.mark.parametrize("attr", runner_properties)
def test_hdf_runner(hdf_file_path, simulation_verysimple, attr):
actual = getattr(simulation_verysimple.runner, attr)
if hasattr(actual, 'cgs'):
actual = actual.cgs.value
path = os.path.join('runner', attr)
expected = pd.read_hdf(hdf_file_path, path)
assert_almost_equal(actual, expected.values)
| bsd-3-clause | 120,374,102,820,287,300 | 35 | 70 | 0.640625 | false |
kepstin/picard | picard/ui/ui_options_metadata.py | 1 | 7398 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui/options_metadata.ui'
#
# Created: Fri May 24 09:20:07 2013
# by: PyQt4 UI code generator 4.10.1
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_MetadataOptionsPage(object):
def setupUi(self, MetadataOptionsPage):
MetadataOptionsPage.setObjectName(_fromUtf8("MetadataOptionsPage"))
MetadataOptionsPage.resize(423, 553)
self.verticalLayout = QtGui.QVBoxLayout(MetadataOptionsPage)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.metadata_groupbox = QtGui.QGroupBox(MetadataOptionsPage)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.metadata_groupbox.sizePolicy().hasHeightForWidth())
self.metadata_groupbox.setSizePolicy(sizePolicy)
self.metadata_groupbox.setMinimumSize(QtCore.QSize(397, 135))
self.metadata_groupbox.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
self.metadata_groupbox.setObjectName(_fromUtf8("metadata_groupbox"))
self.verticalLayout_3 = QtGui.QVBoxLayout(self.metadata_groupbox)
self.verticalLayout_3.setObjectName(_fromUtf8("verticalLayout_3"))
self.translate_artist_names = QtGui.QCheckBox(self.metadata_groupbox)
self.translate_artist_names.setObjectName(_fromUtf8("translate_artist_names"))
self.verticalLayout_3.addWidget(self.translate_artist_names)
self.artist_locale = QtGui.QComboBox(self.metadata_groupbox)
self.artist_locale.setObjectName(_fromUtf8("artist_locale"))
self.verticalLayout_3.addWidget(self.artist_locale)
self.standardize_artists = QtGui.QCheckBox(self.metadata_groupbox)
self.standardize_artists.setObjectName(_fromUtf8("standardize_artists"))
self.verticalLayout_3.addWidget(self.standardize_artists)
self.convert_punctuation = QtGui.QCheckBox(self.metadata_groupbox)
self.convert_punctuation.setObjectName(_fromUtf8("convert_punctuation"))
self.verticalLayout_3.addWidget(self.convert_punctuation)
self.release_ars = QtGui.QCheckBox(self.metadata_groupbox)
self.release_ars.setObjectName(_fromUtf8("release_ars"))
self.verticalLayout_3.addWidget(self.release_ars)
self.track_ars = QtGui.QCheckBox(self.metadata_groupbox)
self.track_ars.setObjectName(_fromUtf8("track_ars"))
self.verticalLayout_3.addWidget(self.track_ars)
self.folksonomy_tags = QtGui.QCheckBox(self.metadata_groupbox)
self.folksonomy_tags.setObjectName(_fromUtf8("folksonomy_tags"))
self.verticalLayout_3.addWidget(self.folksonomy_tags)
self.verticalLayout.addWidget(self.metadata_groupbox)
self.custom_fields_groupbox = QtGui.QGroupBox(MetadataOptionsPage)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.custom_fields_groupbox.sizePolicy().hasHeightForWidth())
self.custom_fields_groupbox.setSizePolicy(sizePolicy)
self.custom_fields_groupbox.setMinimumSize(QtCore.QSize(397, 0))
self.custom_fields_groupbox.setObjectName(_fromUtf8("custom_fields_groupbox"))
self.gridlayout = QtGui.QGridLayout(self.custom_fields_groupbox)
self.gridlayout.setSpacing(2)
self.gridlayout.setObjectName(_fromUtf8("gridlayout"))
self.label_6 = QtGui.QLabel(self.custom_fields_groupbox)
self.label_6.setObjectName(_fromUtf8("label_6"))
self.gridlayout.addWidget(self.label_6, 0, 0, 1, 2)
self.label_7 = QtGui.QLabel(self.custom_fields_groupbox)
self.label_7.setObjectName(_fromUtf8("label_7"))
self.gridlayout.addWidget(self.label_7, 2, 0, 1, 2)
self.nat_name = QtGui.QLineEdit(self.custom_fields_groupbox)
self.nat_name.setObjectName(_fromUtf8("nat_name"))
self.gridlayout.addWidget(self.nat_name, 3, 0, 1, 1)
self.nat_name_default = QtGui.QPushButton(self.custom_fields_groupbox)
self.nat_name_default.setObjectName(_fromUtf8("nat_name_default"))
self.gridlayout.addWidget(self.nat_name_default, 3, 1, 1, 1)
self.va_name_default = QtGui.QPushButton(self.custom_fields_groupbox)
self.va_name_default.setObjectName(_fromUtf8("va_name_default"))
self.gridlayout.addWidget(self.va_name_default, 1, 1, 1, 1)
self.va_name = QtGui.QLineEdit(self.custom_fields_groupbox)
self.va_name.setObjectName(_fromUtf8("va_name"))
self.gridlayout.addWidget(self.va_name, 1, 0, 1, 1)
self.verticalLayout.addWidget(self.custom_fields_groupbox)
spacerItem = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.MinimumExpanding)
self.verticalLayout.addItem(spacerItem)
self.label_6.setBuddy(self.va_name_default)
self.label_7.setBuddy(self.nat_name_default)
self.retranslateUi(MetadataOptionsPage)
QtCore.QMetaObject.connectSlotsByName(MetadataOptionsPage)
MetadataOptionsPage.setTabOrder(self.translate_artist_names, self.artist_locale)
MetadataOptionsPage.setTabOrder(self.artist_locale, self.standardize_artists)
MetadataOptionsPage.setTabOrder(self.standardize_artists, self.convert_punctuation)
MetadataOptionsPage.setTabOrder(self.convert_punctuation, self.release_ars)
MetadataOptionsPage.setTabOrder(self.release_ars, self.track_ars)
MetadataOptionsPage.setTabOrder(self.track_ars, self.folksonomy_tags)
MetadataOptionsPage.setTabOrder(self.folksonomy_tags, self.va_name)
MetadataOptionsPage.setTabOrder(self.va_name, self.va_name_default)
MetadataOptionsPage.setTabOrder(self.va_name_default, self.nat_name)
MetadataOptionsPage.setTabOrder(self.nat_name, self.nat_name_default)
def retranslateUi(self, MetadataOptionsPage):
self.metadata_groupbox.setTitle(_("Metadata"))
self.translate_artist_names.setText(_("Translate artist names to this locale where possible:"))
self.standardize_artists.setText(_("Use standardized artist names"))
self.convert_punctuation.setText(_("Convert Unicode punctuation characters to ASCII"))
self.release_ars.setText(_("Use release relationships"))
self.track_ars.setText(_("Use track relationships"))
self.folksonomy_tags.setText(_("Use folksonomy tags as genre"))
self.custom_fields_groupbox.setTitle(_("Custom Fields"))
self.label_6.setText(_("Various artists:"))
self.label_7.setText(_("Non-album tracks:"))
self.nat_name_default.setText(_("Default"))
self.va_name_default.setText(_("Default"))
| gpl-2.0 | 3,693,362,362,089,422,000 | 57.714286 | 109 | 0.72479 | false |
OutOfOrder/sshproxy | lib/ini_db/client.py | 1 | 5248 | #!/usr/bin/env python
# -*- coding: ISO-8859-15 -*-
#
# Copyright (C) 2005-2007 David Guerizec <[email protected]>
#
# Last modified: 2007 Dec 08, 20:10:26 by david
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import os, os.path, sha
from ConfigParser import NoSectionError, SafeConfigParser as ConfigParser
from sshproxy.config import ConfigSection, path, get_config
from sshproxy.client import ClientDB, ClientInfo
from sshproxy.util import istrue
class FileClientConfigSection(ConfigSection):
section_id = 'client_db.ini'
section_defaults = {
'file': '@client.db',
}
types = {
'file': path,
}
FileClientConfigSection.register()
class FileClientInfo(ClientInfo):
def get_config_file(self):
clientfile = get_config('client_db.ini')['file']
if not os.path.exists(clientfile):
open(clientfile, 'w').close()
os.chmod(clientfile, 0600)
# no need to parse an empty file
return None
file = ConfigParser()
file.read(clientfile)
return file
def load(self):
file = self.get_config_file()
if not file:
return
try:
tokens = dict(file.items(self.username))
except NoSectionError:
return
self.set_tokens(**tokens)
def save(self, file=None):
if not file:
file = self.get_config_file()
if not file:
return
if self.username:
if not file.has_section(self.username):
file.add_section(self.username)
for tag, value in self.tokens.items():
if tag in ('username', 'ip_addr'):
continue
elif value and str(value):
file.set(self.username, tag, str(value))
elif file.has_option(self.username, tag):
file.remove_option(self.username, tag)
clientfile = get_config('client_db.ini')['file']
fd = open(clientfile+'.new', 'w')
file.write(fd)
fd.close()
os.rename(clientfile+'.new', clientfile)
def delete(self, username):
file = self.get_config_file()
if not file:
return
if file.has_section(username):
file.remove_section(username)
self.save(file)
def auth_token_order(self):
return ('pubkey', 'pkey', 'password')
def authenticate(self, **tokens):
from sshproxy import log
resp = False
for token in self.auth_token_order():
if token in tokens.keys() and tokens[token] is not None:
if token == 'password':
if (sha.new(tokens[token]).hexdigest()
== self.get_token(token)):
resp = True
break
elif token in ('pubkey', 'pkey'):
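                    # the stored value may hold several public keys, one per
                    # line; only the first whitespace-separated field of each
                    # line is compared with the offered key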
pubkeys = self.get_token('pubkey',
self.get_token('pkey', '')).split('\n')
pubkeys = [ pk.split()[0] for pk in pubkeys if len(pk) ]
for pk in pubkeys:
if pk == tokens[token]:
resp = True
break
ClientDB()._unauth_pubkey = tokens[token]
elif self.get_token(token) == tokens[token]:
resp = True
break
return resp
def exists(self, username):
file = self.get_config_file()
if not file:
return
return file.has_section(username)
def list_clients(self, **kw):
file = self.get_config_file()
if not file:
return
return file.sections()
class FileClientDB(ClientDB):
def exists(self, username, **tokens):
if not getattr(self, 'clientinfo', None):
return ClientInfo(None).exists(username)
return self.clientinfo.exists(username)
def list_clients(self, **kw):
return ClientInfo(None).list_clients(**kw)
def add_client(self, username, **tokens):
if self.exists(username):
return 'Client %s does already exist' % username
client = ClientInfo(username, **tokens)
client.save()
return 'Client %s added' % username
def del_client(self, username, **tokens):
if not self.exists(username):
return 'Client %s does not exist'
ClientInfo(None).delete(username)
return 'Client %s deleted.' % username
| gpl-2.0 | -5,217,639,128,225,009,000 | 30.614458 | 79 | 0.570503 | false |
dkamotsky/program-y | src/test/extensions/weather/test_aiml.py | 1 | 1526 | import unittest
import os
from test.aiml_tests.client import TestClient
from programy.config.brain import BrainFileConfiguration
class WeathersTestsClient(TestClient):
def __init__(self):
TestClient.__init__(self, debug=True)
def load_configuration(self, arguments):
super(WeathersTestsClient, self).load_configuration(arguments)
self.configuration.brain_configuration._aiml_files = BrainFileConfiguration(files=os.path.dirname(__file__))
class WeathersAIMLTests(unittest.TestCase):
def setUp (self):
WeathersAIMLTests.test_client = WeathersTestsClient()
latlong = os.path.dirname(__file__) + "/google_latlong.json"
observation = os.path.dirname(__file__) + "/observation.json"
threehourly = os.path.dirname(__file__) + "/forecast_3hourly.json"
daily = os.path.dirname(__file__) + "/forecast_daily.json"
WeathersAIMLTests.test_client.bot.license_keys.load_license_key_data("""
GOOGLE_LATLONG=%s
METOFFICE_API_KEY=TESTKEY
CURRENT_OBSERVATION_RESPONSE_FILE=%s
THREE_HOURLY_FORECAST_RESPONSE_FILE=%s
DAILY_FORECAST_RESPONSE_FILE=%s
"""%(latlong, observation, threehourly, daily))
def test_weather(self):
response = WeathersAIMLTests.test_client.bot.ask_question("testid", "WEATHER LOCATION KY39UR WHEN TODAY")
self.assertIsNotNone(response)
self.assertEqual(response, "Today the weather is Partly cloudy (day) , with a temperature of 12 dot 3 \'C")
| mit | 7,791,428,641,873,867,000 | 39.157895 | 116 | 0.690039 | false |
pbabik/minstrel | get_lat_lon_exif_pil.py | 1 | 2051 | from PIL import Image
from PIL.ExifTags import TAGS, GPSTAGS
def get_exif_data(image):
"""Returns a dictionary from the exif data of an PIL Image item. Also converts the GPS Tags"""
exif_data = {}
info = image._getexif()
if info:
for tag, value in info.items():
decoded = TAGS.get(tag, tag)
if decoded == "GPSInfo":
gps_data = {}
for t in value:
sub_decoded = GPSTAGS.get(t, t)
gps_data[sub_decoded] = value[t]
exif_data[decoded] = gps_data
else:
exif_data[decoded] = value
return exif_data
def _get_if_exist(data, key):
if key in data:
return data[key]
return None
def _convert_to_degress(value):
"""Helper function to convert the GPS coordinates stored in the EXIF to degress in float format"""
d0 = value[0][0]
d1 = value[0][1]
d = float(d0) / float(d1)
m0 = value[1][0]
m1 = value[1][1]
m = float(m0) / float(m1)
s0 = value[2][0]
s1 = value[2][1]
s = float(s0) / float(s1)
return d + (m / 60.0) + (s / 3600.0)
def get_lat_lon(exif_data):
"""Returns the latitude and longitude, if available, from the provided exif_data (obtained through get_exif_data above)"""
lat = 0
lon = 0
if "GPSInfo" in exif_data:
gps_info = exif_data["GPSInfo"]
gps_latitude = _get_if_exist(gps_info, "GPSLatitude")
gps_latitude_ref = _get_if_exist(gps_info, 'GPSLatitudeRef')
gps_longitude = _get_if_exist(gps_info, 'GPSLongitude')
gps_longitude_ref = _get_if_exist(gps_info, 'GPSLongitudeRef')
if gps_latitude and gps_latitude_ref and gps_longitude and gps_longitude_ref:
lat = _convert_to_degress(gps_latitude)
if gps_latitude_ref != "N":
lat = 0 - lat
lon = _convert_to_degress(gps_longitude)
if gps_longitude_ref != "E":
lon = 0 - lon
return lat, lon
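

# Illustrative usage sketch (not part of the original file); "photo.jpg" is a
# placeholder for any JPEG that carries GPS EXIF tags:
# image = Image.open("photo.jpg")
# exif_data = get_exif_data(image)
# lat, lon = get_lat_lon(exif_data)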
| gpl-2.0 | -1,580,035,933,895,308,300 | 29.61194 | 126 | 0.557289 | false |
frumiousbandersnatch/supybot-code | plugins/ShrinkUrl/config.py | 1 | 4888 | ###
# Copyright (c) 2005, Jeremiah Fincher
# Copyright (c) 2009-2010, James McCoy
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import supybot.conf as conf
import supybot.registry as registry
def configure(advanced):
from supybot.questions import output, expect, anything, something, yn
conf.registerPlugin('ShrinkUrl', True)
if yn("""This plugin offers a snarfer that will go retrieve a shorter
version of long URLs that are sent to the channel. Would you
like this snarfer to be enabled?""", default=False):
conf.supybot.plugins.ShrinkUrl.shrinkSnarfer.setValue(True)
class ShrinkService(registry.OnlySomeStrings):
"""Valid values include 'ln', 'tiny', 'xrl', and 'x0'."""
validStrings = ('ln', 'tiny', 'xrl', 'x0')
class ShrinkCycle(registry.SpaceSeparatedListOfStrings):
"""Valid values include 'ln', 'tiny', 'xrl', and 'x0'."""
Value = ShrinkService
def __init__(self, *args, **kwargs):
super(ShrinkCycle, self).__init__(*args, **kwargs)
self.lastIndex = -1
def setValue(self, v):
super(ShrinkCycle, self).setValue(v)
self.lastIndex = -1
def getService(self):
L = self()
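        # rotate round-robin through the configured services, remembering the
        # last index so successive calls cycle through the whole list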
if L:
self.lastIndex = (self.lastIndex + 1) % len(L)
return L[self.lastIndex]
raise ValueError, \
'No services have been configured for rotation. ' \
'See conf.supybot.plugins.ShrinkUrl.serviceRotation.'
ShrinkUrl = conf.registerPlugin('ShrinkUrl')
conf.registerChannelValue(ShrinkUrl, 'shrinkSnarfer',
registry.Boolean(False, """Determines whether the
shrink snarfer is enabled. This snarfer will watch for URLs in the
channel, and if they're sufficiently long (as determined by
supybot.plugins.ShrinkUrl.minimumLength) it will post a
smaller URL from either ln-s.net or tinyurl.com, as denoted in
supybot.plugins.ShrinkUrl.default."""))
conf.registerChannelValue(ShrinkUrl.shrinkSnarfer, 'showDomain',
registry.Boolean(True, """Determines whether the snarfer will show the
domain of the URL being snarfed along with the shrunken URL."""))
conf.registerChannelValue(ShrinkUrl, 'minimumLength',
registry.PositiveInteger(48, """The minimum length a URL must be before
the bot will shrink it."""))
conf.registerChannelValue(ShrinkUrl, 'nonSnarfingRegexp',
registry.Regexp(None, """Determines what URLs are to be snarfed; URLs
matching the regexp given will not be snarfed. Give the empty string if
you have no URLs that you'd like to exclude from being snarfed."""))
conf.registerChannelValue(ShrinkUrl, 'outFilter',
registry.Boolean(False, """Determines whether the bot will shrink the URLs
of outgoing messages if those URLs are longer than
supybot.plugins.ShrinkUrl.minimumLength."""))
conf.registerChannelValue(ShrinkUrl, 'default',
ShrinkService('ln', """Determines what website the bot will use when
shrinking a URL."""))
conf.registerGlobalValue(ShrinkUrl, 'bold',
registry.Boolean(True, """Determines whether this plugin will bold certain
portions of its replies."""))
conf.registerChannelValue(ShrinkUrl, 'serviceRotation',
ShrinkCycle([], """If set to a non-empty value, specifies the list of
services to rotate through for the shrinkSnarfer and outFilter."""))
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
| bsd-3-clause | 120,014,738,128,993,890 | 48.373737 | 79 | 0.727291 | false |
pombredanne/pytype | pytype/tests/test_load_pytd.py | 1 | 5255 | """Tests for load_pytd.py."""
from pytype import load_pytd
from pytype import utils
import unittest
class ImportPathsTest(unittest.TestCase):
"""Tests for load_pytd.py."""
PYTHON_VERSION = (2, 7)
def testBuiltinSys(self):
loader = load_pytd.Loader("base", python_version=self.PYTHON_VERSION)
ast = loader.import_name("sys")
self.assertTrue(ast.Lookup("sys.exit"))
def testBasic(self):
with utils.Tempdir() as d:
d.create_file("path/to/some/module.pytd", "def foo(x:int) -> str")
loader = load_pytd.Loader("base", python_version=self.PYTHON_VERSION,
pythonpath=[d.path])
ast = loader.import_name("path.to.some.module")
self.assertTrue(ast.Lookup("path.to.some.module.foo"))
def testCustomExtension(self):
with utils.Tempdir() as d:
d.create_file("path/to/some/module.dat", "def foo() -> str")
loader = load_pytd.Loader("base", python_version=self.PYTHON_VERSION,
pythonpath=[d.path],
find_pytd_import_ext=".dat"
)
ast = loader.import_name("path.to.some.module")
self.assertTrue(ast.Lookup("path.to.some.module.foo"))
def testStripPrefix(self):
with utils.Tempdir() as d:
d.create_file("path/to/some/module.pytd", "def foo() -> str")
loader = load_pytd.Loader("base", python_version=self.PYTHON_VERSION,
pythonpath=[d.path],
import_drop_prefixes=("extra.long",
"even.longer")
)
self.assertTrue(loader.import_name("extra.long.path.to.some.module"))
self.assertTrue(loader.import_name("even.longer.path.to.some.module"))
def testPath(self):
with utils.Tempdir() as d1:
with utils.Tempdir() as d2:
d1.create_file("dir1/module1.pytd", "def foo1() -> str")
d2.create_file("dir2/module2.pytd", "def foo2() -> str")
loader = load_pytd.Loader("base", python_version=self.PYTHON_VERSION,
pythonpath=[d1.path, d2.path])
module1 = loader.import_name("dir1.module1")
module2 = loader.import_name("dir2.module2")
self.assertTrue(module1.Lookup("dir1.module1.foo1"))
self.assertTrue(module2.Lookup("dir2.module2.foo2"))
def testInit(self):
with utils.Tempdir() as d1:
d1.create_file("baz/__init__.pytd", "x: int")
loader = load_pytd.Loader("base", python_version=self.PYTHON_VERSION,
pythonpath=[d1.path])
self.assertTrue(loader.import_name("baz").Lookup("baz.x"))
def testNoInit(self):
with utils.Tempdir() as d:
d.create_directory("baz")
loader = load_pytd.Loader("base", python_version=self.PYTHON_VERSION,
pythonpath=[d.path])
self.assertTrue(loader.import_name("baz"))
def testStdlib(self):
loader = load_pytd.Loader("base", python_version=self.PYTHON_VERSION)
ast = loader.import_name("StringIO")
self.assertTrue(ast.Lookup("StringIO.StringIO"))
def testDeepDependency(self):
with utils.Tempdir() as d:
d.create_file("module1.pytd", "def get_bar() -> module2.Bar")
d.create_file("module2.pytd", "class Bar:\n pass")
loader = load_pytd.Loader("base", python_version=self.PYTHON_VERSION,
pythonpath=[d.path])
module1 = loader.import_name("module1")
f, = module1.Lookup("module1.get_bar").signatures
self.assertEquals("module2.Bar", f.return_type.cls.name)
def testCircularDependency(self):
with utils.Tempdir() as d:
d.create_file("foo.pytd", """
def get_bar() -> bar.Bar
class Foo:
pass
""")
d.create_file("bar.pytd", """
def get_foo() -> foo.Foo
class Bar:
pass
""")
loader = load_pytd.Loader("base", python_version=self.PYTHON_VERSION,
pythonpath=[d.path])
foo = loader.import_name("foo")
bar = loader.import_name("bar")
f1, = foo.Lookup("foo.get_bar").signatures
f2, = bar.Lookup("bar.get_foo").signatures
self.assertEquals("bar.Bar", f1.return_type.cls.name)
self.assertEquals("foo.Foo", f2.return_type.cls.name)
def testRelative(self):
with utils.Tempdir() as d:
d.create_file("__init__.pytd", "base: ?")
d.create_file("path/__init__.pytd", "path: ?")
d.create_file("path/to/__init__.pytd", "to: ?")
d.create_file("path/to/some/__init__.pytd", "some: ?")
d.create_file("path/to/some/module.pytd", "")
loader = load_pytd.Loader("path.to.some.module",
python_version=self.PYTHON_VERSION,
pythonpath=[d.path])
some = loader.import_relative(1)
to = loader.import_relative(2)
path = loader.import_relative(3)
# Python doesn't allow "...." here, so don't test import_relative(4).
self.assertTrue(some.Lookup("path.to.some.some"))
self.assertTrue(to.Lookup("path.to.to"))
self.assertTrue(path.Lookup("path.path"))
if __name__ == "__main__":
unittest.main()
| apache-2.0 | 2,611,055,762,498,074,000 | 39.423077 | 77 | 0.583444 | false |
chauhankaranraj/hackathon2017 | TFKMeansCluster.py | 1 | 6066 | from categorize_clusters import categorize_clusters
import tensorflow as tf
from random import choice, shuffle
import numpy as np
import pickle
def main():
num_clusters = 3
with open('normal_hashes.obj', 'rb') as fHandler:
normal_hashes = pickle.load(fHandler)
with open('malware_hashes.obj', 'rb') as fHandler:
malware_hashes = pickle.load(fHandler)
train_vecs = np.concatenate((normal_hashes, malware_hashes), axis=0)
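    # rows of train_vecs keep this order: the benign ("normal") hash vectors
    # first, followed by the malware hash vectors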
print("starting training")
cents, asmnts = TFKMeansCluster(train_vecs, num_clusters)
prob_dict = categorize_clusters(asmnts)
with open('centroids.obj', 'wb') as fHandler:
pickle.dump(cents, fHandler)
with open('assignments.obj', 'wb') as fHandler:
pickle.dump(asmnts, fHandler)
with open('prob_dict.obj', 'wb') as fHandler:
pickle.dump(prob_dict, fHandler)
def TFKMeansCluster(vectors, noofclusters):
"""
K-Means Clustering using TensorFlow.
'vectors' should be a n*k 2-D NumPy array, where n is the number
of vectors of dimensionality k.
'noofclusters' should be an integer.
"""
noofclusters = int(noofclusters)
assert noofclusters < len(vectors)
#Find out the dimensionality
dim = len(vectors[0])
#Will help select random centroids from among the available vectors
vector_indices = list(range(len(vectors)))
shuffle(vector_indices)
#GRAPH OF COMPUTATION
#We initialize a new graph and set it as the default during each run
#of this algorithm. This ensures that as this function is called
#multiple times, the default graph doesn't keep getting crowded with
#unused ops and Variables from previous function calls.
graph = tf.Graph()
with graph.as_default():
#SESSION OF COMPUTATION
sess = tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
##First lets ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
centroids = [tf.Variable((vectors[vector_indices[i]]))
for i in range(noofclusters)]
##These nodes will assign the centroid Variables the appropriate
##values
centroid_value = tf.placeholder("float64", [dim])
cent_assigns = []
for centroid in centroids:
cent_assigns.append(tf.assign(centroid, centroid_value))
##Variables for cluster assignments of individual vectors(initialized
##to 0 at first)
assignments = [tf.Variable(0) for i in range(len(vectors))]
##These nodes will assign an assignment Variable the appropriate
##value
assignment_value = tf.placeholder("int32")
cluster_assigns = []
for assignment in assignments:
cluster_assigns.append(tf.assign(assignment,
assignment_value))
##Now lets construct the node that will compute the mean
#The placeholder for the input
mean_input = tf.placeholder("float", [None, dim])
#The Node/op takes the input and computes a mean along the 0th
#dimension, i.e. the list of input vectors
mean_op = tf.reduce_mean(mean_input, 0)
##Node for computing Euclidean distances
#Placeholders for input
v1 = tf.placeholder("float", [dim])
v2 = tf.placeholder("float", [dim])
euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(
v1, v2), 2)))
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
#Placeholder for input
centroid_distances = tf.placeholder("float", [noofclusters])
cluster_assignment = tf.argmin(centroid_distances, 0)
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
init_op = tf.global_variables_initializer()
#Initialize all variables
sess.run(init_op)
##CLUSTERING ITERATIONS
#Now perform the Expectation-Maximization steps of K-Means clustering
#iterations. To keep things simple, we will only do a set number of
#iterations, instead of using a Stopping Criterion.
noofiterations = 100
for iteration_n in range(noofiterations):
print("step ", iteration_n)
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
#Iterate over each vector
for vector_n in range(len(vectors)):
vect = vectors[vector_n]
#Compute Euclidean distance between this vector and each
#centroid. Remember that this list cannot be named
#'centroid_distances', since that is the input to the
#cluster assignment node.
distances = [sess.run(euclid_dist, feed_dict={
v1: vect, v2: sess.run(centroid)})
for centroid in centroids]
#Now use the cluster assignment node, with the distances
#as the input
assignment = sess.run(cluster_assignment, feed_dict = {
centroid_distances: distances})
#Now assign the value to the appropriate state variable
sess.run(cluster_assigns[vector_n], feed_dict={
assignment_value: assignment})
##MAXIMIZATION STEP
#Based on the expected state computed from the Expectation Step,
#compute the locations of the centroids so as to maximize the
#overall objective of minimizing within-cluster Sum-of-Squares
for cluster_n in range(noofclusters):
#Collect all the vectors assigned to this cluster
assigned_vects = [vectors[i] for i in range(len(vectors))
if sess.run(assignments[i]) == cluster_n]
#Compute new centroid location
new_location = sess.run(mean_op, feed_dict={
mean_input: np.array(assigned_vects)})
#Assign value to appropriate variable
sess.run(cent_assigns[cluster_n], feed_dict={
centroid_value: new_location})
#Return centroids and assignments
centroids = sess.run(centroids)
assignments = sess.run(assignments)
return centroids, assignments
if __name__ == '__main__':
main()
| mit | -1,452,658,285,003,858,200 | 32.662857 | 71 | 0.704088 | false |
alxgu/ansible | lib/ansible/modules/cloud/azure/azure_rm_roledefinition.py | 1 | 13325 | #!/usr/bin/python
#
# Copyright (c) 2018 Yunge Zhu, (@yungezz)
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_roledefinition
version_added: "2.8"
short_description: Manage Azure Role Definition.
description:
- Create, update and delete instance of Azure Role Definition.
options:
name:
description:
- Unique name of role definition.
required: True
permissions:
description:
        - Set of role definition permissions.
- See U(https://docs.microsoft.com/en-us/azure/app-service/app-service-web-overview) for more info.
suboptions:
actions:
description:
- List of allowed actions.
type: list
not_actions:
description:
- List of denied actions.
type: list
data_actions:
description:
- List of allowed data actions.
type: list
not_data_actions:
description:
- List of denied data actions.
type: list
assignable_scopes:
        description: List of assignable scopes of this definition.
scope:
description: The scope of the role definition.
description:
description:
- The role definition description.
state:
description:
- Assert the state of the role definition.
- Use 'present' to create or update a role definition and 'absent' to delete it.
default: present
choices:
- absent
- present
extends_documentation_fragment:
- azure
author:
- "Yunge Zhu(@yungezz)"
'''
EXAMPLES = '''
- name: Create a role definition
azure_rm_roledefinition:
name: myTestRole
scope: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myresourceGroup
permissions:
- actions:
- "Microsoft.Compute/virtualMachines/read"
data_actions:
- "Microsoft.Storage/storageAccounts/blobServices/containers/blobs/write"
assignable_scopes:
- "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
'''
RETURN = '''
id:
description: Id of current role definition.
returned: always
type: str
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/providers/Microsoft.Authorization/roleDefinitions/roleDefinitionId"
'''
import uuid
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
from ansible.module_utils._text import to_native
try:
from msrestazure.azure_exceptions import CloudError
from msrestazure.azure_operation import AzureOperationPoller
from msrest.polling import LROPoller
from msrest.serialization import Model
from azure.mgmt.authorization import AuthorizationManagementClient
    from azure.mgmt.authorization.models import (RoleDefinition, Permission)
except ImportError:
# This is handled in azure_rm_common
pass
permission_spec = dict(
actions=dict(
type='list',
options=dict(type='str')
),
not_actions=dict(
type='list',
options=dict(type='str')
),
data_actions=dict(
type='list',
options=dict(type='str')
),
not_data_actions=dict(
type='list',
options=dict(type='str')
),
)
def roledefinition_to_dict(role):
result = dict(
id=role.id,
name=role.name,
type=role.role_type,
assignable_scopes=role.assignable_scopes,
description=role.description,
role_name=role.role_name
)
if role.permissions:
result['permissions'] = [dict(
actions=p.actions,
not_actions=p.not_actions,
data_actions=p.data_actions,
not_data_actions=p.not_data_actions
) for p in role.permissions]
return result
class Actions:
NoAction, CreateOrUpdate, Delete = range(3)
class AzureRMRoleDefinition(AzureRMModuleBase):
"""Configuration class for an Azure RM Role definition resource"""
def __init__(self):
self.module_arg_spec = dict(
name=dict(
type='str',
required=True
),
scope=dict(
type='str'
),
permissions=dict(
type='list',
elements='dict',
options=permission_spec
),
assignable_scopes=dict(
type='list',
elements='str'
),
description=dict(
type='str'
),
state=dict(
type='str',
default='present',
choices=['present', 'absent']
)
)
self.name = None
self.scope = None
self.permissions = None
self.description = None
self.assignable_scopes = None
self.results = dict(
changed=False,
id=None,
)
self.state = None
self.to_do = Actions.NoAction
self.role = None
self._client = None
super(AzureRMRoleDefinition, self).__init__(derived_arg_spec=self.module_arg_spec,
supports_check_mode=True,
supports_tags=False)
def exec_module(self, **kwargs):
"""Main module execution method"""
for key in list(self.module_arg_spec.keys()):
if hasattr(self, key):
setattr(self, key, kwargs[key])
old_response = None
response = None
# get management client
self._client = self.get_mgmt_svc_client(AuthorizationManagementClient,
base_url=self._cloud_environment.endpoints.resource_manager,
api_version="2018-01-01-preview")
self.scope = self.build_scope()
# get existing role definition
old_response = self.get_roledefinition()
if old_response:
self.results['id'] = old_response['id']
self.role = old_response
if self.state == 'present':
# check if the role definition exists
if not old_response:
self.log("Role definition doesn't exist in this scope")
self.to_do = Actions.CreateOrUpdate
else:
# existing role definition, do update
self.log("Role definition already exists")
self.log('Result: {0}'.format(old_response))
# compare if role definition changed
if self.check_update(old_response):
self.to_do = Actions.CreateOrUpdate
elif self.state == 'absent':
if old_response:
self.log("Delete role defintion")
self.results['changed'] = True
if self.check_mode:
return self.results
self.delete_roledefinition(old_response['name'])
self.log('role definition deleted')
else:
self.log("role definition {0} not exists.".format(self.name))
if self.to_do == Actions.CreateOrUpdate:
self.log('Need to Create/Update role definition')
self.results['changed'] = True
if self.check_mode:
return self.results
response = self.create_update_roledefinition()
self.results['id'] = response['id']
return self.results
# build scope
def build_scope(self):
        subscription_scope = '/subscriptions/' + self.subscription_id
if self.scope is None:
return subscription_scope
return self.scope
# check update
def check_update(self, old_definition):
if self.description and self.description != old_definition['properties']['description']:
return True
if self.permissions:
if len(self.permissions) != len(old_definition['permissions']):
return True
existing_permissions = self.permissions_to_set(old_definition['permissions'])
new_permissions = self.permissions_to_set(self.permissions)
if existing_permissions != new_permissions:
return True
if self.assignable_scopes and self.assignable_scopes != old_definition['assignable_scopes']:
return True
return False
def permissions_to_set(self, permissions):
new_permissions = [str(dict(
actions=(set([to_native(a) for a in item.get('actions')]) if item.get('actions') else None),
not_actions=(set([to_native(a) for a in item.get('not_actions')]) if item.get('not_actions') else None),
data_actions=(set([to_native(a) for a in item.get('data_actions')]) if item.get('data_actions') else None),
not_data_actions=(set([to_native(a) for a in item.get('not_data_actions')]) if item.get('not_data_actions') else None),
)) for item in permissions]
return set(new_permissions)
def create_update_roledefinition(self):
'''
Creates or updates role definition.
:return: deserialized role definition
'''
self.log("Creating / Updating role definition {0}".format(self.name))
try:
permissions = None
if self.permissions:
permissions = [AuthorizationManagementClient.models("2018-01-01-preview").Permission(
actions=p.get('actions', None),
not_actions=p.get('not_actions', None),
data_actions=p.get('data_actions', None),
not_data_actions=p.get('not_data_actions', None)
) for p in self.permissions]
role_definition = AuthorizationManagementClient.models("2018-01-01-preview").RoleDefinition(
role_name=self.name,
description=self.description,
permissions=permissions,
assignable_scopes=self.assignable_scopes,
role_type='CustomRole')
if self.role:
role_definition.name = self.role['name']
response = self._client.role_definitions.create_or_update(role_definition_id=self.role['name'] if self.role else str(uuid.uuid4()),
scope=self.scope,
role_definition=role_definition)
if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller):
response = self.get_poller_result(response)
except CloudError as exc:
self.log('Error attempting to create role definition.')
self.fail("Error creating role definition: {0}".format(str(exc)))
return roledefinition_to_dict(response)
def delete_roledefinition(self, role_definition_id):
'''
Deletes specified role definition.
:return: True
'''
self.log("Deleting the role definition {0}".format(self.name))
scope = self.build_scope()
try:
response = self._client.role_definitions.delete(scope=scope,
role_definition_id=role_definition_id)
if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller):
response = self.get_poller_result(response)
except CloudError as e:
self.log('Error attempting to delete the role definition.')
self.fail("Error deleting the role definition: {0}".format(str(e)))
return True
def get_roledefinition(self):
'''
Gets the properties of the specified role definition.
:return: deserialized role definition state dictionary
'''
self.log("Checking if the role definition {0} is present".format(self.name))
response = None
try:
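            # list every role definition in the scope and pick out the ones whose role_name matches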
response = list(self._client.role_definitions.list(scope=self.scope))
if len(response) > 0:
self.log("Response : {0}".format(response))
roles = []
for r in response:
if r.role_name == self.name:
roles.append(r)
if len(roles) == 1:
self.log("role definition : {0} found".format(self.name))
return roledefinition_to_dict(roles[0])
if len(roles) > 1:
self.fail("Found multiple role definitions: {0}".format(roles))
except CloudError as ex:
self.log("Didn't find role definition {0}".format(self.name))
return False
def main():
"""Main execution"""
AzureRMRoleDefinition()
if __name__ == '__main__':
main()
| gpl-3.0 | -634,955,316,576,536,600 | 32.39599 | 143 | 0.564953 | false |
serendi-app/serendi-server | laufpartner_server/core/api/serializers.py | 1 | 1696 | # -*- coding: utf-8 -*-
# This file is part of Laufpartner.
#
# Laufpartner is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Laufpartner is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Laufpartner. If not, see <http://www.gnu.org/licenses/>.
from rest_framework import serializers
from laufpartner_server.core.models import Profile
class SearchSerializer(serializers.Serializer):
'''
Serializes the parameters needed to perform a search
'''
radius = serializers.IntegerField(required=True,
max_value=30)
'''
Radius to search users in
'''
gender = serializers.MultipleChoiceField(required=True,
choices=Profile.GENDER)
'''
Users gender to match
'''
level = serializers.MultipleChoiceField(required=True,
choices=Profile.LEVEL)
'''
Users fitness level to match
'''
age = serializers.MultipleChoiceField(required=True,
choices=Profile.AGE)
'''
Users age bracket to match
'''
time = serializers.MultipleChoiceField(required=True,
choices=Profile.TIME)
'''
Users time for running to match
'''
| agpl-3.0 | 7,861,105,898,456,781,000 | 29.836364 | 77 | 0.632665 | false |
karissa/papertalk | papertalk/utils/__init__.py | 1 | 1827 | import urllib, urllib2
import re
try:
import simplejson as json
except ImportError:
try:
import json
except ImportError:
raise ImportError
from datetime import datetime, date
from bson.objectid import ObjectId
from werkzeug import Response
import pytz
def scrape(url):
"""
Scrapes a url and returns the html using the proper User Agent
"""
UA = 'Mozilla/5.0 (X11; U; FreeBSD i386; en-US; rv:1.9.2.9) Gecko/20100913 Firefox/3.6.9'
urllib.quote(url.encode('utf-8'))
req = urllib2.Request(url=url,
headers={'User-Agent': UA})
hdl = urllib2.urlopen(req)
html = hdl.read()
return html
def utcnow():
now = datetime.utcnow()
return now.replace(tzinfo=pytz.utc)
class MongoJsonEncoder(json.JSONEncoder):
def default(self, obj):
		if isinstance(obj, (datetime, date)):
return obj.isoformat()
elif isinstance(obj, ObjectId):
return unicode(obj)
return json.JSONEncoder.default(self, obj)
def jsonify(*args, **kwargs):
""" jsonify with support for MongoDB ObjectId"""
return Response(json.dumps(dict(*args, **kwargs), cls=MongoJsonEncoder), mimetype='application/json')
def canonicalize(title):
"""
canonicalizes an article title
"""
def normalize(s):
"""
normalizes given string for the canonicalization
"""
if not isinstance(s, basestring):
return ''
		res = s.lower().strip() ## lowercase and remove outer spaces
res = re.sub("\s", '', res) ## remove spaces
res = re.sub("""["`'.&]""", '', res) ## normalize quotes
res = re.sub("""[']""", '', res) ## weird unicode found on mendeley articles
return res
canon = normalize(title)
return canon
| mit | -4,882,801,964,975,490,000 | 25.478261 | 105 | 0.619595 | false |
edusegzy/pychemqt | lib/mEoS/nC5.py | 1 | 12728 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from lib.meos import MEoS
from lib import unidades
class nC5(MEoS):
"""Multiparameter equation of state for n-pentane"""
name = "pentane"
CASNumber = "109-66-0"
formula = "CH3-(CH2)3-CH3"
synonym = "R-601"
rhoc = unidades.Density(232.)
Tc = unidades.Temperature(469.7)
Pc = unidades.Pressure(3370.0, "kPa")
M = 72.14878 # g/mol
Tt = unidades.Temperature(143.47)
Tb = unidades.Temperature(309.21)
f_acent = 0.251
momentoDipolar = unidades.DipoleMoment(0.07, "Debye")
id = 8
_Tr = unidades.Temperature(449.271155)
_rhor = unidades.Density(233.873368)
_w = 0.247058753
Fi1 = {"ao_log": [1, 3.0],
"pow": [0, 1],
"ao_pow": [],
"ao_exp": [], "titao": [],
"ao_hyp": [8.95043, 21.836, 33.4032, 0],
"hyp": [0.380391739, 1.789520971, 3.777411113, 0]}
CP1 = {"ao": 10.288132,
"an": [-0.2695377e-1, 0.20951065e-3, -0.27910773e-6, 0.12266269e-9],
"pow": [1, 2, 3, 4],
"ao_exp": [], "exp": [],
"ao_hyp": [], "hyp": []}
CP2 = {"ao": 22.5012/8.3159524*4.184,
"an": [], "pow": [],
"ao_exp": [], "exp": [],
"ao_hyp": [2.057417e8/8.3159524*4.184, 2.972927e7/8.3159524*4.184, 0, 0],
"hyp": [1.71958e3, 8.02069e2, 0, 0]}
helmholtz1 = {
"__type__": "Helmholtz",
"__name__": "short Helmholtz equation of state for pentane of Span and Wagner (2003)",
"__doi__": {"autor": "Span, R., Wagner, W.",
"title": "Equations of state for technical applications. II. Results for nonpolar fluids.",
"ref": "Int. J. Thermophys. 24 (2003), 41 – 109.",
"doi": "10.1023/A:1022310214958"},
"__test__": """
>>> st=nC5(T=700, rho=200)
>>> print "%0.4f %0.3f %0.4f" % (st.cp0.kJkgK, st.P.MPa, st.cp.kJkgK)
3.2053 13.454 3.6052
>>> st2=nC5(T=750, rho=100)
>>> print "%0.2f %0.5f" % (st2.h.kJkg-st.h.kJkg, st2.s.kJkgK-st.s.kJkgK)
213.42 0.34915
""", # Table III, Pag 46
"R": 8.31451,
"cp": Fi1,
"ref": "OTO",
"Tmin": Tt, "Tmax": 600.0, "Pmax": 100000.0, "rhomax": 11.2,
"Pmin": 0.76322e-4, "rhomin": 10.566,
"nr1": [0.10968643e1, -0.29988888e1, 0.99516887, -0.16170709,
0.11334460, 0.26760595e-3],
"d1": [1, 1, 1, 2, 3, 7],
"t1": [0.25, 1.125, 1.5, 1.375, 0.25, 0.875],
"nr2": [0.40979882, -0.40876423e-1, -0.38169482, -0.10931957,
-0.32073223e-1, 0.16877016e-1],
"d2": [2, 5, 1, 4, 3, 4],
"t2": [0.625, 1.75, 3.625, 3.625, 14.5, 12.],
"c2": [1, 1, 2, 2, 3, 3],
"gamma2": [1]*6}
GERG = {
"__type__": "Helmholtz",
"__name__": "Helmholtz equation of state for pentane of Kunz and Wagner (2004).",
"__doi__": {"autor": "Kunz, O., Wagner, W.",
"title": "The GERG-2008 Wide-Range Equation of State for Natural Gases and Other Mixtures: An Expansion of GERG-2004",
"ref": "J. Chem. Eng. Data, 2012, 57 (11), pp 3032-3091",
"doi": "10.1021/je300655b"},
"R": 8.314472,
"cp": Fi1,
"ref": "OTO",
"Tmin": 143.47, "Tmax": 600.0, "Pmax": 100000.0, "rhomax": 10.57,
# "Pmin": 73.476, "rhomin": 29.249,
"nr1": [0.10968643098001e1, -0.29988888298061e1, 0.99516886799212,
-0.16170708558539, 0.11334460072775, 0.26760595150748e-3],
"d1": [1, 1, 1, 2, 3, 7],
"t1": [0.25, 1.125, 1.5, 1.375, 0.25, 0.875],
"nr2": [0.40979881986931, -0.40876423083075e-1, -0.38169482469447,
-0.10931956843993, -0.32073223327990e-1, 0.16877016216975e-1],
"d2": [2, 5, 1, 4, 3, 4],
"t2": [0.625, 1.75, 3.625, 3.625, 14.5, 12.],
"c2": [1, 1, 2, 2, 3, 3],
"gamma2": [1]*6}
helmholtz3 = {
"__type__": "Helmholtz",
"__name__": "Helmholtz equation of state for pentane of Polt et al. (1992)",
"__doi__": {"autor": "Polt, A., Platzer, B., and Maurer, G.",
"title": "Parameter der thermischen Zustandsgleichung von Bender fuer 14 mehratomige reine Stoffe",
"ref": "Chem. Technik 22(1992)6 , 216/224",
"doi": ""},
"R": 8.3143,
"cp": CP1,
"ref": "NBP",
"Tmin": 238.0, "Tmax": 573.0, "Pmax": 30000.0, "rhomax": 9.410819,
"Pmin": 3.624503, "rhomin": 9.3861,
"nr1": [-0.117648900900e1, 0.163499095773e1, -0.366669005817,
0.724947274043, -0.221919300269e1, 0.188671490348e1,
-0.195774652096e1, 0.308440851184, 0.437424419722,
-0.625853472351, 0.382868807091, -0.119467393955, 0.218631441082,
0.485668874195e-1, -0.132198161379, 0.213549844850e-1],
"d1": [0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 3, 3, 4, 4, 5],
"t1": [3, 4, 5, 0, 1, 2, 3, 4, 0, 1, 2, 0, 1, 0, 1, 1],
"nr2": [0.117648900900e1, -0.163499095773e1, 0.366669005817,
-0.363660829618e-2, 0.633672105685, -0.705792643982],
"d2": [0, 0, 0, 2, 2, 2],
"t2": [3, 4, 5, 3, 4, 5],
"c2": [2]*6,
"gamma2": [0.968832]*6}
helmholtz4 = {
"__type__": "Helmholtz",
"__name__": "Helmholtz equation of state for pentane of Starling (1973)",
"__doi__": {"autor": "Starling, K.E.",
"title": "Fluid Thermodynamic Properties for Light Petroleum Systems",
"ref": "Gulf Publishing Company, 1973.",
"doi": ""},
"R": 8.3159524,
"cp": CP2,
"ref": "NBP",
"Tmin": 177.0, "Tmax": 589.0, "Pmax": 55000.0, "rhomax": 10.2534,
"Pmin": 0.011064, "rhomin": 10.253,
"nr1": [0.175873733594e1, 0.485604047435, -0.111896446456e1,
-0.685918143315, 0.368714111378e-1, -0.167498784887e-2,
0.327765295239, -0.352742092747, -0.999487301826e-1,
0.781999120830e-2, 0.221577806386e-2],
"d1": [0, 1, 1, 1, 1, 1, 2, 2, 2, 5, 5],
"t1": [3, 0, 1, 3, 4, 5, 0, 1, 2, 1, 2],
"nr2": [-0.175873733594e1, -0.411653507564],
"d2": [0, 2],
"t2": [3, 3],
"c2": [2]*2,
"gamma2": [0.46812392]*2}
helmholtz5 = {
"__type__": "Helmholtz",
"__name__": "Helmholtz equation of state for pentane of Sun and Ely (2004)",
"__doi__": {"autor": "Sun, L. and Ely, J.F.",
"title": "Universal equation of state for engineering application: Algorithm and application to non-polar and polar fluids",
"ref": "Fluid Phase Equilib., 222-223:107-118, 2004.",
"doi": "10.1016/j.fluid.2004.06.028"},
"R": 8.31451,
"cp": Fi1,
"ref": "OTO",
"Tmin": Tt, "Tmax": 620.0, "Pmax": 800000.0, "rhomax": 40.,
"Pmin": 0.1, "rhomin": 40.,
"nr1": [2.20261753, 1.07797592, -3.82130221, 1.06627357e-1,
3.07513215e-4, -2.84309667e-1],
"d1": [1, 1, 1, 3, 7, 2],
"t1": [1.5, 0.25, 1.25, 0.25, 0.875, 1.375],
"nr2": [-7.28441220e-2, -4.60943732e-1, 8.39360011e-2 , -1.50650444e-2,
-2.03771872e-1, -7.90244277e-3, -5.68993564e-2, -2.99387974e-2],
"d2": [1, 1, 2, 5, 1, 1, 4, 2],
"t2": [0, 2.375, 2., 2.125, 3.5, 6.5, 4.75, 12.5],
"c2": [1, 1, 1, 1, 2, 2, 2, 3],
"gamma2": [1]*8}
MBWR = {
"__type__": "MBWR",
"__name__": " MBWR equation of state for pentane of Ratanapisit (1999).",
"__doi__": {"autor": "Ratanapisit, J., Ely, J.F.",
"title": "Application of New, Modified BWR Equations of State to the Corresponding-States Prediction of Natural Gas Properties",
"ref": "Int. J. Thermophys., 20(6):1721-1735, 1999.",
"doi": "10.1023/A:1022610013596"},
"R": 8.31434,
"cp": Fi1,
"ref": "OTO",
"Tmin": Tt, "Tmax": 600.0, "Pmax": 70000.0, "rhomax": 11.2,
"Pmin": 0.0000815, "rhomin": 10.558,
"b": [None, -7.41533782499e-2, 7.54044021950, -1.93328401588e2,
3.39428034054e4, -5.12571561595e6, 1.51195406963e-3,
-7.12225059892, 4.12664185793e3, 8.40258305443e5,
-4.68416651753e-4, 3.03565637672, -1.42146321204e3,
-1.10170659283e-1, -9.80664356304, 1.10979804446e3, 2.98029604130,
-1.41484307201e-1, -3.39208006239e1, 2.08782048763,
5.38055429992e5, -6.40401885304e8, -1.19676622034e5,
1.71973349582e10, -3.06383363882e3, 1.43168348944e6,
1.41452433419e1, -2.52955687564e7, -3.85316416299,
2.65416349789e3, 4.76643876980e-3, -8.37595968663,
-1.35160880503e3]}
eq = helmholtz1, GERG, helmholtz3, helmholtz4, helmholtz5, MBWR
_surface = {"sigma": [0.08015, 0.004384, -0.03437],
"exp": [1.408, 1.031, 1.818]}
_dielectric = {"eq": 3, "Tref": 273.16, "rhoref": 1000.,
"a0": [0.10924], "expt0": [-1.], "expd0": [1.],
"a1": [25.39, 0.025], "expt1": [0, 1], "expd1": [1, 1],
"a2": [78.39, 54.15, -12480, -4800.0],
"expt2": [0, 1, 0, 1], "expd2": [2, 2, 3, 3]}
_melting = {"eq": 1, "Tref": Tt, "Pref": 0.76322e-4,
"Tmin": Tt, "Tmax": 2000.0,
"a1": [-8647500000, 8647500001], "exp1": [0, 1.649],
"a2": [], "exp2": [], "a3": [], "exp3": []}
_vapor_Pressure = {
"eq": 5,
"ao": [-0.73918e1, 0.31102e1, -0.22415e1, -0.31585e1, -0.90451],
"exp": [1., 1.5, 1.74, 3.75, 8.0]}
_liquid_Density = {
"eq": 1,
"ao": [0.10178e1, 0.42703, 0.11334e1, 0.41518, -0.47950e-1],
"exp": [0.27, 0.44, 0.6, 4.0, 5.0]}
_vapor_Density = {
"eq": 3,
"ao": [-0.29389e1, -0.62784e1, -0.19941e2, -0.16709e2, -0.36543e2, -0.12799e3],
"exp": [0.4, 1.18, 3.2, 6.6, 7.0, 15.0]}
visco0 = {"eq": 2, "omega": 3,
"__name__": "NIST14",
"__doi__": {"autor": "",
"title": "Coefficients are taken from NIST14, Version 9.08",
"ref": "",
"doi": ""},
"ek": 341.10, "sigma": 0.5784,
"n_chapman": 0.226720214/M**0.5,
"F": [0, 0, 0, 100],
"E": [-13.47938293, 1176.6275165, 14.2278439927, -21951.0293411,
0.03766867689, 70.1529173825, 21435.7720323],
"rhoc": 3.215}
visco1 = {"eq": 4, "omega": 1,
"__name__": "Quiñones-Cisneros (2006)",
"__doi__": {"autor": "S.E.Quiñones-Cisneros and U.K. Deiters",
"title": "Generalization of the Friction Theory for Viscosity Modeling",
"ref": "J. Phys. Chem. B, 2006, 110 (25), pp 12820–12834",
"doi": "10.1021/jp0618577"},
"Tref": 469.7, "muref": 1.0,
"ek": 341.1, "sigma": 0.5784, "n_chapman": 0,
"n_ideal": [17.6805, -55.6942, 48.7177],
"t_ideal": [0, 0.25, 0.5],
"a": [1.08193e-5, -4.71699e-5, 0.0],
"b": [1.21502e-4, -9.84766e-5, 0.0],
"c": [5.08307e-5, -1.07e-5, 0.0],
"A": [-2.10025e-10, -1.56583e-9, 0.0],
"B": [1.98521e-8, 2.05972e-9, 0.0],
"C": [-1.18487e-7, 1.69571e-7, 0.0],
"D": [0.0, 0.0, 0.0]}
_viscosity = visco0, visco1
thermo0 = {"eq": 1,
"__name__": "NIST14",
"__doi__": {"autor": "",
"title": "Coefficients are taken from NIST14, Version 9.08",
"ref": "",
"doi": ""},
"Tref": 341.1, "kref": 1e-3,
"no": [1.35558587, -0.15569137, 1],
"co": [0, -1, -96],
"Trefb": 469.69, "rhorefb": 3.215, "krefb": 1e-3,
"nb": [18.6089331038, -5.83657061299, 3.48987100529,
0.704467355508, -0.206501417728, -0.22307039402],
"tb": [0, 0, 0, -1, 0, -1],
"db": [1, 3, 4, 4, 5, 5],
"cb": [0]*6,
"critical": 3,
"gnu": 0.63, "gamma": 1.239, "R0": 1.03,
"Xio": 0.194e-9, "gam0": 0.0496, "qd": 0.9345e-9, "Tcref": 704.55}
_thermal = thermo0,
| gpl-3.0 | 4,311,531,323,736,320,000 | 41.406667 | 149 | 0.465021 | false |
sunu/oppia-test-4 | oppia/apps/exploration/models.py | 1 | 3784 | # coding: utf-8
#
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model for an Oppia exploration."""
__author__ = 'Sean Lip'
from oppia.apps.base_model.models import IdModel
from oppia.apps.parameter.models import Parameter
from oppia.apps.state.models import State
from django.db import models
from oppia.django_utils import JSONField, ListField
QUERY_LIMIT = 100
class ExplorationModel(IdModel):
"""Storage model for an Oppia exploration.
This class should only be imported by the exploration domain file, the
exploration services file, and the Exploration model test file.
"""
# TODO(sll): Write a test that ensures that the only files that are
# allowed to import this class are the ones described above.
# The category this exploration belongs to.
category = models.CharField(max_length=100)
# What this exploration is called.
title = models.CharField(max_length=100, default='New exploration')
# The list of state ids this exploration consists of. This list should not
# be empty.
state_ids = ListField(default=[], blank=True)
# The list of parameters associated with this exploration.
parameters = JSONField(blank=True, default=[], schema=[Parameter])
# Whether this exploration is publicly viewable.
is_public = models.BooleanField(default=False)
# The id for the image to show as a preview of the exploration.
image_id = models.CharField(blank=True, max_length=100, null=True)
# List of ids of users who can edit this exploration. If the exploration is
# a demo exploration, the list is empty. Otherwise, the first element is
# the original creator of the exploration.
editor_ids = ListField(default=[], blank=True)
@classmethod
def get_all_explorations(cls):
"""Returns an filterable iterable containing all explorations."""
return cls.objects.all()
@classmethod
def get_public_explorations(cls):
"""Returns an iterable containing publicly-available explorations."""
return cls.get_all_explorations().filter(is_public=True)
@classmethod
def get_viewable_explorations(cls, user_id):
"""Returns a list of explorations viewable by the given user."""
public_explorations = cls.get_public_explorations()
if user_id:
editable_explorations = cls.objects.filter(editor_ids__icontains=user_id)
return list(set(public_explorations).union(editable_explorations))
else:
return public_explorations
@classmethod
def get_exploration_count(cls):
"""Returns the total number of explorations."""
return cls.get_all_explorations().count()
def delete(self):
"""Deletes the exploration."""
super(ExplorationModel, self).delete()
def put(self, properties=None):
"""Updates the exploration using the properties dict, then saves it."""
if properties is None:
properties = {}
# In NDB, self._properties() returns the list of ndb properties of a
# model.
for key in self.attr_list():
if key in properties:
setattr(self, key, properties[key])
super(ExplorationModel, self).put()
| apache-2.0 | 4,514,169,461,693,996,000 | 37.222222 | 85 | 0.696882 | false |
wangheda/youtube-8m | youtube-8m-zhangteng/losses_embedding.py | 1 | 6481 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides definitions for non-regularized training or test losses."""
import tensorflow as tf
from tensorflow import flags
import numpy as np
FLAGS = flags.FLAGS
flags.DEFINE_integer(
"num_pairs", 10,
"The number of pairs (excluding the dummy 'expert') used for Hingeloss.")
flags.DEFINE_string("class_file", "./resources/labels_knowledge.out",
"The directory to save the 24 top-level verticals in, used for 'calculate_loss_mix'")
flags.DEFINE_string("frequent_file", "./resources/labels_frequent.out",
"The directory to save the frequency of 4716 labels in, used only in early experiment.")
flags.DEFINE_string("autoencoder_dir", "./resources/",
"The directory to save the autoencoder model layers in.")
flags.DEFINE_string("support_type", None,
"The support type for mix models, options are None, class, frequent and encoder,"
"used for 'calculate_loss_mix'.")
flags.DEFINE_string("loss_function", None,
"different loss funtions used in CrossEntropyLoss.")
flags.DEFINE_integer("encoder_layers", 2,
"The number of autoencoder layers.")
flags.DEFINE_float("jsd_pi", 0.5,
"wight used when loss function is loss_jsd.")
flags.DEFINE_float("threshold", 0.5,
"used only in early experiment.")
class BaseLoss(object):
"""Inherit from this class when implementing new losses."""
def calculate_loss(self, unused_predictions, unused_labels, **unused_params):
"""Calculates the average loss of the examples in a mini-batch.
Args:
unused_predictions: a 2-d tensor storing the prediction scores, in which
each row represents a sample in the mini-batch and each column
represents a class.
unused_labels: a 2-d tensor storing the labels, which has the same shape
as the unused_predictions. The labels must be in the range of 0 and 1.
unused_params: loss specific parameters.
Returns:
A scalar loss tensor.
"""
raise NotImplementedError()
class CrossEntropyLoss(BaseLoss):
"""Calculate the cross entropy loss between the predictions and labels.
"""
def calculate_loss(self, predictions, labels, **unused_params):
with tf.name_scope("loss_xent"):
epsilon = 10e-6
origin_labels = tf.cast(labels, tf.float32)
vocab_size = origin_labels.get_shape().as_list()[1]
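      # replicate each sample's labels top_k times so they line up with the
      # flattened per-sample top-k prediction rows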
float_labels = tf.tile(tf.reshape(origin_labels,[-1, 1, vocab_size]),[1,FLAGS.top_k,1])
float_labels = tf.reshape(float_labels,[-1,vocab_size])
cross_entropy_loss = float_labels * tf.log(predictions + epsilon) + (
1 - float_labels) * tf.log(1 - predictions + epsilon)
cross_entropy_loss = tf.negative(cross_entropy_loss)
num_labels = tf.minimum(tf.reduce_sum(origin_labels,axis=1),tf.constant(FLAGS.top_k,dtype=tf.float32))
mask = tf.reshape(tf.sequence_mask(num_labels,tf.constant(FLAGS.top_k,dtype=tf.float32),dtype=tf.float32),[-1])
cross_entropy_loss = tf.reduce_sum(tf.reduce_sum(cross_entropy_loss, 1)*mask)/(tf.reduce_sum(mask)+epsilon)
return cross_entropy_loss
class SoftmaxLoss(BaseLoss):
"""Calculate the softmax loss between the predictions and labels.
The function calculates the loss in the following way: first we feed the
predictions to the softmax activation function and then we calculate
the minus linear dot product between the logged softmax activations and the
normalized ground truth label.
It is an extension to the one-hot label. It allows for more than one positive
labels for each sample.
"""
def calculate_loss(self, predictions, labels, **unused_params):
bound = FLAGS.softmax_bound
vocab_size_1 = bound
with tf.name_scope("loss_softmax"):
epsilon = 10e-8
float_labels = tf.cast(labels, tf.float32)
labels_1 = float_labels[:,:vocab_size_1]
predictions_1 = predictions[:,:vocab_size_1]
cross_entropy_loss = CrossEntropyLoss().calculate_loss(predictions_1,labels_1)
      labels_2 = float_labels[:,vocab_size_1:]
      predictions_2 = predictions[:,vocab_size_1:]
      # l1 normalization (labels are no less than 0)
      label_rowsum = tf.maximum(
          tf.reduce_sum(labels_2, 1, keep_dims=True),
          epsilon)
      label_append = 1.0-tf.reduce_max(labels_2, 1, keep_dims=True)
      norm_float_labels = tf.concat((tf.div(labels_2, label_rowsum),label_append),axis=1)
predictions_append = 1.0-tf.reduce_sum(predictions_2, 1, keep_dims=True)
softmax_outputs = tf.concat((predictions_2,predictions_append),axis=1)
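      # softmax-style cross entropy over the augmented rows: labels gain a
      # 1 - max catch-all term and predictions a 1 - sum remainder column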
softmax_loss = norm_float_labels * tf.log(softmax_outputs + epsilon) + (
1 - norm_float_labels) * tf.log(1 - softmax_outputs + epsilon)
softmax_loss = tf.negative(tf.reduce_sum(softmax_loss, 1))
return tf.reduce_mean(softmax_loss) + cross_entropy_loss
def calculate_loss_mix(self, predictions, predictions_class, labels, **unused_params):
with tf.name_scope("loss_softmax_mix"):
vocab_size = labels.get_shape().as_list()[1]
cross_entropy_class = tf.constant(0.0)
for i in range(FLAGS.moe_layers):
predictions_subclass = predictions_class[:,i*vocab_size:(i+1)*vocab_size]
cross_entropy_class = cross_entropy_class + self.calculate_loss(predictions_subclass,labels)
cross_entropy_loss = self.calculate_loss(predictions,labels)
return cross_entropy_loss + 0.1*cross_entropy_class
| apache-2.0 | -204,301,208,306,886,980 | 50.848 | 149 | 0.647122 | false |
fedspendingtransparency/data-act-broker-backend | tests/unit/dataactbroker/test_update_historical_duns.py | 1 | 27488 | import os
import pandas as pd
from dataactbroker.scripts import update_historical_duns
from dataactcore.config import CONFIG_BROKER
from dataactcore.utils.duns import DUNS_COLUMNS, EXCLUDE_FROM_API
from dataactcore.models.domainModels import DUNS, HistoricDUNS
def test_remove_existing_duns(database):
""" Testing the removing existing duns function"""
sess = database.session
    # of the duns 000000000-000000009, the first four are already in the database
all_duns = ['00000000{}'.format(x) for x in range(0, 10)]
existing_duns = all_duns[: 4]
data = pd.DataFrame.from_dict({'awardee_or_recipient_uniqu': all_duns})
for duns in existing_duns:
sess.add(DUNS(awardee_or_recipient_uniqu=duns))
sess.commit()
# confirm that the dataframe returned only has half the duns
expected_duns = list(set(existing_duns) ^ set(all_duns))
new_df = update_historical_duns.remove_existing_duns(data, sess)
assert sorted(expected_duns) == sorted(new_df['awardee_or_recipient_uniqu'].tolist())
def mock_get_duns_props_from_sam(duns_list):
""" Mock function for get_duns_props as we can't connect to the SAM service """
request_cols = [col for col in DUNS_COLUMNS if col not in EXCLUDE_FROM_API]
columns = request_cols
results = pd.DataFrame(columns=columns)
duns_mappings = {
'000000001': {
'awardee_or_recipient_uniqu': '000000001',
'uei': 'A1',
'legal_business_name': 'Legal Name 1',
'dba_name': 'Name 1',
'entity_structure': '1A',
'ultimate_parent_unique_ide': '999999999',
'ultimate_parent_uei': 'Z9',
'ultimate_parent_legal_enti': 'Parent Legal Name 1',
'address_line_1': 'Test address 1',
'address_line_2': 'Test address 2',
'city': 'Test city',
'state': 'Test state',
'zip': 'Test zip',
'zip4': 'Test zip4',
'country_code': 'Test country',
'congressional_district': 'Test congressional district',
'business_types_codes': [['A', 'B', 'C']],
'business_types': [['Name A', 'Name B', 'Name C']],
'high_comp_officer1_full_na': 'Test Exec 1',
'high_comp_officer1_amount': '1',
'high_comp_officer2_full_na': 'Test Exec 2',
'high_comp_officer2_amount': '2',
'high_comp_officer3_full_na': 'Test Exec 3',
'high_comp_officer3_amount': '3',
'high_comp_officer4_full_na': 'Test Exec 4',
'high_comp_officer4_amount': '4',
'high_comp_officer5_full_na': 'Test Exec 5',
'high_comp_officer5_amount': '5'
},
'000000002': {
'awardee_or_recipient_uniqu': '000000002',
'uei': 'B2',
'legal_business_name': 'Legal Name 2',
'dba_name': 'Name 2',
'entity_structure': '2B',
'ultimate_parent_unique_ide': '999999998',
'ultimate_parent_uei': 'Y8',
'ultimate_parent_legal_enti': 'Parent Legal Name 2',
'address_line_1': 'Other Test address 1',
'address_line_2': 'Other Test address 2',
'city': 'Other Test city',
'state': 'Other Test state',
'zip': 'Other Test zip',
'zip4': 'Other Test zip4',
'country_code': 'Other Test country',
'congressional_district': 'Other Test congressional district',
'business_types_codes': [['D', 'E', 'F']],
'business_types': [['Name D', 'Name E', 'Name F']],
'high_comp_officer1_full_na': 'Test Other Exec 6',
'high_comp_officer1_amount': '6',
'high_comp_officer2_full_na': 'Test Other Exec 7',
'high_comp_officer2_amount': '7',
'high_comp_officer3_full_na': 'Test Other Exec 8',
'high_comp_officer3_amount': '8',
'high_comp_officer4_full_na': 'Test Other Exec 9',
'high_comp_officer4_amount': '9',
'high_comp_officer5_full_na': 'Test Other Exec 10',
'high_comp_officer5_amount': '10'
}
}
for duns in duns_list:
if duns in duns_mappings:
results = results.append(pd.DataFrame(duns_mappings[duns]), sort=True)
return results
def test_update_duns_props(monkeypatch):
""" Testing updating the duns props with both populated/blank data """
monkeypatch.setattr('dataactcore.utils.duns.get_duns_props_from_sam', mock_get_duns_props_from_sam)
duns_df = pd.DataFrame.from_dict({
'awardee_or_recipient_uniqu': ['000000001', '000000002', '000000003']
})
expected_df = pd.DataFrame.from_dict({
'awardee_or_recipient_uniqu': ['000000001', '000000002', '000000003'],
'uei': ['A1', 'B2', None],
'address_line_1': ['Test address 1', 'Other Test address 1', None],
'address_line_2': ['Test address 2', 'Other Test address 2', None],
'city': ['Test city', 'Other Test city', None],
'state': ['Test state', 'Other Test state', None],
'zip': ['Test zip', 'Other Test zip', None],
'zip4': ['Test zip4', 'Other Test zip4', None],
'country_code': ['Test country', 'Other Test country', None],
'congressional_district': ['Test congressional district', 'Other Test congressional district', None],
'business_types_codes': [['A', 'B', 'C'], ['D', 'E', 'F'], []],
'business_types': [['Name A', 'Name B', 'Name C'], ['Name D', 'Name E', 'Name F'], []],
'entity_structure': ['1A', '2B', None],
'dba_name': ['Name 1', 'Name 2', None],
'ultimate_parent_unique_ide': ['999999999', '999999998', None],
'ultimate_parent_uei': ['Z9', 'Y8', None],
'ultimate_parent_legal_enti': ['Parent Legal Name 1', 'Parent Legal Name 2', None],
        'high_comp_officer1_full_na': ['Test Exec 1', 'Test Other Exec 6', None],
        'high_comp_officer1_amount': ['1', '6', None],
        'high_comp_officer2_full_na': ['Test Exec 2', 'Test Other Exec 7', None],
        'high_comp_officer2_amount': ['2', '7', None],
        'high_comp_officer3_full_na': ['Test Exec 3', 'Test Other Exec 8', None],
        'high_comp_officer3_amount': ['3', '8', None],
        'high_comp_officer4_full_na': ['Test Exec 4', 'Test Other Exec 9', None],
        'high_comp_officer4_amount': ['4', '9', None],
        'high_comp_officer5_full_na': ['Test Exec 5', 'Test Other Exec 10', None],
        'high_comp_officer5_amount': ['5', '10', None]
})
assert expected_df.sort_index(inplace=True) == update_historical_duns.update_duns_props(duns_df)\
.sort_index(inplace=True)
def test_update_duns_props_empty(monkeypatch):
""" Special case where no data is returned """
monkeypatch.setattr('dataactcore.utils.duns.get_duns_props_from_sam', mock_get_duns_props_from_sam)
duns_df = pd.DataFrame.from_dict({
'awardee_or_recipient_uniqu': ['000000003']
})
expected_df = pd.DataFrame.from_dict({
'awardee_or_recipient_uniqu': ['000000003'],
'uei': [None],
'address_line_1': [None],
'address_line_2': [None],
'city': [None],
'state': [None],
'zip': [None],
'zip4': [None],
'country_code': [None],
'congressional_district': [None],
'business_types_codes': [[]],
'business_types': [[]],
'dba_name': [None],
'entity_structure': [None],
'ultimate_parent_unique_ide': [None],
'ultimate_parent_uei': [None],
'ultimate_parent_legal_enti': [None],
'high_comp_officer1_full_na': [None],
'high_comp_officer1_amount': [None],
'high_comp_officer2_full_na': [None],
'high_comp_officer2_amount': [None],
'high_comp_officer3_full_na': [None],
'high_comp_officer3_amount': [None],
'high_comp_officer4_full_na': [None],
'high_comp_officer4_amount': [None],
'high_comp_officer5_full_na': [None],
'high_comp_officer5_amount': [None]
})
assert expected_df.to_dict() == update_historical_duns.update_duns_props(duns_df).to_dict()
def test_run_duns_batches(database, monkeypatch):
""" Test run_duns_batches for the core functionality """
monkeypatch.setattr('dataactcore.utils.duns.get_duns_props_from_sam', mock_get_duns_props_from_sam)
sess = database.session
all_duns = ['00000000{}'.format(x) for x in range(1, 5)]
existing_duns = all_duns[2:]
for duns in existing_duns:
sess.add(DUNS(awardee_or_recipient_uniqu=duns))
sess.commit()
duns_file = os.path.join(CONFIG_BROKER['path'], 'tests', 'unit', 'data', 'historic_DUNS_export_small.csv')
update_historical_duns.run_duns_batches(duns_file, sess, block_size=1)
expected_results = {
'000000001': {
'awardee_or_recipient_uniqu': '000000001',
'uei': 'A1',
'registration_date': '2004-04-01',
'expiration_date': '2013-01-11',
'last_sam_mod_date': '2013-01-11',
'activation_date': '2012-01-11',
'legal_business_name': 'TEST DUNS 1',
'address_line_1': 'Test address 1',
'address_line_2': 'Test address 2',
'city': 'Test city',
'state': 'Test state',
'zip': 'Test zip',
'zip4': 'Test zip4',
'country_code': 'Test country',
'congressional_district': 'Test congressional district',
'business_types_codes': ['A', 'B', 'C'],
'business_types': ['Name A', 'Name B', 'Name C'],
'dba_name': 'Name 1',
'entity_structure': '1A',
'ultimate_parent_unique_ide': '999999999',
'ultimate_parent_uei': 'Z9',
'ultimate_parent_legal_enti': 'Parent Legal Name 1',
'high_comp_officer1_full_na': 'Test Exec 1',
'high_comp_officer1_amount': '1',
'high_comp_officer2_full_na': 'Test Exec 2',
'high_comp_officer2_amount': '2',
'high_comp_officer3_full_na': 'Test Exec 3',
'high_comp_officer3_amount': '3',
'high_comp_officer4_full_na': 'Test Exec 4',
'high_comp_officer4_amount': '4',
'high_comp_officer5_full_na': 'Test Exec 5',
'high_comp_officer5_amount': '5'
},
'000000002': {
'awardee_or_recipient_uniqu': '000000002',
'uei': 'B2',
'registration_date': '2004-04-02',
'expiration_date': '2013-01-12',
'last_sam_mod_date': '2013-01-12',
'activation_date': '2012-01-12',
'legal_business_name': 'TEST DUNS 2',
'address_line_1': 'Other Test address 1',
'address_line_2': 'Other Test address 2',
'city': 'Other Test city',
'state': 'Other Test state',
'zip': 'Other Test zip',
'zip4': 'Other Test zip4',
'country_code': 'Other Test country',
'congressional_district': 'Other Test congressional district',
'business_types_codes': ['D', 'E', 'F'],
'business_types': ['Name D', 'Name E', 'Name F'],
'dba_name': 'Name 2',
'entity_structure': '2B',
'ultimate_parent_unique_ide': '999999998',
'ultimate_parent_uei': 'Y8',
'ultimate_parent_legal_enti': 'Parent Legal Name 2',
'high_comp_officer1_full_na': 'Test Other Exec 6',
'high_comp_officer1_amount': '6',
'high_comp_officer2_full_na': 'Test Other Exec 7',
'high_comp_officer2_amount': '7',
'high_comp_officer3_full_na': 'Test Other Exec 8',
'high_comp_officer3_amount': '8',
'high_comp_officer4_full_na': 'Test Other Exec 9',
'high_comp_officer4_amount': '9',
'high_comp_officer5_full_na': 'Test Other Exec 10',
'high_comp_officer5_amount': '10'
}
}
results = {}
for duns_obj in sess.query(HistoricDUNS).all():
results[duns_obj.awardee_or_recipient_uniqu] = {
'awardee_or_recipient_uniqu': duns_obj.awardee_or_recipient_uniqu,
'uei': duns_obj.uei,
'registration_date': str(duns_obj.registration_date) if duns_obj.registration_date else None,
'expiration_date': str(duns_obj.expiration_date) if duns_obj.expiration_date else None,
'last_sam_mod_date': str(duns_obj.last_sam_mod_date) if duns_obj.last_sam_mod_date else None,
'activation_date': str(duns_obj.activation_date) if duns_obj.activation_date else None,
'legal_business_name': duns_obj.legal_business_name,
'address_line_1': duns_obj.address_line_1,
'address_line_2': duns_obj.address_line_2,
'city': duns_obj.city,
'state': duns_obj.state,
'zip': duns_obj.zip,
'zip4': duns_obj.zip4,
'country_code': duns_obj.country_code,
'congressional_district': duns_obj.congressional_district,
'business_types_codes': duns_obj.business_types_codes,
'business_types': duns_obj.business_types,
'dba_name': duns_obj.dba_name,
'entity_structure': duns_obj.entity_structure,
'ultimate_parent_unique_ide': duns_obj.ultimate_parent_unique_ide,
'ultimate_parent_uei': duns_obj.ultimate_parent_uei,
'ultimate_parent_legal_enti': duns_obj.ultimate_parent_legal_enti,
'high_comp_officer1_full_na': duns_obj.high_comp_officer1_full_na,
'high_comp_officer1_amount': duns_obj.high_comp_officer1_amount,
'high_comp_officer2_full_na': duns_obj.high_comp_officer2_full_na,
'high_comp_officer2_amount': duns_obj.high_comp_officer2_amount,
'high_comp_officer3_full_na': duns_obj.high_comp_officer3_full_na,
'high_comp_officer3_amount': duns_obj.high_comp_officer3_amount,
'high_comp_officer4_full_na': duns_obj.high_comp_officer4_full_na,
'high_comp_officer4_amount': duns_obj.high_comp_officer4_amount,
'high_comp_officer5_full_na': duns_obj.high_comp_officer5_full_na,
'high_comp_officer5_amount': duns_obj.high_comp_officer5_amount
}
assert results == expected_results
def test_workflows(database, monkeypatch):
""" Test both scenarios of the script, starting with a full run """
monkeypatch.setattr('dataactcore.utils.duns.get_duns_props_from_sam', mock_get_duns_props_from_sam)
sess = database.session
all_duns = ['00000000{}'.format(x) for x in range(1, 5)]
existing_duns = all_duns[2:]
for duns in existing_duns:
sess.add(DUNS(awardee_or_recipient_uniqu=duns))
sess.commit()
duns_file = os.path.join(CONFIG_BROKER['path'], 'tests', 'unit', 'data', 'historic_DUNS_export_small.csv')
update_historical_duns.run_duns_batches(duns_file, sess, block_size=1)
update_historical_duns.import_historic_duns(sess)
expected_results = {
'000000001': {
'awardee_or_recipient_uniqu': '000000001',
'uei': 'A1',
'registration_date': '2004-04-01',
'expiration_date': '2013-01-11',
'last_sam_mod_date': '2013-01-11',
'activation_date': '2012-01-11',
'legal_business_name': 'TEST DUNS 1',
'address_line_1': 'Test address 1',
'address_line_2': 'Test address 2',
'city': 'Test city',
'state': 'Test state',
'zip': 'Test zip',
'zip4': 'Test zip4',
'country_code': 'Test country',
'congressional_district': 'Test congressional district',
'business_types_codes': ['A', 'B', 'C'],
'business_types': ['Name A', 'Name B', 'Name C'],
'dba_name': 'Name 1',
'entity_structure': '1A',
'ultimate_parent_unique_ide': '999999999',
'ultimate_parent_uei': 'Z9',
'ultimate_parent_legal_enti': 'Parent Legal Name 1',
'high_comp_officer1_full_na': 'Test Exec 1',
'high_comp_officer1_amount': '1',
'high_comp_officer2_full_na': 'Test Exec 2',
'high_comp_officer2_amount': '2',
'high_comp_officer3_full_na': 'Test Exec 3',
'high_comp_officer3_amount': '3',
'high_comp_officer4_full_na': 'Test Exec 4',
'high_comp_officer4_amount': '4',
'high_comp_officer5_full_na': 'Test Exec 5',
'high_comp_officer5_amount': '5'
},
'000000002': {
'awardee_or_recipient_uniqu': '000000002',
'uei': 'B2',
'registration_date': '2004-04-02',
'expiration_date': '2013-01-12',
'last_sam_mod_date': '2013-01-12',
'activation_date': '2012-01-12',
'legal_business_name': 'TEST DUNS 2',
'address_line_1': 'Other Test address 1',
'address_line_2': 'Other Test address 2',
'city': 'Other Test city',
'state': 'Other Test state',
'zip': 'Other Test zip',
'zip4': 'Other Test zip4',
'country_code': 'Other Test country',
'congressional_district': 'Other Test congressional district',
'business_types_codes': ['D', 'E', 'F'],
'business_types': ['Name D', 'Name E', 'Name F'],
'dba_name': 'Name 2',
'entity_structure': '2B',
'ultimate_parent_unique_ide': '999999998',
'ultimate_parent_uei': 'Y8',
'ultimate_parent_legal_enti': 'Parent Legal Name 2',
'high_comp_officer1_full_na': 'Test Other Exec 6',
'high_comp_officer1_amount': '6',
'high_comp_officer2_full_na': 'Test Other Exec 7',
'high_comp_officer2_amount': '7',
'high_comp_officer3_full_na': 'Test Other Exec 8',
'high_comp_officer3_amount': '8',
'high_comp_officer4_full_na': 'Test Other Exec 9',
'high_comp_officer4_amount': '9',
'high_comp_officer5_full_na': 'Test Other Exec 10',
'high_comp_officer5_amount': '10'
},
'000000003': {
'awardee_or_recipient_uniqu': '000000003',
'uei': None,
'registration_date': None,
'expiration_date': None,
'last_sam_mod_date': None,
'activation_date': None,
'legal_business_name': None,
'address_line_1': None,
'address_line_2': None,
'city': None,
'state': None,
'zip': None,
'zip4': None,
'country_code': None,
'congressional_district': None,
'business_types_codes': None,
'business_types': None,
'dba_name': None,
'entity_structure': None,
'ultimate_parent_unique_ide': None,
'ultimate_parent_uei': None,
'ultimate_parent_legal_enti': None,
'high_comp_officer1_full_na': None,
'high_comp_officer1_amount': None,
'high_comp_officer2_full_na': None,
'high_comp_officer2_amount': None,
'high_comp_officer3_full_na': None,
'high_comp_officer3_amount': None,
'high_comp_officer4_full_na': None,
'high_comp_officer4_amount': None,
'high_comp_officer5_full_na': None,
'high_comp_officer5_amount': None
},
'000000004': {
'awardee_or_recipient_uniqu': '000000004',
'uei': None,
'registration_date': None,
'expiration_date': None,
'last_sam_mod_date': None,
'activation_date': None,
'legal_business_name': None,
'address_line_1': None,
'address_line_2': None,
'city': None,
'state': None,
'zip': None,
'zip4': None,
'country_code': None,
'congressional_district': None,
'business_types_codes': None,
'business_types': None,
'dba_name': None,
'entity_structure': None,
'ultimate_parent_unique_ide': None,
'ultimate_parent_uei': None,
'ultimate_parent_legal_enti': None,
'high_comp_officer1_full_na': None,
'high_comp_officer1_amount': None,
'high_comp_officer2_full_na': None,
'high_comp_officer2_amount': None,
'high_comp_officer3_full_na': None,
'high_comp_officer3_amount': None,
'high_comp_officer4_full_na': None,
'high_comp_officer4_amount': None,
'high_comp_officer5_full_na': None,
'high_comp_officer5_amount': None
}
}
results = {}
for duns_obj in sess.query(DUNS).all():
results[duns_obj.awardee_or_recipient_uniqu] = {
'awardee_or_recipient_uniqu': duns_obj.awardee_or_recipient_uniqu,
'uei': duns_obj.uei,
'registration_date': str(duns_obj.registration_date) if duns_obj.registration_date else None,
'expiration_date': str(duns_obj.expiration_date) if duns_obj.expiration_date else None,
'last_sam_mod_date': str(duns_obj.last_sam_mod_date) if duns_obj.last_sam_mod_date else None,
'activation_date': str(duns_obj.activation_date) if duns_obj.activation_date else None,
'legal_business_name': duns_obj.legal_business_name,
'address_line_1': duns_obj.address_line_1,
'address_line_2': duns_obj.address_line_2,
'city': duns_obj.city,
'state': duns_obj.state,
'zip': duns_obj.zip,
'zip4': duns_obj.zip4,
'country_code': duns_obj.country_code,
'congressional_district': duns_obj.congressional_district,
'business_types_codes': duns_obj.business_types_codes,
'business_types': duns_obj.business_types,
'dba_name': duns_obj.dba_name,
'entity_structure': duns_obj.entity_structure,
'ultimate_parent_unique_ide': duns_obj.ultimate_parent_unique_ide,
'ultimate_parent_uei': duns_obj.ultimate_parent_uei,
'ultimate_parent_legal_enti': duns_obj.ultimate_parent_legal_enti,
'high_comp_officer1_full_na': duns_obj.high_comp_officer1_full_na,
'high_comp_officer1_amount': duns_obj.high_comp_officer1_amount,
'high_comp_officer2_full_na': duns_obj.high_comp_officer2_full_na,
'high_comp_officer2_amount': duns_obj.high_comp_officer2_amount,
'high_comp_officer3_full_na': duns_obj.high_comp_officer3_full_na,
'high_comp_officer3_amount': duns_obj.high_comp_officer3_amount,
'high_comp_officer4_full_na': duns_obj.high_comp_officer4_full_na,
'high_comp_officer4_amount': duns_obj.high_comp_officer4_amount,
'high_comp_officer5_full_na': duns_obj.high_comp_officer5_full_na,
'high_comp_officer5_amount': duns_obj.high_comp_officer5_amount
}
assert results == expected_results
    # Test that removing the historic rows from the DUNS table and re-importing restores them from HistoricDUNS
sess.query(DUNS).filter(DUNS.historic.is_(True)).delete(synchronize_session=False)
# Make sure all the historic DUNS are removed from the DUNS table
assert sess.query(DUNS).filter(DUNS.historic.is_(True)).all() == []
# Redo script but don't go through run_duns_batches
update_historical_duns.clean_historic_duns(sess)
update_historical_duns.import_historic_duns(sess)
results = {}
for duns_obj in sess.query(DUNS).all():
results[duns_obj.awardee_or_recipient_uniqu] = {
'awardee_or_recipient_uniqu': duns_obj.awardee_or_recipient_uniqu,
'uei': duns_obj.uei,
'registration_date': str(duns_obj.registration_date) if duns_obj.registration_date else None,
'expiration_date': str(duns_obj.expiration_date) if duns_obj.expiration_date else None,
'last_sam_mod_date': str(duns_obj.last_sam_mod_date) if duns_obj.last_sam_mod_date else None,
'activation_date': str(duns_obj.activation_date) if duns_obj.activation_date else None,
'legal_business_name': duns_obj.legal_business_name,
'address_line_1': duns_obj.address_line_1,
'address_line_2': duns_obj.address_line_2,
'city': duns_obj.city,
'state': duns_obj.state,
'zip': duns_obj.zip,
'zip4': duns_obj.zip4,
'country_code': duns_obj.country_code,
'congressional_district': duns_obj.congressional_district,
'business_types_codes': duns_obj.business_types_codes,
'business_types': duns_obj.business_types,
'dba_name': duns_obj.dba_name,
'entity_structure': duns_obj.entity_structure,
'ultimate_parent_unique_ide': duns_obj.ultimate_parent_unique_ide,
'ultimate_parent_uei': duns_obj.ultimate_parent_uei,
'ultimate_parent_legal_enti': duns_obj.ultimate_parent_legal_enti,
'high_comp_officer1_full_na': duns_obj.high_comp_officer1_full_na,
'high_comp_officer1_amount': duns_obj.high_comp_officer1_amount,
'high_comp_officer2_full_na': duns_obj.high_comp_officer2_full_na,
'high_comp_officer2_amount': duns_obj.high_comp_officer2_amount,
'high_comp_officer3_full_na': duns_obj.high_comp_officer3_full_na,
'high_comp_officer3_amount': duns_obj.high_comp_officer3_amount,
'high_comp_officer4_full_na': duns_obj.high_comp_officer4_full_na,
'high_comp_officer4_amount': duns_obj.high_comp_officer4_amount,
'high_comp_officer5_full_na': duns_obj.high_comp_officer5_full_na,
'high_comp_officer5_amount': duns_obj.high_comp_officer5_amount
}
assert results == expected_results
def test_clean_historic_duns(database, monkeypatch):
"""
        Test to make sure that if a DUNS record has been updated by a later load and we reload the
        historic DUNS (skipping the major load), its historic equivalent is removed.
"""
monkeypatch.setattr('dataactcore.utils.duns.get_duns_props_from_sam', mock_get_duns_props_from_sam)
sess = database.session
all_duns = ['00000000{}'.format(x) for x in range(1, 5)]
existing_duns = all_duns[2:]
for duns in existing_duns:
sess.add(DUNS(awardee_or_recipient_uniqu=duns))
sess.commit()
duns_file = os.path.join(CONFIG_BROKER['path'], 'tests', 'unit', 'data', 'historic_DUNS_export_small.csv')
# normal run
update_historical_duns.run_duns_batches(duns_file, sess, block_size=1)
update_historical_duns.import_historic_duns(sess)
# update old DUNS as part of load_duns_exec_comp.py
updated_duns = sess.query(DUNS).filter(DUNS.awardee_or_recipient_uniqu == '000000002').one()
updated_duns.historic = False
sess.commit()
# rerun with a skip
update_historical_duns.clean_historic_duns(sess)
update_historical_duns.import_historic_duns(sess)
# check to see if historic duns equivalent is removed
expected_count = sess.query(HistoricDUNS).filter(HistoricDUNS.awardee_or_recipient_uniqu == '000000002').count()
assert expected_count == 0
| cc0-1.0 | -5,065,725,103,259,083,000 | 46.805217 | 116 | 0.578616 | false |
harsham05/image_space | imagespace_georgetown/server/__init__.py | 1 | 1353 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import os
from .georgetown_imagedomaindynamicssearch import \
GeorgetownImageDomainDynamicsSearch
def load(info):
index = 'IMAGE_SPACE_GEORGETOWN_DOMAIN_DYNAMICS_SEARCH'
if index not in os.environ \
or os.environ[index] == '':
raise Exception(
'Imagespace Georgetown will not function without the %s '
'environment variable.' % index)
else:
os.environ[index] = os.environ[index].rstrip('/')
info['apiRoot'].georgetown_imagedomaindynamicssearch \
= GeorgetownImageDomainDynamicsSearch()
| apache-2.0 | -2,677,355,904,935,077,000 | 36.583333 | 79 | 0.623799 | false |
huyphan/pyyawhois | yawhois/parser/za_central_registry.py | 1 | 1890 | from .base_icann_compliant import BaseIcannCompliantParser
from ..record import Contact
from ..record import Registrar
from dateutil import parser as time_parser
class ZaCentralRegistryParser(BaseIcannCompliantParser):
@property
def domain_id(self):
return self.node('Domain ID')
@property
def expires_on(self):
if self.node('Registry Expiry Date'):
return time_parser.parse(self.node('Registry Expiry Date'))
@property
def registrar(self):
if self.node("Sponsoring Registrar"):
return Registrar(
id = self.node('Sponsoring Registrar IANA ID'),
name = self.node('Sponsoring Registrar'),
organization = self.node('Sponsoring Registrar'),
)
@property
def available(self):
return not bool(self.node("Creation Date"))
def _build_contact(self, element, type_):
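        # only build a contact when the record contains a Name field for this element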
if self.node("%s Name" % element):
return Contact(**{
'type': type_,
'id': self.node("%s ID" % element),
'name': self._value_for_property(element, 'Name'),
'organization': self._value_for_property(element, 'Organization'),
'address': self._value_for_property(element, 'Street'),
'city': self._value_for_property(element, 'City'),
'zip': self._value_for_property(element, 'Postal Code'),
'state': self._value_for_property(element, 'State/Province'),
'country_code': self._value_for_property(element, 'Country'),
'phone': self._value_for_phone_property(element, 'Phone'),
'fax': self._value_for_property(element, 'Fax'),
'email': self._value_for_property(element, 'Email')
})
| mit | 2,554,784,112,505,110,500 | 41 | 84 | 0.559788 | false |
UniversidadDelEste/PyZombis | lab/week4/4_diversity1/listapalabras.py | 1 | 1529 | #!/usr/bin/python2
# coding: utf-8
"""
Given the following diversity statement:
“The Python Software Foundation and the global Python community welcome and encourage participation by everyone.
Our community is based on mutual respect, tolerance, and encouragement, and we are working to help each other live up to these principles.
We want our community to be more diverse: whoever you are, and whatever your background, we welcome you.”
Generate a list of words from the text using split(), then create a list with the words
that start or end with one of the letters in "python". Print the resulting list.
Don't forget to remove the special characters, and watch out for uppercase and lowercase letters.
"""
def main():
p = "The Python Software Foundation and the global Python community welcome and encourage participation by everyone. "\
"Our community is based on mutual respect, tolerance, and encouragement, and we are working to help each other live up to these principles. "\
"We want our community to be more diverse: whoever you are, and whatever your background, we welcome you."
lista = p.split()
caracteres = [':', ',', '.']
letras = ('P', 'Y', 'T', 'H', 'O', 'N')
palabras = []
for x in lista:
print x
palabra = x.upper().translate(None, ''.join(caracteres))
if palabra.startswith(letras) or palabra.endswith(letras):
palabras.append(palabra)
return palabras
if __name__ == "__main__":
p = main()
print "La lista resultante es {}".format(p)
| agpl-3.0 | 7,313,390,221,214,390,000 | 45.212121 | 144 | 0.735082 | false |
abramhindle/UnnaturalCodeFork | python/testdata/launchpad/lib/lp/code/model/codeimportmachine.py | 1 | 4459 | # Copyright 2009 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""Database classes including and related to CodeImportMachine."""
__metaclass__ = type
__all__ = [
'CodeImportMachine',
'CodeImportMachineSet',
]
from sqlobject import (
SQLMultipleJoin,
StringCol,
)
from zope.component import getUtility
from zope.interface import implements
from lp.code.enums import (
CodeImportMachineOfflineReason,
CodeImportMachineState,
)
from lp.code.interfaces.codeimportevent import ICodeImportEventSet
from lp.code.interfaces.codeimportmachine import (
ICodeImportMachine,
ICodeImportMachineSet,
)
from lp.services.database.constants import (
DEFAULT,
UTC_NOW,
)
from lp.services.database.datetimecol import UtcDateTimeCol
from lp.services.database.enumcol import EnumCol
from lp.services.database.sqlbase import SQLBase
class CodeImportMachine(SQLBase):
"""See `ICodeImportMachine`."""
_defaultOrder = ['hostname']
implements(ICodeImportMachine)
date_created = UtcDateTimeCol(notNull=True, default=DEFAULT)
hostname = StringCol(default=None)
state = EnumCol(enum=CodeImportMachineState, notNull=True,
default=CodeImportMachineState.OFFLINE)
heartbeat = UtcDateTimeCol(notNull=False)
current_jobs = SQLMultipleJoin(
'CodeImportJob', joinColumn='machine',
orderBy=['date_started', 'id'])
events = SQLMultipleJoin(
'CodeImportEvent', joinColumn='machine',
orderBy=['-date_created', '-id'])
def shouldLookForJob(self, worker_limit):
"""See `ICodeImportMachine`."""
job_count = self.current_jobs.count()
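        # Offline machines never accept jobs.  A quiescing machine finishes its
        # current jobs and then marks itself offline, while an online machine
        # accepts jobs as long as it is below the worker limit.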
if self.state == CodeImportMachineState.OFFLINE:
return False
self.heartbeat = UTC_NOW
if self.state == CodeImportMachineState.QUIESCING:
if job_count == 0:
self.setOffline(
CodeImportMachineOfflineReason.QUIESCED)
return False
elif self.state == CodeImportMachineState.ONLINE:
return job_count < worker_limit
else:
raise AssertionError(
"Unknown machine state %r??" % self.state)
def setOnline(self, user=None, message=None):
"""See `ICodeImportMachine`."""
if self.state not in (CodeImportMachineState.OFFLINE,
CodeImportMachineState.QUIESCING):
raise AssertionError(
"State of machine %s was %s."
% (self.hostname, self.state.name))
self.state = CodeImportMachineState.ONLINE
getUtility(ICodeImportEventSet).newOnline(self, user, message)
def setOffline(self, reason, user=None, message=None):
"""See `ICodeImportMachine`."""
if self.state not in (CodeImportMachineState.ONLINE,
CodeImportMachineState.QUIESCING):
raise AssertionError(
"State of machine %s was %s."
% (self.hostname, self.state.name))
self.state = CodeImportMachineState.OFFLINE
getUtility(ICodeImportEventSet).newOffline(
self, reason, user, message)
def setQuiescing(self, user, message=None):
"""See `ICodeImportMachine`."""
if self.state != CodeImportMachineState.ONLINE:
raise AssertionError(
"State of machine %s was %s."
% (self.hostname, self.state.name))
self.state = CodeImportMachineState.QUIESCING
getUtility(ICodeImportEventSet).newQuiesce(self, user, message)
class CodeImportMachineSet(object):
"""See `ICodeImportMachineSet`."""
implements(ICodeImportMachineSet)
def getAll(self):
"""See `ICodeImportMachineSet`."""
return CodeImportMachine.select()
def getByHostname(self, hostname):
"""See `ICodeImportMachineSet`."""
return CodeImportMachine.selectOneBy(hostname=hostname)
def new(self, hostname, state=CodeImportMachineState.OFFLINE):
"""See `ICodeImportMachineSet`."""
machine = CodeImportMachine(hostname=hostname, heartbeat=None)
if state == CodeImportMachineState.ONLINE:
machine.setOnline()
elif state != CodeImportMachineState.OFFLINE:
raise AssertionError(
"Invalid machine creation state: %r." % state)
return machine
| agpl-3.0 | 5,931,219,688,010,093,000 | 33.3 | 71 | 0.657098 | false |
shaun-h/PyDoc | Managers/ThemeManager.py | 1 | 6178 | import os
import json
from glob import glob
class Theme (object):
def __init__(self, j = None):
self.__backgroundColour = ''
self.__tintColour = ''
self.__toolbarBackgroundColour = ''
self.__invertWebView = False
self.__themeName = ''
self.__textColour = ''
self.__subTextColour = ''
self.__settingsCellColour = ''
self.__borderColour = ''
		self.__separatorColour = ''
self.__settingsBackgroundColour = ''
self.__searchTintColour = ''
self.__searchBackgroundColour = ''
self.__cellSelectionColour = ''
self.__settingsCellSelectionColour = ''
self.__showCellSelection = True
self.__showSettingsCellSelection = True
if not j == None:
self.backgroundColour = j['BackgroundColour']
self.tintColour = j['TintColour']
self.toolbarBackgroundColour = j['ToolbarBackgroundColour']
self.invertWebView = j['InvertWebView']
self.themeName = j['ThemeName']
self.textColour = j['TextColour']
self.subTextColour = j['SubTextColour']
self.settingsCellColour = j['SettingsCellColour']
self.borderColour = j['BorderColour']
self.separatorColour = j['SeparatorColour']
self.settingsBackgroundColour = j['SettingsBackgroundColour']
self.searchTintColour = j['SearchTintColour']
self.searchBackgroundColour = j['SearchBackgroundColour']
self.cellSelectionColour = j['CellSelectionColour']
self.settingsCellSelectionColour = j['SettingsCellSelectionColour']
self.showCellSelection = j['ShowCellSelection']
self.showSettingsCellSelection = j['ShowSettingsCellSelection']
@property
def textColour(self):
return self.__textColour
@textColour.setter
def textColour(self, obj):
self.__textColour = obj
@property
def subTextColour(self):
return self.__subTextColour
@subTextColour.setter
def subTextColour(self, obj):
self.__subTextColour = obj
@property
def backgroundColour(self):
return self.__backgroundColour
@backgroundColour.setter
def backgroundColour(self, obj):
self.__backgroundColour = obj
@property
def tintColour(self):
return self.__tintColour
@tintColour.setter
def tintColour(self, obj):
self.__tintColour = obj
@property
def toolbarBackgroundColour(self):
return self.__toolbarBackgroundColour
@toolbarBackgroundColour.setter
def toolbarBackgroundColour(self, obj):
self.__toolbarBackgroundColour = obj
@property
def invertWebView(self):
return self.__invertWebView
@invertWebView.setter
def invertWebView(self, obj):
self.__invertWebView = obj
@property
def themeName(self):
return self.__themeName
@themeName.setter
def themeName(self, obj):
self.__themeName = obj
@property
def settingsCellColour(self):
return self.__settingsCellColour
@settingsCellColour.setter
def settingsCellColour(self, obj):
self.__settingsCellColour = obj
@property
def borderColour(self):
return self.__borderColour
@borderColour.setter
def borderColour(self, obj):
self.__borderColour = obj
@property
def separatorColour(self):
return self.__separatorColour
@separatorColour.setter
def separatorColour(self, obj):
self.__separatorColour = obj
@property
def settingsBackgroundColour(self):
return self.__settingsBackgroundColour
@settingsBackgroundColour.setter
def settingsBackgroundColour(self, obj):
self.__settingsBackgroundColour = obj
@property
def searchTintColour(self):
return self.__searchTintColour
@searchTintColour.setter
def searchTintColour(self, obj):
self.__searchTintColour = obj
@property
def searchBackgroundColour(self):
return self.__searchBackgroundColour
@searchBackgroundColour.setter
def searchBackgroundColour(self, obj):
self.__searchBackgroundColour = obj
@property
def cellSelectionColour(self):
return self.__cellSelectionColour
@cellSelectionColour.setter
def cellSelectionColour(self, obj):
self.__cellSelectionColour = obj
@property
def settingsCellSelectionColour(self):
return self.__settingsCellSelectionColour
@settingsCellSelectionColour.setter
def settingsCellSelectionColour(self, obj):
self.__settingsCellSelectionColour = obj
@property
def showSettingsCellSelection(self):
return self.__showSettingsCellSelection
@showSettingsCellSelection.setter
def showSettingsCellSelection(self, obj):
self.__showSettingsCellSelection = obj
@property
def showCellSelection(self):
return self.__showCellSelection
@showCellSelection.setter
def showCellSelection(self, obj):
self.__showCellSelection = obj
class ThemeManager (object):
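	"""Loads the available theme JSON files from a folder and tracks which one is currently in use."""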
def __init__(self, themesfolder):
self.themesFolder = themesfolder
self.themes = self.getThemes(themesfolder)
self.themeFileName = self.getThemeToUse(themesfolder)
try:
self.currentTheme = self.themes[self.themeFileName]
except KeyError:
self.setThemeToUse('Default.json')
self.themeFileName = self.getThemeToUse(themesfolder)
self.currentTheme = self.themes[self.themeFileName]
def setThemeToUse(self, themeFileName):
self.themeFileName = themeFileName
self.currentTheme = self.themes[themeFileName]
self.saveCurrentThemeToUse()
def getThemeToUse(self, themesfolder):
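		# Read the saved theme file name; writes Default.json to the config file if it does not exist yet.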
themeConfigPath = '.themesConfig'
if not os.path.exists(themeConfigPath):
self.saveThemeToUse('Default.json')
with open(themeConfigPath, 'r') as config:
name = config.read()
return name
def saveCurrentThemeToUse(self):
themeConfigPath = '.themesConfig'
if os.path.exists(themeConfigPath):
os.remove(themeConfigPath)
with open(themeConfigPath, 'w') as config:
config.write(self.themeFileName)
def saveThemeToUse(self, themeFileName):
themeConfigPath = '.themesConfig'
if os.path.exists(themeConfigPath):
os.remove(themeConfigPath)
with open(themeConfigPath, 'w') as config:
config.write(themeFileName)
def getThemes(self, folder):
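		# Build a dict of Theme objects keyed by JSON file name for every *.json file in the folder.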
themes = {}
folders = glob(os.path.join(folder, '*.json'))
for fullFilePath in folders:
if os.path.isfile(fullFilePath):
with open(fullFilePath , 'r') as data_file:
data = json.load(data_file)
themes[os.path.basename(fullFilePath)] = Theme(data)
return themes
if __name__ == '__main__':
tm = ThemeManager('../Themes')
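	# Illustrative only (not part of the original script): assumes the configured
	# theme file, e.g. Default.json, is present under ../Themes.
	print(tm.currentTheme.themeName)
	print(tm.currentTheme.backgroundColour)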
| mit | -5,910,473,821,166,674,000 | 25.62931 | 70 | 0.74215 | false |