repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated
---|---|---|---|---|---|---|---|---|---|---|
svk/harmless7drl | vision.py | 1 | 5614 | # Spiral FOV is my pet FOV algorithm. However, I always struggle
# with it for 7DRLs -- it's become a tradition. Let's see how I
# do this year. Starting 1847.
# Angles are measured such that 1.0 is one full circle. This is
# convenient to work with angles without many floating-point
# hassles.
from math import atan2, pi
import sys
def quadrant( x, y ):
if x > 0:
x = 1
elif x < 0:
x = -1
if y > 0:
y = 1
elif y < 0:
y = -1
return x, y
def fromRadians( angle ):
rv = angle / (2.0 * pi)
return rv + 1 if rv < 0 else rv
def reduceAngle( angle ):
rv = angle - int( angle )
if rv < 0:
rv += 1
assert rv >= 0 and rv < 1
return rv
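# Hedged illustration (not part of the original module): in this turn-based
# convention 1.0 is one full circle, so fromRadians(pi) is half a turn and
# angles are reduced modulo one turn. A minimal sanity check, assuming only
# the two helpers above:
def _demo_angle_convention():
    assert abs(fromRadians(pi) - 0.5) < 1e-9        # half a circle
    assert abs(fromRadians(-pi / 2) - 0.75) < 1e-9  # -90 degrees wraps around
    assert reduceAngle(1.25) == 0.25                # reduced modulo one turn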
class AngularInterval: #never full, sometimes empty
def __init__(self, begin, end):
self.begin = reduceAngle( begin )
self.end = reduceAngle( end )
self.crosses = self.begin > self.end
self.empty = begin == end
def contains(self, angle):
if self.empty:
return False
if not self.crosses:
return angle > self.begin and angle < self.end
return angle > self.begin or angle < self.end
def endIs(self, angle):
if self.empty:
return False
return angle == self.end
def intersect(self, that):
# This is not a generally correct implementation of
# intersection; sometimes the intersection of two intervals
# is two disjoint sets -- this case is not handled.
# It should never occur since we should never operate
# with angles larger than pi.
aye = False
if self.contains( that.begin ):
self.begin = that.begin
aye = True
if self.contains( that.end ):
self.end = that.end
aye = True
if not aye:
if not (that.contains( self.begin ) or that.contains( self.end )):
self.empty = True
return self
def adjoin(self, that):
# Check that this is safe instead of explicit left/right-adjoin.
if self.end == that.begin:
self.end = that.end
if self.begin == that.end:
self.begin = that.begin
self.crosses = self.begin > self.end
return self
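# Hedged usage sketch (not part of the original module): intervals exclude
# their endpoints and may wrap past the 0/1 seam; intersect() narrows the
# receiver in place and returns it.
def _demo_angular_interval():
    wrap = AngularInterval(0.9, 0.1)          # crosses the seam at 0
    assert wrap.contains(0.95) and wrap.contains(0.05)
    assert not wrap.contains(0.5)
    plain = AngularInterval(0.2, 0.4)
    assert plain.intersect(AngularInterval(0.3, 0.6)).begin == 0.3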
class VisionField:
def __init__(self, origin, obstacle, radius = None, mark = None):
self.origin = origin # a tile
self.obstacle = obstacle
self.mark = mark # a lambda or None
if radius:
self.radiusSquared = radius * radius
else:
self.radiusSquared = None
self.q = []
self.tiles = {}
self.visible = set()
self.visible.add( (origin.x, origin.y) )
if self.mark:
self.mark( origin )
self.spiral = ( (1,0), (0,-1), (-1,0), (0,1) )
self.passOrderings = {
(1,0): ( (0,1), (1,0), (0,-1) ),
(1,-1): ( (1,0), (0,-1) ),
(0,-1): ( (1,0), (0,-1), (-1,0) ),
(-1,-1): ( (0,-1), (-1,0) ),
(-1,0): ( (0,-1), (-1,0), (0,1) ),
(-1,1): ( (-1,0), (0,1) ),
(0,1): ( (-1,0), (0,1), (1,0) ),
(1,1): ( (0,1), (1,0) ),
}
initialAngles = [ 0.125, 0.375, 0.625, 0.875 ]
for i in range(4):
a0 = initialAngles[i-1] #notice that this works with 0 and -1
a1 = initialAngles[i]
dx, dy = self.spiral[i]
tile = origin.getRelative( dx, dy )
if tile:
self.addNew( tile, AngularInterval( a0, a1 ) )
while self.q:
self.calculate()
def addNew(self, tile, aint):
self.tiles[ tile.x, tile.y ] = aint
self.q.append( tile )
def calculate(self):
next = self.q.pop(0)
self.visible.add( (next.x, next.y) )
if self.mark:
self.mark( next )
rx, ry = next.x - self.origin.x, next.y - self.origin.y
qxqy = quadrant( rx, ry )
try:
light = self.tiles[ next.x, next.y ]
except KeyError:
return # no light to pass
del self.tiles[ next.x, next.y ]
if self.radiusSquared and rx*rx + ry*ry > self.radiusSquared:
return
if self.obstacle( next ):
qx, qy = qxqy
ex, ey = qy, -qx
if qx == 0:
ey = -qy
if qy == 0:
ex = -qx
maxa = fromRadians( atan2( -(2 * ry + ey), 2 * rx + ex ) )
if light.endIs( maxa ):
tile = next.getRelative( *self.passOrderings[qxqy][-1] )
if tile:
self.q.append( tile )
return
for dx, dy in self.passOrderings[qxqy]:
tile = next.getRelative( dx, dy )
if not tile:
continue
assert (tile.x, tile.y) not in self.visible
self.passLight( tile, light )
def passLight(self, tile, lightIn):
nrx, nry = tile.x - self.origin.x, tile.y - self.origin.y
qx, qy = quadrant( nrx, nry )
bx, by = -qy, qx
ex, ey = qy, -qx
if qx == 0:
by, ey = -qy, -qy
if qy == 0:
bx, ex = -qx, -qx
ba = fromRadians( atan2( -(2 * nry + by), 2 * nrx + bx ) )
ea = fromRadians( atan2( -(2 * nry + ey), 2 * nrx + ex ) )
light = AngularInterval( ba, ea ).intersect( lightIn )
if light.empty:
return
try:
self.tiles[ tile.x, tile.y ].adjoin( light )
except KeyError:
self.addNew( tile, light )
| mit | -1,536,654,225,603,476,200 | 32.616766 | 78 | 0.498041 | false |
nilbody/h2o-3 | h2o-py/tests/testdir_misc/pyunit_metric_json_check.py | 1 | 10652 | import sys
sys.path.insert(1,"../../")
import h2o
from tests import pyunit_utils
# The purpose of this test is to detect a change in the _metric_json of MetricsBase objects. Many of the metric
# accessors require _metric_json to have a particular form.
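# The checks below all follow one pattern: compare the keys actually present in
# a model's _metric_json against a desired key list and assert that the
# difference is empty. A minimal sketch of that pattern (the helper name is
# illustrative and not used by the test itself):
def _assert_metric_json_keys(metric_json, desired_keys, label):
    have = list(metric_json.keys())
    diff = list(set(have) - set(desired_keys))
    assert not diff, "unexpected {0} metric json keys: {1}".format(label, diff)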
def metric_json_check():
df = h2o.import_file(path=pyunit_utils.locate("smalldata/logreg/prostate.csv"))
# Regression metric json
reg_mod = h2o.gbm(y=df["CAPSULE"], x=df[3:], training_frame=df, distribution="gaussian")
reg_met = reg_mod.model_performance()
reg_metric_json_keys_have = list(reg_met._metric_json.keys())
reg_metric_json_keys_desired = [u'model_category',
u'description',
u'r2',
u'frame',
u'model_checksum',
u'MSE',
u'__meta',
u'scoring_time',
u'predictions',
u'model',
u'duration_in_ms',
u'frame_checksum',
u'mean_residual_deviance']
reg_metric_diff = list(set(reg_metric_json_keys_have) - set(reg_metric_json_keys_desired))
assert not reg_metric_diff, "There's a difference between the current ({0}) and the desired ({1}) regression " \
"metric json. The difference is {2}".format(reg_metric_json_keys_have,
reg_metric_json_keys_desired,
reg_metric_diff)
# Regression metric json (GLM)
reg_mod = h2o.glm(y=df["CAPSULE"], x=df[3:], training_frame=df, family="gaussian")
reg_met = reg_mod.model_performance()
reg_metric_json_keys_have = list(reg_met._metric_json.keys())
reg_metric_json_keys_desired = [u'model_category',
u'description',
u'r2',
u'residual_degrees_of_freedom',
u'frame',
u'model_checksum',
u'MSE',
u'__meta',
u'null_deviance',
u'scoring_time',
u'null_degrees_of_freedom',
u'predictions',
u'AIC',
u'model',
u'duration_in_ms',
u'frame_checksum',
u'residual_deviance',
u'mean_residual_deviance']
reg_metric_diff = list(set(reg_metric_json_keys_have) - set(reg_metric_json_keys_desired))
assert not reg_metric_diff, "There's a difference between the current ({0}) and the desired ({1}) glm-regression " \
"metric json. The difference is {2}".format(reg_metric_json_keys_have,
reg_metric_json_keys_desired,
reg_metric_diff)
# Binomial metric json
bin_mod = h2o.gbm(y=df["CAPSULE"].asfactor(), x=df[3:], training_frame=df, distribution="bernoulli")
bin_met = bin_mod.model_performance()
bin_metric_json_keys_have = list(bin_met._metric_json.keys())
bin_metric_json_keys_desired = [u'AUC',
u'Gini',
u'model_category',
u'description',
u'r2',
u'frame',
u'model_checksum',
u'MSE',
u'__meta',
u'gains_lift_table',
u'logloss',
u'scoring_time',
u'thresholds_and_metric_scores',
u'predictions',
u'max_criteria_and_metric_scores',
u'model',
u'duration_in_ms',
u'frame_checksum',
u'domain']
bin_metric_diff = list(set(bin_metric_json_keys_have) - set(bin_metric_json_keys_desired))
assert not bin_metric_diff, "There's a difference between the current ({0}) and the desired ({1}) binomial " \
"metric json. The difference is {2}".format(bin_metric_json_keys_have,
bin_metric_json_keys_desired,
bin_metric_diff)
# Binomial metric json (GLM)
bin_mod = h2o.glm(y=df["CAPSULE"].asfactor(), x=df[3:], training_frame=df, family="binomial")
bin_met = bin_mod.model_performance()
bin_metric_json_keys_have = list(bin_met._metric_json.keys())
bin_metric_json_keys_desired = [u'frame',
u'residual_deviance',
u'max_criteria_and_metric_scores',
u'MSE',
u'frame_checksum',
u'AIC',
u'logloss',
u'Gini',
u'predictions',
u'AUC',
u'description',
u'model_checksum',
u'duration_in_ms',
u'model_category',
u'gains_lift_table',
u'r2',
u'residual_degrees_of_freedom',
u'__meta',
u'null_deviance',
u'scoring_time',
u'null_degrees_of_freedom',
u'model',
u'thresholds_and_metric_scores',
u'domain']
bin_metric_diff = list(set(bin_metric_json_keys_have) - set(bin_metric_json_keys_desired))
assert not bin_metric_diff, "There's a difference between the current ({0}) and the desired ({1}) glm-binomial " \
"metric json. The difference is {2}".format(bin_metric_json_keys_have,
bin_metric_json_keys_desired,
bin_metric_diff)
# Multinomial metric json
df = h2o.import_file(path=pyunit_utils.locate("smalldata/airlines/AirlinesTrain.csv.zip"))
myX = ["Origin", "Dest", "IsDepDelayed", "UniqueCarrier", "Distance", "fDayofMonth", "fDayOfWeek"]
myY = "fYear"
mul_mod = h2o.gbm(x=df[myX], y=df[myY], training_frame=df, distribution="multinomial")
mul_met = mul_mod.model_performance()
mul_metric_json_keys_have = list(mul_met._metric_json.keys())
mul_metric_json_keys_desired = [u'cm',
u'model_category',
u'description',
u'r2',
u'frame',
u'model_checksum',
u'MSE',
u'__meta',
u'logloss',
u'scoring_time',
u'predictions',
u'hit_ratio_table',
u'model',
u'duration_in_ms',
u'frame_checksum']
mul_metric_diff = list(set(mul_metric_json_keys_have) - set(mul_metric_json_keys_desired))
assert not mul_metric_diff, "There's a difference between the current ({0}) and the desired ({1}) multinomial " \
"metric json. The difference is {2}".format(mul_metric_json_keys_have,
mul_metric_json_keys_desired,
mul_metric_diff)
# Clustering metric json
df = h2o.import_file(path=pyunit_utils.locate("smalldata/iris/iris.csv"))
clus_mod = h2o.kmeans(x=df[0:4], k=3, standardize=False)
clus_met = clus_mod.model_performance()
clus_metric_json_keys_have = list(clus_met._metric_json.keys())
clus_metric_json_keys_desired = [u'tot_withinss',
u'model_category',
u'description',
u'frame',
u'model_checksum',
u'MSE',
u'__meta',
u'scoring_time',
u'betweenss',
u'predictions',
u'totss',
u'model',
u'duration_in_ms',
u'frame_checksum',
u'centroid_stats']
clus_metric_diff = list(set(clus_metric_json_keys_have) - set(clus_metric_json_keys_desired))
assert not clus_metric_diff, "There's a difference between the current ({0}) and the desired ({1}) clustering " \
"metric json. The difference is {2}".format(clus_metric_json_keys_have,
clus_metric_json_keys_desired,
clus_metric_diff)
if __name__ == "__main__":
pyunit_utils.standalone_test(metric_json_check)
else:
metric_json_check()
| apache-2.0 | 888,273,760,287,390,600 | 56.268817 | 120 | 0.393166 | false |
cloudbase/coriolis | coriolis/api/v1/router.py | 1 | 9217 | # Copyright 2016 Cloudbase Solutions Srl
# All Rights Reserved.
from oslo_log import log as logging
from coriolis import api
from coriolis.api.v1 import diagnostics
from coriolis.api.v1 import endpoint_actions
from coriolis.api.v1 import endpoint_destination_minion_pool_options
from coriolis.api.v1 import endpoint_destination_options
from coriolis.api.v1 import endpoint_instances
from coriolis.api.v1 import endpoint_networks
from coriolis.api.v1 import endpoint_source_minion_pool_options
from coriolis.api.v1 import endpoint_source_options
from coriolis.api.v1 import endpoint_storage
from coriolis.api.v1 import endpoints
from coriolis.api.v1 import migration_actions
from coriolis.api.v1 import migrations
from coriolis.api.v1 import minion_pools
from coriolis.api.v1 import minion_pool_actions
from coriolis.api.v1 import provider_schemas
from coriolis.api.v1 import providers
from coriolis.api.v1 import regions
from coriolis.api.v1 import replica_actions
from coriolis.api.v1 import replica_schedules
from coriolis.api.v1 import replica_tasks_execution_actions
from coriolis.api.v1 import replica_tasks_executions
from coriolis.api.v1 import replicas
from coriolis.api.v1 import services
LOG = logging.getLogger(__name__)
class ExtensionManager(object):
def get_resources(self):
return []
def get_controller_extensions(self):
return []
class APIRouter(api.APIRouter):
ExtensionManager = ExtensionManager
def _setup_routes(self, mapper, ext_mgr):
mapper.redirect("", "/")
self.resources['providers'] = providers.create_resource()
mapper.resource('provider', 'providers',
controller=self.resources['providers'])
self.resources['regions'] = regions.create_resource()
mapper.resource('region', 'regions',
controller=self.resources['regions'],
collection={'detail': 'GET'})
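        # (Hedged note, not in the original source:) each mapper.resource() call
        # wires the standard REST routes for a controller -- e.g. the 'regions'
        # registration above serves GET /{project_id}/regions,
        # GET /{project_id}/regions/detail and GET /{project_id}/regions/{id} --
        # and the same pattern repeats for the resources registered below.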
self.resources['endpoints'] = endpoints.create_resource()
mapper.resource('endpoint', 'endpoints',
controller=self.resources['endpoints'],
collection={'detail': 'GET'},
member={'action': 'POST'})
self.resources['services'] = services.create_resource()
mapper.resource('service', 'services',
controller=self.resources['services'],
collection={'detail': 'GET'})
self.resources['minion_pools'] = minion_pools.create_resource()
mapper.resource('minion_pool', 'minion_pools',
controller=self.resources['minion_pools'],
collection={'detail': 'GET'})
minion_pool_actions_resource = minion_pool_actions.create_resource()
self.resources['minion_pool_actions'] = minion_pool_actions_resource
minion_pool_path = '/{project_id}/minion_pools/{id}'
mapper.connect('minion_pool_actions',
minion_pool_path + '/actions',
controller=self.resources['minion_pool_actions'],
action='action',
conditions={'method': 'POST'})
self.resources['endpoint_source_minion_pool_options'] = \
endpoint_source_minion_pool_options.create_resource()
mapper.resource('minion_pool_options',
'endpoints/{endpoint_id}/source-minion-pool-options',
controller=(
self.resources[
'endpoint_source_minion_pool_options']))
self.resources['endpoint_destination_minion_pool_options'] = \
endpoint_destination_minion_pool_options.create_resource()
mapper.resource('minion_pool_options',
'endpoints/{endpoint_id}/destination-minion-pool-options',
controller=(
self.resources[
'endpoint_destination_minion_pool_options']))
endpoint_actions_resource = endpoint_actions.create_resource()
self.resources['endpoint_actions'] = endpoint_actions_resource
endpoint_path = '/{project_id}/endpoints/{id}'
mapper.connect('endpoint_actions',
endpoint_path + '/actions',
controller=self.resources['endpoint_actions'],
action='action',
conditions={'method': 'POST'})
self.resources['endpoint_instances'] = \
endpoint_instances.create_resource()
mapper.resource('instance', 'endpoints/{endpoint_id}/instances',
controller=self.resources['endpoint_instances'])
self.resources['endpoint_networks'] = \
endpoint_networks.create_resource()
mapper.resource('network', 'endpoints/{endpoint_id}/networks',
controller=self.resources['endpoint_networks'])
self.resources['endpoint_storage'] = \
endpoint_storage.create_resource()
mapper.resource('storage', 'endpoints/{endpoint_id}/storage',
controller=self.resources['endpoint_storage'])
self.resources['endpoint_destination_options'] = \
endpoint_destination_options.create_resource()
mapper.resource('destination_options',
'endpoints/{endpoint_id}/destination-options',
controller=(
self.resources['endpoint_destination_options']))
self.resources['endpoint_source_options'] = \
endpoint_source_options.create_resource()
mapper.resource('source_options',
'endpoints/{endpoint_id}/source-options',
controller=(
self.resources['endpoint_source_options']))
self.resources['provider_schemas'] = \
provider_schemas.create_resource()
mapper.resource('provider_schemas',
'providers/{platform_name}/schemas/{provider_type}',
controller=self.resources['provider_schemas'])
self.resources['migrations'] = migrations.create_resource()
mapper.resource('migration', 'migrations',
controller=self.resources['migrations'],
collection={'detail': 'GET'},
member={'action': 'POST'})
migration_actions_resource = migration_actions.create_resource()
self.resources['migration_actions'] = migration_actions_resource
migration_path = '/{project_id}/migrations/{id}'
mapper.connect('migration_actions',
migration_path + '/actions',
controller=self.resources['migration_actions'],
action='action',
conditions={'method': 'POST'})
self.resources['replicas'] = replicas.create_resource()
mapper.resource('replica', 'replicas',
controller=self.resources['replicas'],
collection={'detail': 'GET'},
member={'action': 'POST'})
replica_actions_resource = replica_actions.create_resource()
self.resources['replica_actions'] = replica_actions_resource
migration_path = '/{project_id}/replicas/{id}'
mapper.connect('replica_actions',
migration_path + '/actions',
controller=self.resources['replica_actions'],
action='action',
conditions={'method': 'POST'})
self.resources['replica_tasks_executions'] = \
replica_tasks_executions.create_resource()
mapper.resource('execution', 'replicas/{replica_id}/executions',
controller=self.resources['replica_tasks_executions'],
collection={'detail': 'GET'},
member={'action': 'POST'})
replica_tasks_execution_actions_resource = \
replica_tasks_execution_actions.create_resource()
self.resources['replica_tasks_execution_actions'] = \
replica_tasks_execution_actions_resource
migration_path = '/{project_id}/replicas/{replica_id}/executions/{id}'
mapper.connect('replica_tasks_execution_actions',
migration_path + '/actions',
controller=self.resources[
'replica_tasks_execution_actions'],
action='action',
conditions={'method': 'POST'})
sched = replica_schedules.create_resource()
self.resources['replica_schedules'] = sched
mapper.resource('replica_schedule', 'replicas/{replica_id}/schedules',
controller=self.resources['replica_schedules'],
collection={'index': 'GET'},
member={'action': 'POST'})
diag = diagnostics.create_resource()
self.resources['diagnostics'] = diag
mapper.resource('diagnostics', 'diagnostics',
controller=self.resources['diagnostics'])
| agpl-3.0 | -7,495,811,954,175,948,000 | 44.855721 | 82 | 0.592709 | false |
Bachmann1234/journaler | journaler/models.py | 1 | 2071 | from collections import namedtuple
import json
import datetime
from pytz import utc
DATETIME_FORMAT = "%m-%d-%YT%H:%M:%S.%fZ"
"""
Stores a line of the food log
:type date UTC datetime
:type mood_rating int from 1 to 5 about your general mood
1 - Poor, 5 - Great
:type food_rating int from 1 to 5 about the food
1 - Unhealthy/too much, 5 - Healthy, properly portioned
:type mood_tags list[str] labels for mood. Should be stored lowercase
:type food_tags list[str] labels for food. Should be stored lowercase
:type entry_tags list[str] metadata about entry that may be helpful for analysis
:type meal str one of Breakfast, Lunch, Dinner, Snack. Multiple meal tags in a day are combined into one meal
:type note str any other long-form thoughts
"""
FoodLog = namedtuple(
'FoodLog',
[
'date',
'mood_rating',
'food_rating',
'mood_tags',
'food_tags',
'entry_tags',
'meal',
'note'
]
)
BREAKFAST = 'breakfast'
LUNCH = 'lunch'
DINNER = 'dinner'
SNACK = 'snack'
VALID_MEALS = [BREAKFAST, LUNCH, DINNER, SNACK]
def food_log_to_json(food_log):
"""
Turns a FoodLog to json
:param food_log: FoodLog object
:return: json string
"""
    result = dict(food_log._asdict())  # _asdict() is portable; namedtuple __dict__ is not available on newer Pythons
result['date'] = food_log.date.strftime(
DATETIME_FORMAT
)
return json.dumps(result)
def json_to_food_log(json_string):
"""
Turns a json string to a food_log
:param json_string:
A json formatted string that can
be made to a json log
:return: a FoodLog object
"""
log_dict = json.loads(json_string)
return FoodLog(
date=utc.localize(
datetime.datetime.strptime(
log_dict['date'],
DATETIME_FORMAT
)
),
mood_rating=log_dict['mood_rating'],
food_rating=log_dict['food_rating'],
mood_tags=log_dict['mood_tags'],
food_tags=log_dict['food_tags'],
entry_tags=log_dict['entry_tags'],
meal=log_dict['meal'],
note=log_dict['note']
)
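# Hedged usage sketch (not part of the original module): round-tripping a
# FoodLog through JSON. The example values are made up for illustration.
if __name__ == '__main__':
    example = FoodLog(
        date=utc.localize(datetime.datetime(2015, 6, 1, 12, 30, 0)),
        mood_rating=4,
        food_rating=3,
        mood_tags=['relaxed'],
        food_tags=['salad'],
        entry_tags=['home'],
        meal=LUNCH,
        note='quick lunch',
    )
    assert json_to_food_log(food_log_to_json(example)) == example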
| apache-2.0 | 2,955,659,030,745,636,400 | 25.551282 | 104 | 0.618059 | false |
MissionCriticalCloud/marvin | marvin/cloudstackAPI/createLoadBalancerRule.py | 1 | 6741 | """Creates a load balancer rule"""
from baseCmd import *
from baseResponse import *
class createLoadBalancerRuleCmd (baseCmd):
typeInfo = {}
def __init__(self):
self.isAsync = "true"
"""load balancer algorithm (source, roundrobin, leastconn)"""
"""Required"""
self.algorithm = None
self.typeInfo['algorithm'] = 'string'
"""name of the load balancer rule"""
"""Required"""
self.name = None
self.typeInfo['name'] = 'string'
"""the private port of the private IP address/virtual machine where the network traffic will be load balanced to"""
"""Required"""
self.privateport = None
self.typeInfo['privateport'] = 'integer'
"""the public port from where the network traffic will be load balanced from"""
"""Required"""
self.publicport = None
self.typeInfo['publicport'] = 'integer'
"""the account associated with the load balancer. Must be used with the domainId parameter."""
self.account = None
self.typeInfo['account'] = 'string'
"""the CIDR list to forward traffic from"""
self.cidrlist = []
self.typeInfo['cidrlist'] = 'list'
"""the HAProxy client_timeout setting for this load balancing rule (in ms)."""
self.clienttimeout = None
self.typeInfo['clienttimeout'] = 'integer'
"""the description of the load balancer rule"""
self.description = None
self.typeInfo['description'] = 'string'
"""the domain ID associated with the load balancer"""
self.domainid = None
self.typeInfo['domainid'] = 'uuid'
"""an optional field, whether to the display the rule to the end user or not"""
self.fordisplay = None
self.typeInfo['fordisplay'] = 'boolean'
"""The guest network this rule will be created for. Required when public Ip address is not associated with any Guest network yet (VPC case)"""
self.networkid = None
self.typeInfo['networkid'] = 'uuid'
"""if true, firewall rule for source/end public port is automatically created; if false - firewall rule has to be created explicitely. If not specified 1) defaulted to false when LB rule is being created for VPC guest network 2) in all other cases defaulted to true"""
self.openfirewall = None
self.typeInfo['openfirewall'] = 'boolean'
"""The protocol for the LB"""
self.protocol = None
self.typeInfo['protocol'] = 'string'
"""public IP address ID from where the network traffic will be load balanced from"""
self.publicipid = None
self.typeInfo['publicipid'] = 'uuid'
"""the HAProxy server_timeout setting for this load balancing rule (in ms)."""
self.servertimeout = None
self.typeInfo['servertimeout'] = 'integer'
"""zone where the load balancer is going to be created. This parameter is required when LB service provider is ElasticLoadBalancerVm"""
self.zoneid = None
self.typeInfo['zoneid'] = 'uuid'
self.required = ["algorithm", "name", "privateport", "publicport", ]
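# Hedged usage sketch (not part of the generated module): in Marvin tests a
# command object is usually filled in field by field and then handed to a
# connected CloudStack API client; the apiclient argument below is assumed to
# be such a client instance.
def _example_create_lb_rule(apiclient, publicipid):
    cmd = createLoadBalancerRuleCmd()
    cmd.name = "web-lb"
    cmd.algorithm = "roundrobin"
    cmd.publicport = 80
    cmd.privateport = 8080
    cmd.publicipid = publicipid
    # the reply is deserialized into a createLoadBalancerRuleResponse
    return apiclient.createLoadBalancerRule(cmd)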
class createLoadBalancerRuleResponse (baseResponse):
typeInfo = {}
def __init__(self):
"""the load balancer rule ID"""
self.id = None
self.typeInfo['id'] = 'string'
"""the account of the load balancer rule"""
self.account = None
self.typeInfo['account'] = 'string'
"""the load balancer algorithm (source, roundrobin, leastconn)"""
self.algorithm = None
self.typeInfo['algorithm'] = 'string'
"""the cidr list to forward traffic from"""
self.cidrlist = None
self.typeInfo['cidrlist'] = 'string'
"""the HAProxy client_timeout setting for this load balancing rule."""
self.clienttimeout = None
self.typeInfo['clienttimeout'] = 'integer'
"""the description of the load balancer"""
self.description = None
self.typeInfo['description'] = 'string'
"""the domain of the load balancer rule"""
self.domain = None
self.typeInfo['domain'] = 'string'
"""the domain ID of the load balancer rule"""
self.domainid = None
self.typeInfo['domainid'] = 'string'
"""is rule for display to the regular user"""
self.fordisplay = None
self.typeInfo['fordisplay'] = 'boolean'
"""the name of the load balancer"""
self.name = None
self.typeInfo['name'] = 'string'
"""the id of the guest network the lb rule belongs to"""
self.networkid = None
self.typeInfo['networkid'] = 'string'
"""the private port"""
self.privateport = None
self.typeInfo['privateport'] = 'string'
"""the project name of the load balancer"""
self.project = None
self.typeInfo['project'] = 'string'
"""the project id of the load balancer"""
self.projectid = None
self.typeInfo['projectid'] = 'string'
"""the protocol of the loadbalanacer rule"""
self.protocol = None
self.typeInfo['protocol'] = 'string'
"""the public ip address"""
self.publicip = None
self.typeInfo['publicip'] = 'string'
"""the public ip address id"""
self.publicipid = None
self.typeInfo['publicipid'] = 'string'
"""the public port"""
self.publicport = None
self.typeInfo['publicport'] = 'string'
"""the HAProxy server_timeout setting for this load balancing rule."""
self.servertimeout = None
self.typeInfo['servertimeout'] = 'integer'
"""the state of the rule"""
self.state = None
self.typeInfo['state'] = 'string'
"""the id of the zone the rule belongs to"""
self.zoneid = None
self.typeInfo['zoneid'] = 'string'
"""the list of resource tags associated with load balancer"""
self.tags = []
class tags:
def __init__(self):
""""the account associated with the tag"""
self.account = None
""""customer associated with the tag"""
self.customer = None
""""the domain associated with the tag"""
self.domain = None
""""the ID of the domain associated with the tag"""
self.domainid = None
""""tag key name"""
self.key = None
""""the project name where tag belongs to"""
self.project = None
""""the project id the tag belongs to"""
self.projectid = None
""""id of the resource"""
self.resourceid = None
""""resource type"""
self.resourcetype = None
""""tag value"""
self.value = None
| apache-2.0 | -2,408,419,068,455,296,000 | 41.664557 | 276 | 0.602285 | false |
rofferom/gpstracker-android | tools/location.py | 1 | 1454 | import re
import datetime
import math
from collections import namedtuple
Location = namedtuple('Location', ['ts', 'date', 'lat', 'lon', 'accuracy', 'speed'])
def convertStrFloat(s):
return float(s.replace(',', '.'))
def formatTimezone(timezone):
if timezone == 0:
strTimezone = "GMT"
else:
if timezone > 0:
sign = "+"
else:
sign = "-"
strTimezone = "GMT{0}{1}".format(sign, int(math.fabs(timezone)))
return strTimezone
def formatDate(ts, timezone):
# Apply timezone if available
if timezone:
ts += timezone * 3600
date = datetime.datetime.utcfromtimestamp(ts)
strTs = "{0:04d}/{1:02d}/{2:02d} {3:02d}:{4:02d}:{5:02d}" \
.format(date.year, date.month, date.day, \
date.hour, date.minute, date.second)
# Append timezone if available
if timezone:
strTimezone = formatTimezone(timezone)
strTs += " ({0})".format(strTimezone)
return strTs
def locationFromStr(s, timezone=None):
pattern = re.compile("ts:(?P<ts>\d*?);lat:(?P<lat>-?\d*?,\d*?);long:(?P<long>-?\d*?,\d*?);accuracy:(?P<accuracy>\d*?,\d*?);speed:(?P<speed>\d*?,\d*?)")
result = pattern.match(s)
if not result:
return None
# ms => s
ts = int(result.group("ts")) // 1000
return Location(
ts=ts,
date=formatDate(ts, timezone),
lat=convertStrFloat(result.group("lat")),
lon=convertStrFloat(result.group("long")),
accuracy=convertStrFloat(result.group("accuracy")),
speed=convertStrFloat(result.group("speed"))
)
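# Hedged usage sketch (not part of the original module): parsing one serialized
# location line. The coordinates below are made up for illustration.
if __name__ == '__main__':
    sample = "ts:1434567890000;lat:48,8584;long:2,2945;accuracy:5,0;speed:12,5"
    loc = locationFromStr(sample, timezone=2)
    assert loc.lat == 48.8584 and loc.lon == 2.2945
    print(loc.date)  # "2015/06/17 21:04:50 (GMT+2)"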
| gpl-2.0 | -7,233,229,814,435,216,000 | 23.644068 | 153 | 0.649243 | false |
GutenkunstLab/SloppyCell | test/test_daskr.py | 1 | 17360 | import unittest
import scipy
import SloppyCell.Utility as Utility
import SloppyCell.daskr
from SloppyCell.daskr import daeint
redir = Utility.Redirector()
################################################################################
# Van Der Pol oscillator equations
# This test problem is from the daskr documentation.
def vdp_func(y, t):
ydot = scipy.zeros(2, scipy.float_)
ydot[0] = y[1]
ydot[1] = 100*(1-y[0]**2)*y[1] - y[0]
return ydot
def vdp_res_func(t, y, yprime, rpar):
return vdp_func(y, t) - yprime
def vdp_Dfun(t, y, yprime, cj, rpar):
pd = scipy.zeros((2,2), scipy.float_)
pd[0,0] = -cj
pd[0,1] = 1
pd[1,0] = -2*100*y[0]*y[1]-1
pd[1,1] = 100*(1-y[0]**2)-cj
return pd
def vdp_rt_func(t, y, yp, rpar):
trigger = y[0]
return scipy.asarray([trigger])
# initial settings for vdp system
y0_vdp = scipy.array([2.0, 0])
tlist_vdp = scipy.array([0] + [20*x for x in range(1, 11)])
t0_vdp = tlist_vdp[0]
yp0_vdp = vdp_func(y0_vdp, t0_vdp)
num_events_vdp = 1
abstol_vdp = scipy.array([0.1e-5, 0.1e-3])
reltol_vdp = scipy.array([0.1e-5, 0.1e-5])
################################################################################
# AlgebraicRules_BasicExample from the SBML Level 2 Version 1 test suite
# Variables x, y
def algExampleBasic_func(y,t):
ydot = scipy.zeros(3, scipy.float_)
ydot[0] = 1*y[1]
return ydot
def algExampleBasic_res_func(t, y, yprime, rpar):
res = scipy.zeros(3, scipy.float_)
ypcalc = algExampleBasic_func(y,t)
res[0] = ypcalc[0] - yprime[0]
res[1] = -y[2]+y[0]+y[1]
res[2] = yprime[2]
return res
# initial settings for basic algebraic system
y0_algBasic = scipy.array([0.5, 0.5, 1])
tlist_algBasic = scipy.array([0] + [0.2*x for x in range(1, 51)])
t0_algBasic = tlist_algBasic[0]
yp0_algBasic = algExampleBasic_func(y0_algBasic, t0_algBasic)
abstol_algBasic = scipy.array([0.1e-8, 0.1e-8, 0.1e-8])
reltol_algBasic = scipy.array([0.1e-5, 0.1e-5, 0.1e-5])
################################################################################
# AlgebraicRules_FastReactionExample from the SBML Level 2 Version 1 test suite
# The given assignmentRule is made into an algebraic rule
# Variables X0, X1, T, S1, S2
#Parameters
Keq = 2.5
k1 = 0.1
k2 = 0.15
def algExample_func(y,t):
ydot = scipy.zeros(5, scipy.float_)
ydot[0] = -k1*y[0]
ydot[1] = k2*y[4]
ydot[2] = k1*y[0] - k2*y[4]
return ydot
def algExample_res_func(t, y, yprime, ires):
res = scipy.zeros(5, scipy.float_)
ypcalc = algExample_func(y,t)
res[0] = ypcalc[0] - yprime[0]
res[1] = ypcalc[1] - yprime[1]
res[2] = ypcalc[2] - yprime[2]
res[3] = (y[3] + y[4] - y[2])
res[4] = (y[4] - Keq*y[3])
return res
# initial settings for algebraic fast reaction system
y0_alg = scipy.array([1.0, 0, 0, 0, 0])
tlist_alg = scipy.array([0] + [0.8*x for x in range(1, 51)])
t0_alg = tlist_alg[0]
yp0_alg = algExample_func(y0_alg, t0_alg)
num_events = 1
abstol_alg = scipy.array([0.1e-5, 0.1e-5, 0.1e-5, 0.1e-5, 0.1e-5])
reltol_alg = scipy.array([0.1e-5, 0.1e-5, 0.1e-5, 0.1e-5, 0.1e-5])
################################################################################
# Simple linear equation
# This test problem is for testing tstop
# Note: Some time points in the tlist will be skipped when tstop is
# encountered.
def linear_func(y, t):
ydot = scipy.zeros(1, scipy.float_)
ydot[0] = -5
return ydot
def linear_res_func(t, y, yprime, ires):
return linear_func(y, t) - yprime
# initial settings for simple linear system
y0_linear = scipy.array([100])
tlist_linear = scipy.array([0] + [20*x for x in range(1, 11)])
t0_linear = tlist_linear[0]
yp0_linear = linear_func(y0_linear, t0_linear)
abstol_linear = scipy.array([0.1e-5])
reltol_linear = scipy.array([0.1e-5])
tstop_linear = 201
################################################################################
# The non_neg example tests the checking of y going negative
# This system has a rapid change in dynamics at y = k, and it's
# easy for the integratory to miss if non-negativity is not enforced.
#Parameters
k = 1e-12
def non_neg_func(y,t):
ydot = scipy.zeros(1, scipy.float_)
ydot[0] = -y[0]/(k+y[0])
return ydot
def non_neg_res_func(t, y, yprime, ires):
res = scipy.zeros(1, scipy.float_)
ypcalc = non_neg_func(y,t)
res[0] = ypcalc[0] - yprime[0]
return res
# initial settings for basic non negative system
y0_non_neg = scipy.array([1.0])
tlist_non_neg = scipy.array([0] + [0.04*x for x in range(1, 51)])
t0_non_neg = tlist_non_neg[0]
yp0_non_neg = non_neg_func(y0_non_neg, t0_non_neg)
abstol_non_neg = scipy.array([0.1e-5])
reltol_non_neg = scipy.array([0.1e-5])
################################################################################
# Simple time dependent trigonometric system
# This test problem is used to exercise the max_steps option (and output
# redirection): integrating cos(t) out to t = 2000 requires far more than the
# default number of internal integrator steps.
def trig_func(y, t):
ydot = scipy.zeros(1, scipy.float_)
ydot[0] = scipy.cos(t)
return ydot
def trig_res_func(t, y, yprime, ires):
return trig_func(y, t) - yprime
# initial settings for simple linear system
y0_trig = scipy.array([0])
tlist_trig = scipy.array([0] + [1000*x for x in range(1, 3)])
t0_trig = tlist_trig[0]
yp0_trig = trig_func(y0_trig, t0_trig)
abstol_trig = scipy.array([0.1e-5])
reltol_trig = scipy.array([0.1e-5])
################################################################################
class test_daskr(unittest.TestCase):
def test_basic(self):
""" Basic test of daskr """
y, t, ypout, t_root, y_root, i_root = daeint(vdp_res_func, tlist_vdp,
y0_vdp, yp0_vdp,
rtol = reltol_vdp,
atol = abstol_vdp,
intermediate_output=False)
self.assertAlmostEqual(y[1][0], 1.85821444, 4)
self.assertAlmostEqual(y[3][0], 0.1484599E+01, 4)
self.assertAlmostEqual(y[7][0], -0.1501730E+01, 4)
self.assertAlmostEqual(y[10][0], 0.1718428E+01, 4)
self.assertAlmostEqual(y[2][1], -0.9068522E-02, 3)
self.assertAlmostEqual(y[4][1], -0.5847012E-01, 3)
self.assertAlmostEqual(y[8][1], 0.3569131E-01, 3)
self.assertAlmostEqual(y[9][1], -0.7422161E-02, 3)
def test_Dfun(self):
""" Test user-supplied Jacobian """
y, t, ypout, t_root, y_root, i_root = daeint(vdp_res_func, tlist_vdp,
y0_vdp, yp0_vdp,
jac=vdp_Dfun,
rtol = reltol_vdp,
atol = abstol_vdp,
intermediate_output=False)
self.assertAlmostEqual(y[1][0], 1.85821444, 4)
self.assertAlmostEqual(y[6][1], 8.93022e-3, 4)
def test_term_roots(self):
""" Test root finding with termination """
y, t, ypout, t_root, y_root, i_root = daeint(vdp_res_func, tlist_vdp,
y0_vdp, yp0_vdp,
nrt=1,
rt=vdp_rt_func,
rtol = reltol_vdp,
atol = abstol_vdp,
intermediate_output=False)
self.assertAlmostEqual(t_root, 0.8116351E+02, 4)
self.assertAlmostEqual(y_root[0], -0.3295063E-12, 4)
self.assertAlmostEqual(y_root[1], -0.6714100E+02, 3)
self.assertEqual(i_root[0], -1)
def test_tstop(self):
""" Test that integration will not continue past tstop """
y, t, ypout, t_root, y_root, i_root = daeint(linear_res_func,
tlist_linear,
y0_linear, yp0_linear,
rtol=reltol_linear,
atol=abstol_linear,
tstop=tstop_linear)
# Check that the final time point returned is for tstop
self.assertAlmostEqual(t[-1], tstop_linear, 4)
self.assertAlmostEqual(y[2][0], -100, 4)
def test_algebraic_basic(self):
""" Test a simpler dae system (algebraicRules-basic-l2.xml) """
y, t, ypout, t_root, y_root, i_root = daeint(algExampleBasic_res_func,
tlist_algBasic,
y0_algBasic, yp0_algBasic,
rtol = reltol_algBasic,
atol = abstol_algBasic)
self.assertAlmostEqual(y[1][0], 0.590635382065755, 4)
self.assertAlmostEqual(y[13][0], 0.962863096631099, 4)
self.assertAlmostEqual(y[15][1], 0.0248936510867585, 4)
self.assertAlmostEqual(y[27][1], 0.00225832507503575, 4)
def test_algebraic_fastreactionexample(self):
""" Test a dae system (algebraicRules-fastReactionExample-l2.xml) """
y, t, ypout, t_root, y_root, i_root = daeint(algExample_res_func,
tlist_alg,
y0_alg, yp0_alg,
rtol = reltol_alg,
atol = abstol_alg)
self.assertAlmostEqual(y[1][0], 0.9231163463, 4)
self.assertAlmostEqual(y[13][0], 0.353454681, 4)
self.assertAlmostEqual(y[8][1], 0.142837751, 4)
self.assertAlmostEqual(y[20][1], 0.492844600, 4)
self.assertAlmostEqual(y[15][2], 0.346376313, 4)
self.assertAlmostEqual(y[27][2], 0.230837103, 4)
self.assertAlmostEqual(y[22][3], 0.081296859, 4)
self.assertAlmostEqual(y[37][3], 0.039501126, 4)
self.assertAlmostEqual(y[29][4], 0.150075280, 4)
self.assertAlmostEqual(y[41][4], 0.078591978, 4)
self.assertAlmostEqual(y[50][0], 0.018315639, 4)
self.assertAlmostEqual(y[50][1], 0.917958431, 4)
self.assertAlmostEqual(y[50][2], 0.06372593, 4)
self.assertAlmostEqual(y[50][3], 0.018207409, 4)
self.assertAlmostEqual(y[50][4], 0.045518522, 4)
def test_maxsteps_on(self):
""" Test to make sure the max_steps parameter works """
y, t, ypout, t_root, y_root, i_root = daeint(trig_res_func, tlist_trig,
y0_trig, yp0_trig,
rtol = reltol_trig,
atol = abstol_trig,
max_steps = 7500)
# the integrator will only get to the specified time points if
# max_steps is increased significantly above the default
self.assertAlmostEqual(y[1][0], 0.82689894, 4)
self.assertAlmostEqual(y[2][0], 0.93004774, 4)
def test_maxsteps_off(self):
""" Test to make sure the trig_func problem will cause an error \
if max_steps is not set """
redir.start()
try:
self.assertRaises(SloppyCell.daskr.daeintException,
daeint(trig_res_func, tlist_trig,
y0_trig, yp0_trig,
rtol = reltol_trig,
atol = abstol_trig))
except SloppyCell.daskr.daeintException:
pass
messages = redir.stop()
def test_algebraic_calculate_ic(self):
""" Test automatic calculation of initial conditions """
# pass an inconsistent set of initial conditions to the fast reaction
# example
y0_inconsistent = scipy.array([1.0, 0, 0, 1500, 15])
yp0_inconsistent = algExample_func(y0_inconsistent, t0_alg)
var_types_inconsistent = scipy.array([1, 1, 1, -1, -1])
y, t, ypout, t_root, y_root, i_root = daeint(algExample_res_func,
tlist_alg,
y0_inconsistent, yp0_alg,
rtol = reltol_alg,
atol = abstol_alg,
calculate_ic = True,
var_types = var_types_inconsistent)
# check to make sure the initial condition was calculated correctly
self.assertAlmostEqual(y[0][0], 1., 4)
self.assertAlmostEqual(y[0][1], 0., 4)
self.assertAlmostEqual(y[0][2], 0., 4)
self.assertAlmostEqual(y[0][3], 0., 4)
self.assertAlmostEqual(y[0][4], 0., 4)
# check other points on the trajectory
self.assertAlmostEqual(y[1][0], 0.9231163463, 4)
self.assertAlmostEqual(y[13][0], 0.353454681, 4)
self.assertAlmostEqual(y[8][1], 0.142837751, 4)
self.assertAlmostEqual(y[20][1], 0.492844600, 4)
self.assertAlmostEqual(y[15][2], 0.346376313, 4)
self.assertAlmostEqual(y[27][2], 0.230837103, 4)
self.assertAlmostEqual(y[22][3], 0.081296859, 4)
self.assertAlmostEqual(y[37][3], 0.039501126, 4)
self.assertAlmostEqual(y[29][4], 0.150075280, 4)
self.assertAlmostEqual(y[41][4], 0.078591978, 4)
self.assertAlmostEqual(y[50][0], 0.018315639, 4)
self.assertAlmostEqual(y[50][1], 0.917958431, 4)
self.assertAlmostEqual(y[50][2], 0.06372593, 4)
self.assertAlmostEqual(y[50][3], 0.018207409, 4)
self.assertAlmostEqual(y[50][4], 0.045518522, 4)
def test_enforce_non_negativity(self):
""" Test enforcement of non-negativity during integration """
# check to make sure that the answer is *incorrect* if we don't enforce
# nonegativity (ineq_constr=0)
y, t, ypout, t_root, y_root, i_root = daeint(non_neg_res_func,
tlist_non_neg,
y0_non_neg, yp0_non_neg,
rtol = reltol_non_neg,
atol = abstol_non_neg,
ineq_constr=False)
self.assertAlmostEqual(y[1][0], 0.960000000, 4)
self.assertAlmostEqual(y[-4][0], -.8800000000, 4)
# check to make sure that the answer is *correct* if we do enforce
# nonegativity (ineq_constr=2)
y, t, ypout, t_root, y_root, i_root = daeint(non_neg_res_func,
tlist_non_neg,
y0_non_neg, yp0_non_neg,
rtol = reltol_non_neg,
atol = abstol_non_neg,
ineq_constr=True)
self.assertAlmostEqual(y[1][0], 0.960000000, 4)
self.assertAlmostEqual(y[-4][0], 0.000000, 4)
def test_redirect_output(self):
""" Test to make sure we can turn output redirection on and off """
# By default output redirection is off, so we begin by doing an example
# that should generate errors and making sure that no output is received.
redir = Utility.Redirector()
redir.start()
# This example will generate errors because the maximum number of steps
# (500) will be passed
y, t, ypout, t_root, y_root, i_root = daeint(trig_res_func, tlist_trig,
y0_trig, yp0_trig,
rtol = reltol_trig,
atol = abstol_trig,
max_steps = 7500)
messages = redir.stop()
self.assertEqual(len(messages), 0)
redir = Utility.Redirector()
redir.start()
# Now we do the same example again with output redirection off
y, t, ypout, t_root, y_root, i_root = daeint(trig_res_func, tlist_trig,
y0_trig, yp0_trig,
rtol = reltol_trig,
atol = abstol_trig,
max_steps = 7500,
redir_output = False)
messages = redir.stop()
self.assertNotEqual(len(messages), 0)
################################################################################
suite = unittest.makeSuite(test_daskr)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | 6,992,877,819,425,147,000 | 38.011236 | 88 | 0.497523 | false |
LHEEA/meshmagick | meshmagick/densities.py | 1 | 1669 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Module to retrieve some densities of different mediums"""
_DENSITIES = {'CONCRETE': 2300.,
'REINFORCED_CONCRETE': 2400.,
'FRESH_WATER': 1000.,
'SALT_WATER': 1025.,
'SAND': 1600.,
'STEEL': 7850.,
'ALUMINUM': 2700.,
'LEAD': 11350.,
'TITANIUM': 4500.,
'POLYSTYRENE': 1050.,
'GASOLINE': 750.,
'DIESEL_FUEL': 850.,
'ETHANOL': 789.,
'AIR_20DEGC': 1.204,
'BUTANE': 2.7,
'PROPANE': 2.01,
'HYDROGEN_-252DEGC': 70.,
'NITROGEN_-195DEGC': 810.
}
def get_density(medium):
"""Get the density of medium.
Parameters
----------
medium : str
The medium name
Returns
-------
float
Density of medium (kg/m**3)
"""
try:
density = _DENSITIES[str(medium).upper()]
except KeyError:
raise KeyError('Medium %s not known...' % medium)
return density
def list_medium():
"""Get the list of available medium.
Returns
-------
list
List of available medium
"""
return list(_DENSITIES.keys())
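# Hedged usage sketch (not part of the original module):
if __name__ == '__main__':
    print(get_density('steel'))  # 7850.0 -- lookups are case-insensitive
    print(list_medium()[:3])     # a few of the known medium names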
# def get_table():
#
# col_width = 22
# hline = '+{0:s}+{0:s}+\n'.format('-' * col_width)
# table = hline
# table += '|{:<{n}s}|{:>{n}s}|\n'.format('NAME', 'DENSITY (KG/M**3)', n=col_width)
# table += hline
#
# for key in _DENSITIES:
# table +=
# table += hline
#
# return table
#
# if __name__ == '__main__':
#
# print get_table()
| gpl-3.0 | 938,801,555,605,954,600 | 21.554054 | 87 | 0.461354 | false |
vesellov/visio2python | data2js.py | 1 | 25682 | #!/usr/bin/python
#data2js.py
#
#
# <<<COPYRIGHT>>>
#
#
#
#
import os
import sys
import re
import string
import pprint
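# Input format (inferred from the parser below and therefore hedged -- the real
# exporter output may differ slightly): the input file is line based, e.g.
#   page Automat_example
#   label my_machine()\n[post,fast]
#   state READY
#   link 12 [READY] -> [DONE]
#   {[255;0;0]#0#event-name}
#   ERROR! some problem reported by the exporter
# Usage: python data2js.py <exported.txt> <dump.py> <output_dir>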
def main():
d = {}
page = ''
link_id = ''
for line in open(sys.argv[1]).read().splitlines():
line = line.strip()
if line == '':
continue
#---page
if line.startswith('page'):
page = line.split(' ')[1].replace('-', '_')
if page.startswith('Page'):
page = page.replace('Page', 'Automat')
if not page.endswith('()'):
page += '()'
d[page] = {'states': [],
'links': {},
'label': '',
'errors': [],
'actions': set(),
'conditions': set(),
'events': set(),
'timers': {},
'msgfunc': False,
'messages': [], }
continue
#---state
if line.startswith('state'):
state = line.split(' ')[1]
d[page]['states'].append(state)
continue
#---label
if line.startswith('label'):
label = ' '.join(line.split(' ')[1:])
d[page]['label'] = label
continue
#---link
if line.startswith('link'):
x, link_id, state_from, arrow, state_to = line.split(' ')
state_from = state_from.strip('[]')
state_to = state_to.strip('[]')
d[page]['links'][link_id] = {'from': state_from,
'to': state_to,
'data': [],
'condition': '',
'actions': [],}
continue
#---ERROR!
if line.startswith('ERROR!'):
d[page]['errors'].append(line[6:])
print 'ERROR', line[6:]
continue
words = line.strip('{}').split('#')
color, style = words[:2]
word = '#'.join(words[2:])
word = word.replace('\\n', ' ').replace(' ', ' ')
style = int(style)
if len(d[page]['links'][link_id]['data']) > 0 and d[page]['links'][link_id]['data'][-1][0] == color:
d[page]['links'][link_id]['data'][-1][2] += word
else:
d[page]['links'][link_id]['data'].append([color, style, word])
# print
print 'Found %d pages in the file "%s"' % (len(d), sys.argv[1])
# print
for page, info in d.items():
filename = page.replace('()', '') + '.js'
filepath = os.path.abspath(os.path.join(sys.argv[3], filename))
print 'creating', filepath
automatname = page.replace('()', '')
classname = ''.join(map(string.capitalize, page.replace('()', '').split('_')))
label = info['label'].strip()
modes = []
if label:
if label.lower().startswith('bitdust'):
lbl = label.split('\\n')[1]
else:
lbl = label.split('\\n')[0]
classname = ''.join(map(string.capitalize, lbl.replace('()', '').split('_')))
automatname = lbl
if label.endswith(']'):
for mod in label.split('\\n')[-1].strip('[]').split(','):
modes.append(mod.strip())
src = ''
src_switch = ''
src_actions = ''
src_conditions = ''
#---switch start
for state_index in range(len(info['states'])):
has_conditions = False
state = info['states'][state_index]
# src += ' //---%s---\n' % state
# if state_index == 0:
src += " case '%s': {\n" % state
# else:
# src += " }\ncase '%s': {\n" % state
link_index = 0
for link_id, link in info['links'].items():
if link['from'] != state:
continue
condition = ''
actions = []
underlined = False
is_action = False
has_actions = False
for color, style, word in link['data']:
if word.strip() == '':
continue
if style != 0:
underlined = True
if underlined and style == 0:
is_action = True
if word.strip().startswith('do'):
is_action = True
if color == '[128;128;0]' and word.lower() == word and word.count('.state') == 0 and word.rstrip().endswith('('):
is_action = True
if color == '[255;0;0]':
event = word.strip()
if is_action:
if event.count(','):
for e in event.split(','):
if e.strip():
actions.append("'%s', " % e.strip())
else:
actions.append("'%s'" % event)
else:
if condition and condition[-1] != ' ':
condition += ' '
condition += "event == '%s'" % event
d[page]['events'].add(event)
if event.count('timer'):
if event not in d[page]['timers']:
d[page]['timers'][event] = []
if state not in d[page]['timers'][event]:
d[page]['timers'][event].append(state)
elif color == '[0;0;0]':
if is_action:
w = word.strip()
if w not in ['&&', '||', '!', ',', ';', 'arg']:
i = w.find('MSG')
if i >= 0:
if i > 0:
d[page]['messages'].append(w[i:])
w = "%sthis.msg('%s', arg)" % (w[:i-1], w[i:])
else:
d[page]['messages'].append(w)
w = "this.msg('%s', arg)" % w
d[page]['msgfunc'] = True
else:
if not ( w.count(' ') or w.count(',') ):
w = "'%s'" % w
if w != ';':
actions.append(w)
else:
if condition and condition[-1] != ' ':
condition += ' '
condition += word.lstrip()
elif color == '[0;128;0]':
if condition and condition[-1] != ' ':
condition += ' '
# if word.count(')') > word.count('(') and word.count('(') == 1:
if re.search('is\w+?\(\)', word.strip()):
condition += 'this.' + word.strip().replace('()', '(event, args)').lstrip()
d[page]['conditions'].add('this.' + word.strip().replace('()', '(event, args)'))
elif re.search('has\w+?\(\)', word.strip()):
condition += 'this.' + word.strip().replace('()', '(event, args)').lstrip()
d[page]['conditions'].add('this.' + word.strip().replace('()', '(event, args)'))
elif word.count('.state'):
condition += word.strip().replace('.state', '.A().state')
# elif word.count(','):
# for w in word.split(','):
# if w.strip().upper() == w.strip():
# condition += "'%s', " % w.strip()
# else:
# condition += "%s, " % w.strip()
elif word.strip().upper() == word.strip():
condition += "'%s'" % word.strip()
elif word.strip().count('len('):
condition += word.strip().replace('len(', 'len(this.')
else:
condition += 'this.' + word.strip()
elif color == '[128;128;0]':
if is_action:
actions.append("%s" % word.strip().replace('(', '.A('))
else:
if condition and condition[-1] != ' ':
condition += ' '
condition += '%s' % word.replace('().state', '.A().state').lstrip()
elif color == '[0;0;255]':
if is_action:
for nl in word.replace(';', '\n').split('\n'):
for w in nl.split(' '):
if w.strip():
prefix = ''
if w.lstrip().startswith('#'):
i = 0
nw = w.lstrip('# ')
prefix = w[:len(w) - len(nw)]
w = nw
if re.match('^do\w+?\(\)$', w):
actions.append(prefix + 'this.' + w.replace('()', '(event, args)').strip())
elif re.match('^do\w+?\(.*?MSG_\d+.*?\)$', w):
def _repl(mo):
d[page]['messages'].append(mo.group(1))
d[page]['msgfunc'] = True
return "this.msg('%s', arg)" % mo.group(1)
w = re.sub('(MSG_\d+)', _repl, w.strip())
actions.append(prefix + 'this.' + w)
elif re.match('^do\w+?\(.+?\)$', w):
actions.append(prefix + 'this.' + w.strip())
elif re.match('^[\w\ ]+[\=\+\-\*\\\/\^\%\!\&]+[\w\ ]+?$', w):
actions.append(prefix + 'this.' + w.strip())
elif re.match('^[\w\_\ ]+\.[\w\_\ ]+\(\)$', w):
actions.append(prefix + w.strip())
elif w.strip() == 'pass':
actions.append('pass')
else:
print ' ?', prefix, w
else:
print 'skipped:', link['from'], link['to'], color, style, word
if link['to'] != state:
if 'post' in modes:
if 'fast' in modes:
actions.append("newstate = '%s';" % link['to'])
else:
actions.append("this.state = '%s';" % link['to'])
else:
actions.insert(0, "this.state = '%s';" % link['to'])
condition = condition.strip()
# while True:
# r = re.search('event == \'(\w+?)\.state\' is \'([\w\s\!\?\.\-]+?)\'', condition)
# if r:
# condition = re.sub('event == \'\w+?\.state\' is \'[\w\s\!\?\.\-]+?\'', '( event == \'%s.state\' and arg == \'%s\' )' % (r.group(1), r.group(2)), condition, 1)
# # print 1, condition
# else:
# break
# while True:
# r = re.search('event == \'(\w+?)\.state\' == \'([\w\s\!\?\.\-]+?)\'', condition)
# if r:
# condition = re.sub('event == \'\w+?\.state\' == \'[\w\s\!\?\.\-]+?\'', '( event == \'%s.state\' and arg == \'%s\' )' % (r.group(1), r.group(2)), condition, 1)
# # print 1, condition
# else:
# break
# while True:
# r = re.search('event == \'(\w+?)\.state\' in \[([\'\,\w\s\!\?\.\-]+?)\]', condition)
# if r:
# condition = re.sub('event == \'\w+?\.state\' in \[[\'\,\w\s\!\?\.\-]+?\]', '( event == \'%s.state\' and arg in [ %s ] )' % (r.group(1), r.group(2)), condition, 1)
# # print 2, condition
# else:
# break
# while True:
# r = re.search('event == \'(\w+?)\.state\' not in \[([\'\,\w\s\!\?\.\-]+?)\]', condition)
# if r:
# condition = re.sub('event == \'\w+?\.state\' not in \[[\'\,\w\s\!\?\.\-]+?\]', '( event == \'%s.state\' and arg not in [ %s ] )' % (r.group(1), r.group(2)), condition, 1)
# # print 3, condition
# else:
# break
condition = condition.replace(' ', ' ')
has_conditions = True
if link_index == 0:
src += " if ( %s ) {\n" % condition
else:
src += " } else if ( %s ) {\n" % condition
d[page]['links'][link_id]['condition'] = condition
has_actions = has_actions or len(actions) > 0
opened = ''
for action in actions:
if action.count('(') == 1 and action.count(')') == 1:
if action.find('(') > action.find(')'):
if opened:
opened += action.split(')')[0]+')'
action1 = opened
opened = ''
src += " "
src += action1
src += ';\n'
if action1.startswith('this.'):
d[page]['actions'].add(action1)
d[page]['links'][link_id]['actions'].append(action1)
action = action.split(')')[1].lstrip()
opened = action
else:
if opened == '':
src += " "
src += action
src += ';\n'
if action.startswith('this.'):
d[page]['actions'].add(action)
d[page]['links'][link_id]['actions'].append(action)
else:
opened += ' ' + action
elif action.count('(') == 1 and action.count(')') == 0:
if opened != '':
opened += action
else:
opened = action
elif action.count('(') == 0 and action.count(')') == 0:
if opened != '':
opened += action
else:
src += " "
src += action
src += '\n'
d[page]['links'][link_id]['actions'].append(action)
elif action.count('(') == 0 and action.count(')') == 1:
if opened.count('(') < opened.count(')') + 1:
opened += action
else:
opened += action
action = opened
opened = ''
src += " "
src += action
src += ';\n'
if action.startswith('this.'):
d[page]['actions'].add(action)
d[page]['links'][link_id]['actions'].append(action)
elif action.count('(') == 2 and action.count(')') == 2 and action.count('this.msg') == 1:
funct = action[0:action.find('(')]
action1 = funct + '(event, args)'
src += " "
src += action
src += ';\n'
found = False
for a in d[page]['actions']:
if a.startswith(funct):
found = True
if not found:
if action.startswith('this.'):
d[page]['actions'].add(action1)
d[page]['links'][link_id]['actions'].append(action)
else:
print ' ?', action
link_index += 1
# if not has_actions:
# src += ' pass\n'
if has_conditions:
src += ' }\n'
src += ' break;\n'
src += ' }\n'
# if not has_conditions:
# src += ' pass\n'
src += ' }\n'
src += ' },\n'
d[page]['conditions'] = list(d[page]['conditions'])
d[page]['actions'] = list(d[page]['actions'])
d[page]['events'] = list(d[page]['events'])
#---switch end
src_switch = src
src = ''
#---conditions
for condition in d[page]['conditions']:
name = condition.replace('this.', '').replace('(arg)', '')
src += ' %s: function(event, args) {\n' % name
src += ' // Condition method.\n'
src += ' },\n\n'
src_conditions = src
src = ''
#---actions
for action in d[page]['actions']:
name = action.replace('this.', '').replace('(arg)', '')
src += ' %s: function(event, args) {\n' % name
src += ' // Action method.\n'
src += ' },\n\n'
src_actions = src
src = ''
#---header
head = ''
head += '\n\n'
head += '// %s\n' % automatname.replace('(', '').replace(')', '')
head += '// EVENTS:\n'
for event in sorted(d[page]['events']):
head += '// `%s`\n' % event
if len(d[page]['errors']) > 0:
head += '// ERRORS:\n'
for error in d[page]['errors']:
head += '// %s\n' % error
print ' ERROR:', error
head += '\n\n\n'
head += 'var RootVisualizer = Automat.extend({\n\n'
head += ' A: function(event, args) {\n'
head += ' // Access method to interact with %s machine.\n' % automatname
head += ' switch (this.state) {\n'
# head += ' # set automat name and starting state here\n'
# head += ' _%s = %s(\'%s\', \'%s\')\n' % (classname,
# classname,
# automatname.replace('(', '').replace(')', ''),
# d[page]['states'][0])
# head += ' if event is not None:\n'
# head += ' _%s.automat(event, arg)\n' % classname
# head += ' return _%s\n\n\n' % classname
# head += 'def Destroy():\n'
# head += ' """\n'
# head += ' Destroy %s automat and remove its instance from memory.\n' % automatname
# head += ' """\n'
# head += ' global _%s\n' % classname
# head += ' if _%s is None:\n' % classname
# head += ' return\n'
# head += ' _%s.destroy()\n' % classname
# head += ' del _%s\n' % classname
# head += ' _%s = None\n\n\n' % classname
# head += 'class %s(automat.Automat):\n' % classname
# head += ' """\n'
# head += ' This class implements all the functionality of the ``%s()`` state machine.\n' % automatname.replace('(', '').replace(')', '')
# head += ' """\n\n'
#---mode "fast"
# if 'fast' in modes:
# head += ' fast = True\n\n'
#---mode "post"
# if 'post' in modes:
# head += ' post = True\n\n'
# if len(d[page]['timers']) > 0:
# head += ' timers = {\n'
# for timer, states in d[page]['timers'].items():
# try:
# delay = timer[6:].strip()
# if delay.endswith('sec'):
# delay = delay[:-3]
# if delay.startswith('0') and '0.'+delay[1:] != delay:
# delay = '0.'+delay[1:]
# delay = float(delay)
# elif delay.endswith('min'):
# delay = int(delay[:-3]) * 60
# elif delay.endswith('hour'):
# delay = int(delay[:-4]) * 60 * 60
# else:
# delay = 0
# except:
# delay = 0
# if delay == 0:
# print ' WARNING: can not understand timer event:', timer
# continue
# head += " '%s': (%s, [%s]),\n" % (timer, str(delay), ','.join(map(lambda x: "'%s'" % x, states)))
# head += ' }\n\n'
# if d[page]['msgfunc']:
# head += ' MESSAGES = {\n'
# for msg in sorted(set(d[page]['messages'])):
# head += " '%s': '',\n" % msg
# head += ' }\n\n'
# head += ' def msg(self, msgid, arg=None):\n'
# head += " return self.MESSAGES.get(msgid, '')\n\n"
# head += ' def init(self):\n'
# head += ' """\n'
# head += ' Method to initialize additional variables and flags\n'
# head += ' at creation phase of %s machine.\n' % automatname
# head += ' """\n\n'
# head += ' def state_changed(self, oldstate, newstate, event, arg):\n'
# head += ' """\n'
# head += ' Method to catch the moment when %s state were changed.\n' % automatname
# head += ' """\n\n'
# head += ' def state_not_changed(self, curstate, event, arg):\n'
# head += ' """\n'
# head += ' This method intended to catch the moment when some event was fired in the %s\n' % automatname
# head += ' but its state was not changed.\n'
# head += ' """\n\n'
# head += ' def A(self, event, arg):\n'
# head += ' """\n'
# head += ' The state machine code, generated using `visio2python <http://bitdust.io/visio2python/>`_ tool.\n'
# head += ' """\n'
#---tail
tail = ''
# if 'init' in d[page]['events']:
# tail += '\n\ndef main():\n'
# tail += ' from twisted.internet import reactor\n'
# tail += " reactor.callWhenRunning(A, 'init')\n"
# tail += ' reactor.run()\n\n'
# tail += 'if __name__ == "__main__":\n'
# tail += ' main()\n\n'
#---modes
# if 'post' in modes and 'fast' in modes:
# head += ' newstate = self.state\n'
# src_switch += ' return newstate\n'
tail += '}\n'
src = head + src_switch + '\n\n' + src_conditions + src_actions + tail
open(filepath, 'w').write(src)
open(sys.argv[2], 'w').write(pprint.pformat(d))
print len(d), 'items wrote to', sys.argv[2]
# raw_input("\nPress ENTER to close the window")
if __name__ == '__main__':
main()
| lgpl-3.0 | 1,833,742,733,765,269,500 | 44.95064 | 195 | 0.327116 | false |
mittagessen/kraken | kraken/lib/codec.py | 1 | 8972 | # -*- coding: utf-8 -*-
#
# Copyright 2017 Benjamin Kiessling
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
pytorch compatible codec with many-to-many mapping between labels and
graphemes.
"""
import regex
import numpy as np
from typing import List, Tuple, Set, Union, Dict, Sequence
from torch import IntTensor
from kraken.lib.exceptions import KrakenEncodeException, KrakenCodecException
__all__ = ['PytorchCodec']
class PytorchCodec(object):
"""
Translates between labels and graphemes.
"""
def __init__(self, charset: Union[Dict[str, Sequence[int]], Sequence[str], str]) -> None:
"""
Builds a codec converting between graphemes/code points and integer
label sequences.
charset may either be a string, a list or a dict. In the first case
each code point will be assigned a label, in the second case each
string in the list will be assigned a label, and in the final case each
key string will be mapped to the value sequence of integers. In the
first two cases labels will be assigned automatically.
As 0 is the blank label in a CTC output layer, output labels and input
dictionaries are/should be 1-indexed.
Args:
charset (unicode, list, dict): Input character set.
"""
if isinstance(charset, dict):
self.c2l = charset
else:
self.c2l = {k: [v] for v, k in enumerate(sorted(charset), start=1)}
# map integer labels to code points because regex only works with strings
self.l2c = {} # type: Dict[str, str]
for k, v in self.c2l.items():
self.l2c[''.join(chr(c) for c in v)] = k
# sort prefixes for c2l regex
self.c2l_regex = regex.compile(r'|'.join(regex.escape(x) for x in sorted(self.c2l.keys(), key=len, reverse=True)))
# sort prefixes for l2c regex
self.l2c_regex = regex.compile(r'|'.join(regex.escape(x) for x in sorted(self.l2c.keys(), key=len, reverse=True)))
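    # A hedged sketch of the accepted charset forms (example values chosen for
    # illustration; the exact labels depend on the sorted order of the input):
    #
    #   PytorchCodec('abc')                     # 'a' -> [1], 'b' -> [2], 'c' -> [3]
    #   PytorchCodec(['a', 'ch'])               # each grapheme string gets one label
    #   PytorchCodec({'a': [1], 'ch': [2, 3]})  # explicit many-to-many mapping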
def __len__(self) -> int:
"""
Total number of input labels the codec can decode.
"""
return len(self.l2c.keys())
def max_label(self) -> int:
"""
Returns the maximum label value.
"""
return max(l for labels in self.c2l.values() for l in labels)
def encode(self, s: str) -> IntTensor:
"""
Encodes a string into a sequence of labels.
Args:
s (str): Input unicode string
Returns:
(torch.IntTensor) encoded label sequence
Raises:
KrakenEncodeException if encoding fails.
"""
splits = self._greedy_split(s, self.c2l_regex)
labels = [] # type: List[int]
for c in splits:
labels.extend(self.c2l[c])
return IntTensor(labels)
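    # Illustrative encoding (hypothetical codec, not part of this module):
    # with PytorchCodec({'a': [1], 'ch': [2]}), encode('cha') greedily splits
    # the input into ['ch', 'a'] and returns IntTensor([2, 1]); characters
    # outside the charset raise KrakenEncodeException in _greedy_split().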
def decode(self, labels: Sequence[Tuple[int, int, int, float]]) -> List[Tuple[str, int, int, float]]:
"""
Decodes a labelling.
Given a labelling with cuts and confidences returns a string with the
cuts and confidences aggregated across label-code point
correspondences. When decoding multilabels to code points the resulting
cuts are min/max, confidences are averaged.
Args:
labels (list): Input containing tuples (label, start, end,
confidence).
Returns:
list: A list of tuples (code point, start, end, confidence)
"""
# map into unicode space
uni_labels = ''.join(chr(v) for v, _, _, _ in labels)
start = [x for _, x, _, _ in labels]
end = [x for _, _, x, _ in labels]
con = [x for _, _, _, x in labels]
splits = self._greedy_split(uni_labels, self.l2c_regex)
decoded = []
idx = 0
for i in splits:
decoded.extend([(c, s, e, u) for c, s, e, u in zip(self.l2c[i],
len(self.l2c[i]) * [start[idx]],
len(self.l2c[i]) * [end[idx + len(i) - 1]],
len(self.l2c[i]) * [np.mean(con[idx:idx + len(i)])])])
idx += len(i)
return decoded
def _greedy_split(self, input: str, re: regex.Regex) -> List[str]:
"""
Splits an input string greedily from a list of prefixes. Stops when no
more matches are found.
Args:
input (str): input string
re (regex.Regex): Prefix match object
Returns:
(list) of prefixes
Raises:
(KrakenEncodeException) if no prefix match is found for some part
of the string.
"""
r = [] # type: List[str]
idx = 0
while True:
mo = re.match(input, idx)
if mo is None or idx == len(input):
if len(input) > idx:
raise KrakenEncodeException('No prefix matches for input after {}'.format(idx))
return r
r.append(mo.group())
idx = mo.end()
def merge(self, codec: 'PytorchCodec') -> Tuple['PytorchCodec', Set]:
"""
Transforms this codec (c1) into another (c2) reusing as many labels as
possible.
The resulting codec is able to encode the same code point sequences
while not necessarily having the same labels for them as c2.
Retains matching character -> label mappings from both codecs, removes
        mappings not in c2, and adds mappings not in c1. Compound labels in c2 for
code point sequences not in c1 containing labels also in use in c1 are
added as separate labels.
Args:
codec (kraken.lib.codec.PytorchCodec):
Returns:
A merged codec and a list of labels that were removed from the
original codec.
"""
# find character sequences not encodable (exact match) by new codec.
# get labels for these sequences as deletion candidates
rm_candidates = {cseq: enc for cseq, enc in self.c2l.items() if cseq not in codec.c2l}
c2l_cand = self.c2l.copy()
for x in rm_candidates.keys():
c2l_cand.pop(x)
# remove labels from candidate list that are in use for other decodings
rm_labels = [label for v in rm_candidates.values() for label in v]
for v in c2l_cand.values():
for l in rm_labels:
if l in v:
rm_labels.remove(l)
# iteratively remove labels, decrementing subsequent labels to close
# (new) holes in the codec.
offset_rm_labels = [v-idx for idx, v in enumerate(sorted(set(rm_labels)))]
for rlabel in offset_rm_labels:
c2l_cand = {k: [l-1 if l > rlabel else l for l in v] for k, v in c2l_cand.items()}
# add mappings not in original codec
add_list = {cseq: enc for cseq, enc in codec.c2l.items() if cseq not in self.c2l}
# renumber
start_idx = max((0,) + tuple(label for v in c2l_cand.values() for label in v)) + 1
add_labels = {k: v for v, k in enumerate(sorted(set(label for v in add_list.values() for label in v)), start_idx)}
for k, v in add_list.items():
c2l_cand[k] = [add_labels[label] for label in v]
return PytorchCodec(c2l_cand), set(rm_labels)
def add_labels(self, charset: Union[Dict[str, Sequence[int]], Sequence[str], str]) -> 'PytorchCodec':
"""
Adds additional characters/labels to the codec.
charset may either be a string, a list or a dict. In the first case
each code point will be assigned a label, in the second case each
string in the list will be assigned a label, and in the final case each
key string will be mapped to the value sequence of integers. In the
first two cases labels will be assigned automatically.
As 0 is the blank label in a CTC output layer, output labels and input
dictionaries are/should be 1-indexed.
Args:
charset (unicode, list, dict): Input character set.
"""
if isinstance(charset, dict):
c2l = self.c2l.copy()
c2l.update(charset)
else:
c2l = self.c2l.copy()
c2l.update({k: [v] for v, k in enumerate(sorted(charset), start=self.max_label()+1)})
return PytorchCodec(c2l)
| apache-2.0 | -5,360,282,497,666,862,000 | 39.233184 | 122 | 0.592399 | false |
quanzhuo/scripts | dropbox_parser.py | 1 | 8246 | #!/usr/bin/env python3
"""A simple dropbox parser
This is a simple tool that can parse errors in android dropbox and
print errors on standard output. The errors are stored in a global
dict variable named "result".
"""
import os
import sys
import re
import gzip
import shutil
import time
from datetime import datetime
# 'result' variable has the following structure:
#
# {"UNKNOWN_RESET" : [time, ...],
# "FRAMEWORK_REBOOT" : [time ...],
# "SYSTEM_RESTART" : [time, ...],
# "SYSTEM_BOOT" : [time, ...],
# "system_server_watchdog" : [time ...],
# "system_server_crash" : [time ...],
# "SYSTEM_FSCK" : [time, ...],
# "system_server_anr" : [time ...],
# "system_app_crash" : {"packagename" : [time ...], ...},
# "system_app_native_crash" : {"packagename" : [time ...], ...},
# "data_app_native_crash" : {"packagename" : [time ...], ...},
# "data_app_crash" : {"packagename" : [time ...], ...},
# "system_app_anr" : {"packagename" : [time, ...], ...},
# "data_app_anr" : {"packagename" : [time, ...], ...},
# "SYSTEM_TOMBSTONE" : {"packagename" : [time, ...], ...},
# "system_app_wtf" : {"packagename" : [time, ...], ...},
# "SYSTEM_LAST_KMSG" : [time, ...],
# "SYSTEM_RECOVERY_KMSG" : [time, ...],
# "SYSTEM_AUDIT" : [time, ...],
# "system_server_wtf" : [time, ...]
# }
result = {}
verbose = False
dropboxpath = ""
def usage():
print("Usage: python " + sys.argv[0] + " [-v] <dropbox folder>\n")
print(" [-v]: Verbose output, default not")
print(" <dropbox folder>: Path to the dropbox, which is Mandatory")
def has_timestamp(filename):
pathname = os.path.join(dropboxpath, filename)
if os.path.isdir(pathname):
return False
if re.search(r"[0-9]{1,}", filename):
return True
else:
return False
def gettime_readable(filename):
"""return a human readable time string"""
unix_time = gettime_unix(filename)
return datetime.fromtimestamp(int(unix_time[:-3])).isoformat(" ")
def gettime_unix(filename):
m = re.search(r"[0-9]{1,}", filename)
return m.group(0)
def unix_to_readable(unix_time):
time_local = time.localtime(int(unix_time[:-3]))
return time.strftime("%Y-%m-%d %H:%M:%S", time_local)
def get_pkgname_sys_app_crash(pathname):
with open(pathname, errors='ignore') as f:
firstline = f.readline()
return firstline.split(":")[1].strip()
def get_pkgname_sys_app_anr(filepath):
return get_pkgname_sys_app_crash(filepath)
def get_pkgname_data_app_anr(filepath):
return get_pkgname_sys_app_crash(filepath)
def get_pkgname_data_app_crash(filepath):
return get_pkgname_sys_app_crash(filepath)
def get_pkgname_sys_app_native_crash(filepath):
return get_pkgname_sys_app_crash(filepath)
def get_pkgname_data_app_native_crash(filepath):
return get_pkgname_sys_app_crash(filepath)
def get_pkgname_system_tombstone(filepath):
pkgname = "UNKNOWN"
with open(filepath, errors='ignore') as f:
for line in f:
if ">>> " in line:
pkgname = line.split(">>>")[1].strip().split()[0]
break
return pkgname
def get_pkgname_sys_app_strictmode(filepath):
return get_pkgname_sys_app_crash(filepath)
def get_pkgname_sys_app_wtf(filepath):
return get_pkgname_sys_app_crash(filepath)
def ungzip(filename):
"""extract gzip file"""
subdir = filename[:-3]
abs_filename = os.path.join(dropboxpath, filename)
extract_to = os.path.join(dropboxpath, subdir)
if os.path.exists(extract_to):
shutil.rmtree(extract_to)
uncompressfilename = os.path.join(extract_to, subdir)
gzfile = gzip.GzipFile(mode='rb', fileobj=open(abs_filename, 'rb'))
os.mkdir(extract_to)
open(uncompressfilename, 'wb').write(gzfile.read())
return uncompressfilename
def parse_time(filename):
"""get time of the error"""
pattern = filename.split("@", 1)[0]
times = []
time = gettime_unix(filename)
if pattern in result:
times = result[pattern]
times.append(time)
else:
times = [time]
result[pattern] = times
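# Illustrative example (hypothetical dropbox entry name, not from a real device):
# parse_time("SYSTEM_BOOT@1464912990000.txt") stores "1464912990000" under
# result["SYSTEM_BOOT"], because the pattern is the text before the first "@"
# and gettime_unix() extracts the first run of digits from the filename.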
def parse_pkgname(filename):
"""get time and package name of the error event"""
unix_time = gettime_unix(filename)
if filename.endswith(".gz"):
filepath = ungzip(filename)
else:
filepath = os.path.join(dropboxpath, filename)
pattern = filename.split("@", 1)[0]
if pattern == "system_app_crash":
packagename = get_pkgname_sys_app_crash(filepath)
elif pattern == "system_app_anr":
packagename = get_pkgname_sys_app_anr(filepath)
elif pattern == "data_app_crash":
packagename = get_pkgname_data_app_crash(filepath)
elif pattern == "data_app_anr":
packagename = get_pkgname_data_app_anr(filepath)
elif pattern == "system_app_native_crash":
packagename = get_pkgname_sys_app_native_crash(filepath)
elif pattern == "data_app_native_crash":
packagename = get_pkgname_data_app_native_crash(filepath)
elif pattern == "SYSTEM_TOMBSTONE":
packagename = get_pkgname_system_tombstone(filepath)
elif pattern == "system_app_strictmode":
packagename = get_pkgname_sys_app_strictmode(filepath)
elif pattern == "system_app_wtf":
packagename = get_pkgname_sys_app_wtf(filepath)
if pattern not in result:
result[pattern] = {}
if packagename not in result[pattern]:
result[pattern][packagename] = []
if unix_time not in result[pattern][packagename]:
result[pattern][packagename].append(unix_time)
def parse(filename):
pattern = filename.split("@", 1)[0]
if pattern == "UNKNOWN_RESET" or \
pattern == "FRAMEWORK_REBOOT" or \
pattern == "SYSTEM_RESTART" or \
pattern == "SYSTEM_BOOT" or \
pattern == "system_server_watchdog" or \
pattern == "system_server_crash" or \
pattern == "SYSTEM_FSCK" or \
pattern == "system_server_anr" or \
pattern == "SYSTEM_LAST_KMSG" or \
pattern == "SYSTEM_RECOVERY_KMSG" or \
pattern == "SYSTEM_AUDIT" or \
pattern == "system_server_wtf":
parse_time(filename)
elif pattern == "system_app_crash" or \
pattern == "data_app_crash" or \
pattern == "system_app_strictmode" or \
pattern == "system_app_anr" or \
pattern == "data_app_anr" or \
pattern == "system_app_native_crash" or \
pattern == "data_app_native_crash" or \
pattern == "SYSTEM_TOMBSTONE" or \
pattern == "system_app_wtf":
parse_pkgname(filename)
else:
#print("UNKNOW TYPE: ", pattern)
pass
def print_result(result):
"""print the result"""
if result == {}:
print("NO DROPBOX ERROR LOG FOUND!")
return
format = "%-50s%-30s%-10s"
print(format % ("PACKAGE NAME", "TIME", "COUNT"))
print()
for key, value in result.items():
print(key.center(90, '-'))
if type(value) == list:
if not verbose:
print(format % (key, unix_to_readable(value[-1]), len(value)))
else:
for i in range(len(value)):
print(format % (key, unix_to_readable(value[i]), i+1))
elif type(value) == dict:
for p, t in value.items():
if not verbose:
print(format % (p, unix_to_readable(t[-1]), len(t)))
else:
for i in range(len(t)):
print(format % (p, unix_to_readable(t[i]), i+1))
print()
def main():
if len(sys.argv) > 3:
usage()
sys.exit(-1)
for arg in sys.argv[1:]:
if arg == "-v":
global verbose
verbose = True
elif os.path.isdir(arg):
global dropboxpath
dropboxpath = arg
else:
usage()
sys.exit(-1)
if dropboxpath == "":
usage()
sys.exit(-1)
all_items = os.listdir(dropboxpath)
files_with_timestamp = [x for x in all_items if has_timestamp(x)]
for f in files_with_timestamp:
parse(f)
print_result(result)
if __name__ == "__main__":
main()
| apache-2.0 | -6,668,455,114,190,119,000 | 29.094891 | 78 | 0.5878 | false |
vollov/python-test | tests/unit/utils/test_instance_manager.py | 1 | 2378 | '''
this file demonstrates how to manage instances
'''
import logging, unittest
logger=logging.getLogger('pytest')
class A(object):
conn='shared connection here'
def foo(self,x):
logger.debug('instance method call conn:' + self.conn)
print "executing foo(%s,%s)"%(self,x)
@classmethod
def class_foo(cls,x):
logger.debug('class method call conn:' + cls.conn)
print "executing class_foo(%s,%s)"%(cls,x)
@staticmethod
def static_foo(x):
logger.debug('static method call conn:' + A.conn)
print "executing static_foo(%s)"%x
class Session:
bind = None
def __init__(self):
logger.debug('Session initialize')
#def __init__(self, bind):
# self.bind = bind
def configure(self, bind):
self.bind = bind
def get_bind(self):
return self.bind
class DummyManager:
'''
To get session:
DummyManager.set_type('test')
session = DummyManager.get_session()
'''
engine = None
session = None
db_type = None
@classmethod
def get_session(cls):
if not cls.session:
cls.session = Session()
cls.load_engine()
return cls.session
@classmethod
def load_engine(cls):
# set default db_type
if not cls.db_type:
cls.db_type = 'prod'
if cls.db_type == 'prod':
cls.engine = 'prod_db_engine'
else:
cls.engine = 'test_db_engine'
cls.session.configure(bind=cls.engine)
@classmethod
def set_type(cls, db_type):
cls.db_type = db_type
class TestDummyManager(unittest.TestCase):
def test_run(self):
#m1 = DummyManager()
DummyManager.set_type('test')
s1 = DummyManager.get_session()
logger.debug('s1 bind={0}'.format(s1.get_bind()))
#m2 = DummyManager()
DummyManager.set_type('prod')
DummyManager.load_engine()
s2 = DummyManager.get_session()
logger.debug('s1 after set engine bind={0}'.format(s1.get_bind()))
logger.debug('s2 bind={0}'.format(s2.get_bind()))
logger.debug('s1 address={0}'.format(id(s1)))
logger.debug('s2 address={0}'.format(id(s2)))
def test_method(self):
a = A()
a.foo(1)
A.class_foo(2)
A.static_foo(3)
| mit | -3,240,063,045,336,426,500 | 22.544554 | 74 | 0.563078 | false |
nickpack/reportlab | docs/genAll.py | 1 | 1540 | #!/bin/env python
import os, sys, traceback
def _genAll(verbose=1):
from reportlab.lib.testutils import setOutDir
setOutDir(__name__)
from reportlab.lib.testutils import testsFolder
topDir=os.path.dirname(testsFolder)
L = [os.path.join(topDir,f) for f in (
'docs/reference/genreference.py',
'docs/userguide/genuserguide.py',
'tools/docco/graphdocpy.py',
)
]
for f in ('src/rl_addons/pyRXP/docs/PyRXP_Documentation.rml',
):
f = os.path.join(topDir,f)
if os.path.isfile(f):
L += [f]
break
for p in L:
os.chdir(os.path.dirname(p))
if p[-4:]=='.rml':
try:
from rlextra.rml2pdf.rml2pdf import main
main(exe=0,fn=[os.path.basename(p)], quiet=not verbose, outDir=d)
except:
if verbose: traceback.print_exc()
else:
cmd = '"%s" %s %s' % (sys.executable,os.path.basename(p), not verbose and '-s' or '')
if verbose: print cmd
os.system(cmd)
"""Runs the manual-building scripts"""
if __name__=='__main__':
#need a quiet mode for the test suite
if '-s' in sys.argv: # 'silent
verbose = 0
else:
verbose = 1
d = os.path.dirname(sys.argv[0])
if not d:
d = os.getcwd()
elif not os.path.isabs(d):
d = os.path.abspath(d)
sys.path.insert(0,os.path.dirname(d))
_genAll(verbose)
| bsd-3-clause | 2,471,236,109,906,953,700 | 31.478261 | 97 | 0.527273 | false |
google/matched_markets | matched_markets/tests/test_data_simulator.py | 1 | 5955 | # Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Test the data generator example.
"""
from absl import flags
from matched_markets.examples.data_simulator import DataSimulator
import numpy as np
import unittest
class DataSimulatorTest(unittest.TestCase):
def setUp(self):
super().setUp()
# Experimental design.
self.n_control = 50
self.n_treat = 50
self.time_pre = 100
self.time_test = 100
# Linear params.
self.hetresp = 1.0
self.hetcost = 0.0
self.beta = 0.0
# Noise params.
self.hetsked = 0.0
self.sig_resp = 0.0
self.sig_cost = 0.0
# Column names.
self.df_keys = {
'key_response': 'sales',
'key_cost': 'cost',
'key_group': 'geo.group',
'key_period': 'period',
'key_geo': 'geo',
'key_date': 'date'
}
def testSampleRows(self):
"""Checks if .sample() has the correct number of rows."""
# Make simulator.
simulator = DataSimulator(self.n_control,
self.n_treat,
self.time_pre,
self.time_test,
self.hetresp,
self.hetcost,
self.beta,
self.hetsked,
self.sig_resp,
self.sig_cost,
**self.df_keys)
# Simulate data.
fake_data = simulator.sample()
# Derived constants.
time_total = self.time_pre + self.time_test
n_total = self.n_treat + self.n_control
col_len = time_total * n_total
self.assertEqual(len(fake_data.index), col_len) # pylint: disable=g-generic-assert
def testSampleColumns(self):
"""Check whether .sample() returns an appropriate `pd.DataFrame`."""
# Make simulator.
simulator = DataSimulator(self.n_control,
self.n_treat,
self.time_pre,
self.time_test,
self.hetresp,
self.hetcost,
self.beta,
self.hetsked,
self.sig_resp,
self.sig_cost,
**self.df_keys)
# Simulate data.
fake_data = simulator.sample()
column_keys = ['sales', 'cost', 'geo.group', 'period', 'date', 'size']
self.assertCountEqual(fake_data.columns, column_keys)
def testSalesColumn(self):
"""Check whether .sample() returns an appropriate `pd.DataFrame`."""
# Make simulator.
simulator = DataSimulator(self.n_control,
self.n_treat,
self.time_pre,
self.time_test,
self.hetresp,
self.hetcost,
self.beta,
self.hetsked,
self.sig_resp,
self.sig_cost,
**self.df_keys)
# Simulate data.
fake_data = simulator.sample()
total_sales = fake_data.sales.sum()
# Derive the true total sales.
time_total = self.time_pre + self.time_test
sales_treat = self.n_treat * (self.n_treat + 1) / 2.0
sales_control = self.n_control * (self.n_control + 1) / 2.0
sales_true = (sales_treat + sales_control) * time_total
self.assertAlmostEqual(sales_true, total_sales)
def testFixingSeedResultsInSameData(self):
"""Checks simulators with the same random seed produce the same samples."""
# Fix a seed for the random number generators.
seed = 1234
# Parameters ensuring the data contains noise.
sig_resp = 1.0
sig_cost = 1.0
# Make simulator.
simulator1 = DataSimulator(self.n_control,
self.n_treat,
self.time_pre,
self.time_test,
self.hetresp,
self.hetcost,
self.beta,
self.hetsked,
sig_resp,
sig_cost,
seed=seed,
**self.df_keys)
# Simulate data, calculate a characteristic number.
fake_data1 = simulator1.sample()
sum1 = np.sum(fake_data1.values)
# Make identical simulator.
simulator2 = DataSimulator(self.n_control,
self.n_treat,
self.time_pre,
self.time_test,
self.hetresp,
self.hetcost,
self.beta,
self.hetsked,
sig_resp,
sig_cost,
seed=seed,
**self.df_keys)
# Simulate (hopefully) identical data, calculate a characteristic number.
fake_data2 = simulator2.sample()
sum2 = np.sum(fake_data2.values)
self.assertEqual(sum1, sum2)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 4,039,507,994,984,469,500 | 32.083333 | 87 | 0.488329 | false |
ReactiveX/RxPY | tests/test_scheduler/test_mainloop/test_qtscheduler_pyqt5.py | 1 | 4333 | import pytest
import threading
from datetime import timedelta
from time import sleep
QtCore = pytest.importorskip('PyQt5.QtCore')
from rx.scheduler.mainloop import QtScheduler
from rx.internal.basic import default_now
@pytest.fixture(scope="module")
def app():
# share qt application among all tests
app = QtCore.QCoreApplication([])
yield app
# teardown
class TestQtSchedulerPyQt5:
def test_pyqt5_schedule_now(self):
scheduler = QtScheduler(QtCore)
diff = scheduler.now - default_now()
assert abs(diff) < timedelta(milliseconds=1)
def test_pyqt5_schedule_now_units(self):
scheduler = QtScheduler(QtCore)
diff = scheduler.now
sleep(0.1)
diff = scheduler.now - diff
assert timedelta(milliseconds=80) < diff < timedelta(milliseconds=180)
def test_pyqt5_schedule_action(self, app):
scheduler = QtScheduler(QtCore)
gate = threading.Semaphore(0)
ran = False
def action(scheduler, state):
nonlocal ran
ran = True
scheduler.schedule(action)
def done():
app.quit()
gate.release()
QtCore.QTimer.singleShot(50, done)
app.exec_()
gate.acquire()
assert ran is True
def test_pyqt5_schedule_action_due_relative(self, app):
scheduler = QtScheduler(QtCore)
gate = threading.Semaphore(0)
starttime = default_now()
endtime = None
def action(scheduler, state):
nonlocal endtime
endtime = default_now()
scheduler.schedule_relative(0.2, action)
def done():
app.quit()
gate.release()
QtCore.QTimer.singleShot(300, done)
app.exec_()
gate.acquire()
assert endtime is not None
diff = endtime - starttime
assert diff > timedelta(milliseconds=180)
def test_pyqt5_schedule_action_due_absolute(self, app):
scheduler = QtScheduler(QtCore)
gate = threading.Semaphore(0)
starttime = default_now()
endtime = None
def action(scheduler, state):
nonlocal endtime
endtime = default_now()
scheduler.schedule_absolute(starttime + timedelta(seconds=0.2), action)
def done():
app.quit()
gate.release()
QtCore.QTimer.singleShot(300, done)
app.exec_()
gate.acquire()
assert endtime is not None
diff = endtime - starttime
assert diff > timedelta(milliseconds=180)
def test_pyqt5_schedule_action_cancel(self, app):
ran = False
scheduler = QtScheduler(QtCore)
gate = threading.Semaphore(0)
def action(scheduler, state):
nonlocal ran
ran = True
d = scheduler.schedule_relative(0.1, action)
d.dispose()
def done():
app.quit()
gate.release()
QtCore.QTimer.singleShot(300, done)
app.exec_()
gate.acquire()
assert ran is False
def test_pyqt5_schedule_action_periodic(self, app):
scheduler = QtScheduler(QtCore)
gate = threading.Semaphore(0)
period = 0.050
counter = 3
def action(state):
nonlocal counter
if state:
counter -= 1
return state - 1
scheduler.schedule_periodic(period, action, counter)
def done():
app.quit()
gate.release()
QtCore.QTimer.singleShot(300, done)
app.exec_()
gate.acquire()
assert counter == 0
def test_pyqt5_schedule_periodic_cancel(self, app):
scheduler = QtScheduler(QtCore)
gate = threading.Semaphore(0)
period = 0.05
counter = 3
def action(state):
nonlocal counter
if state:
counter -= 1
return state - 1
disp = scheduler.schedule_periodic(period, action, counter)
def dispose():
disp.dispose()
QtCore.QTimer.singleShot(100, dispose)
def done():
app.quit()
gate.release()
QtCore.QTimer.singleShot(300, done)
app.exec_()
gate.acquire()
assert 0 < counter < 3
| mit | 7,588,271,767,783,262,000 | 22.677596 | 79 | 0.571659 | false |
nelisky/py-stellar-base | stellar_base/memo.py | 1 | 1934 | # coding: utf-8
from .utils import XdrLengthError
from .stellarxdr import StellarXDR_pack as Xdr
# Compatibility for Python 3.x that don't have unicode type
try:
type(unicode)
except NameError:
unicode = str
class NoneMemo(object):
def __init__(self):
pass
def to_xdr_object(self):
return Xdr.types.Memo(type=Xdr.const.MEMO_NONE)
class TextMemo(object):
def __init__(self, text):
if not isinstance(text,(str,unicode)):
            raise TypeError('Expects string type, got ' + str(type(text)))
if bytes == str and not isinstance(text,unicode): # Python 2 without unicode string
self.text = text
else: # python 3 or python 2 with unicode string
self.text = bytearray(text, encoding='utf-8')
length = len(self.text)
if length > 28:
raise XdrLengthError("Text should be <= 28 bytes (ascii encoded). Got {:s}".format(str(length)))
def to_xdr_object(self):
return Xdr.types.Memo(type=Xdr.const.MEMO_TEXT, text=self.text)
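# A minimal usage sketch (the memo text is made up for illustration):
# TextMemo("order 42").to_xdr_object() yields an Xdr Memo of type MEMO_TEXT,
# while text longer than 28 bytes raises XdrLengthError before any XDR object
# is built.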
class IdMemo(object):
def __init__(self, mid):
self.mid = mid
def to_xdr_object(self):
return Xdr.types.Memo(type=Xdr.const.MEMO_ID, id=self.mid)
class HashMemo(object):
def __init__(self, memo_hash):
if len(memo_hash) != 32:
raise XdrLengthError("Expects a 32 byte mhash value. Got {:d} bytes instead".format(len(memo_hash)))
self.memo_hash = memo_hash
def to_xdr_object(self):
return Xdr.types.Memo(type=Xdr.const.MEMO_HASH, hash=self.memo_hash)
class RetHashMemo(object):
def __init__(self, memo_return):
if len(memo_return) != 32:
raise XdrLengthError("Expects a 32 byte hash value. Got {:d} bytes instead".format(len(memo_return)))
self.memo_return = memo_return
def to_xdr_object(self):
return Xdr.types.Memo(type=Xdr.const.MEMO_RETURN, retHash=self.memo_return)
| apache-2.0 | -5,554,082,174,817,244,000 | 31.233333 | 113 | 0.638573 | false |
jimy-byerley/Tron-R-reboot-reloaded- | shaders/arena_landing_zone.py | 1 | 2482 | from bge import logic as g
from bge import texture
from mathutils import *
from math import *
import bgl
cont = g.getCurrentController()
own = cont.owner
scene = g.getCurrentScene()
objlist = scene.objects
reflsize = 512 #reflection tex dimensions
refrsize = 512 #refraction tex dimensions
offset = 200.0 #geometry clipping offset
#texture background color
bgR = 0.02
bgG = 0.02
bgB = 0.02
bgA = 0.0
activecam = scene.active_camera
viewer = activecam
watercamera = objlist['reflectcamera'] #camera used for rendering the water
#setting lens and projection to watercamera
watercamera.lens = activecam.lens
watercamera.projection_matrix = activecam.projection_matrix
#rotation and mirror matrices
m1=Matrix(own.orientation)
m2=Matrix(own.orientation)
m2.invert()
r180 = Matrix.Rotation(radians(180),3,'Y')
unmir = Matrix.Scale(-1,3,Vector([1,0,0]))
#disable visibility for the water surface during texture rendering
own.visible = False
###REFLECTION####################
#initializing camera for reflection pass
pos = (viewer.position - own.position)*m1
#watercamera.near = abs((watercamera.position - own.position - pos*r180*unmir*m2).z/2)
watercamera.position = own.position + pos*r180*unmir*m2
ori = Matrix(viewer.orientation)
ori.transpose()
ori = ori*m1*r180*unmir*m2
ori.transpose()
watercamera.orientation = ori
#culling front faces as the camera is scaled to -1
bgl.glCullFace(bgl.GL_FRONT)
#plane equation
normal = own.getAxisVect((0.0, 0.0, 1.0)) #plane normals Z=front
D = -own.position.project(normal).magnitude #closest distance from center to plane
V = (activecam.position-own.position).normalized().dot(normal) #VdotN to get frontface/backface
#invert normals when backface
if V<0:
normal = -normal
#making a clipping plane buffer
plane = bgl.Buffer(bgl.GL_DOUBLE, [4], [-normal[0], -normal[1], -normal[2], -D+offset])
bgl.glClipPlane(bgl.GL_CLIP_PLANE0, plane)
bgl.glEnable(bgl.GL_CLIP_PLANE0)
#rendering the reflection texture in tex channel 0
if not hasattr(g, 'arena_landing_zone'):
g.arena_landing_zone = texture.Texture(own, 0, 0)
g.arena_landing_zone.source = texture.ImageRender(scene,watercamera)
g.arena_landing_zone.source.capsize = [reflsize,reflsize]
g.arena_landing_zone.source.background = [int(bgR*255),int(bgG*255),int(bgB*255),int(bgA*255)]
g.arena_landing_zone.refresh(True)
#restoring face culling to normal and disabling the geometry clipping
bgl.glCullFace(bgl.GL_BACK)
bgl.glDisable(bgl.GL_CLIP_PLANE0)
own.visible = True
| gpl-3.0 | -2,783,831,889,813,963,300 | 28.2 | 95 | 0.756245 | false |
mit-dci/lit | test/testutil.py | 1 | 1359 | #!/usr/bin/env python3
# Copyright (c) 2018 The lit developers
# Distributed under the MIT software license, see the accompanying
# file LICENSE or http://www.opensource.org/licenses/mit-license.php.
"""Utils for lit testing"""
import time
import logging
import socket
logger = logging.getLogger("testframework")
def assert_equal(thing1, thing2, *args):
if thing1 != thing2 or any(thing1 != arg for arg in args):
raise AssertionError("not(%s)" % " == ".join(str(arg) for arg in (thing1, thing2) + args))
def wait_until(predicate, *, attempts=120, dt=0.25, errmsg=None): # up to 30 seconds
attempt = 0
while attempt < attempts:
if predicate():
return True
attempt += 1
time.sleep(dt)
if errmsg is not None:
if errmsg == False:
return False
else:
raise AssertionError(str(errmsg))
else:
raise AssertionError("wait_until() timed out")
def check_port_open(host, port, timeout=0.05):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(timeout)
res = sock.connect_ex((host, port))
if res == 0:
sock.close()
return True
else:
return False
def wait_until_port(host, port, errmsg=None):
def p():
return check_port_open(host, port)
return wait_until(p, errmsg=errmsg)
| mit | 6,088,042,509,442,119,000 | 28.543478 | 98 | 0.637969 | false |
WeblateOrg/weblate | weblate/trans/migrations/0116_migrate_glossaries.py | 1 | 7111 | # Generated by Django 3.1.4 on 2021-02-01 14:12
import os
from django.conf import settings
from django.db import migrations
from django.utils.text import slugify
from translate.misc.xml_helpers import valid_chars_only
from weblate.formats.ttkit import TBXFormat
from weblate.utils.hash import calculate_hash
from weblate.utils.state import STATE_READONLY, STATE_TRANSLATED
from weblate.vcs.git import LocalRepository
def create_glossary(project, name, slug, glossary, license):
return project.component_set.create(
slug=slug,
name=name,
is_glossary=True,
glossary_name=glossary.name,
glossary_color=glossary.color,
allow_translation_propagation=False,
manage_units=True,
file_format="tbx",
filemask="*.tbx",
vcs="local",
repo="local:",
branch="main",
source_language=glossary.source_language,
license=license,
)
def migrate_glossaries(apps, schema_editor): # noqa: C901
Project = apps.get_model("trans", "Project")
Language = apps.get_model("lang", "Language")
db_alias = schema_editor.connection.alias
projects = Project.objects.using(db_alias).all()
total = len(projects)
processed = 0
for processed, project in enumerate(projects):
component_slugs = set(project.component_set.values_list("slug", flat=True))
percent = int(100 * processed / total)
print(f"Migrating glossaries {percent}% [{processed}/{total}]...{project.name}")
glossaries = project.glossary_set.all()
try:
license = project.component_set.exclude(license="").values_list(
"license", flat=True
)[0]
except IndexError:
license = ""
for glossary in glossaries:
if len(glossaries) == 1:
name = "Glossary"
slug = "glossary"
else:
name = f"Glossary: {glossary.name}"
slug = f"glossary-{slugify(glossary.name)}"
base_name = name
base_slug = slug
# Create component
attempts = 0
while True:
if slug not in component_slugs:
component = create_glossary(project, name, slug, glossary, license)
component_slugs.add(slug)
break
attempts += 1
name = f"{base_name} - {attempts}"
slug = f"{base_slug}-{attempts}"
repo_path = os.path.join(settings.DATA_DIR, "vcs", project.slug, slug)
# Create VCS repository
repo = LocalRepository.from_files(repo_path, {})
# Migrate links
component.links.set(glossary.links.all())
# Create source translation
source_translation = component.translation_set.create(
language=glossary.source_language,
check_flags="read-only",
filename="",
plural=glossary.source_language.plural_set.filter(source=0)[0],
language_code=glossary.source_language.code,
)
source_units = {}
# Get list of languages
languages = (
Language.objects.filter(term__glossary=glossary)
.exclude(pk=glossary.source_language.pk)
.distinct()
)
            # Migrate terms
for language in languages:
base_filename = f"{language.code}.tbx"
filename = os.path.join(repo_path, base_filename)
# Create translation object
translation = component.translation_set.create(
language=language,
plural=language.plural_set.filter(source=0)[0],
filename=base_filename,
language_code=language.code,
)
# Create store file
TBXFormat.create_new_file(filename, language.code, "")
store = TBXFormat(
filename,
language_code=language.code,
source_language=glossary.source_language.code,
)
id_hashes = set()
for position, term in enumerate(
glossary.term_set.filter(language=language)
):
source = valid_chars_only(term.source)
target = valid_chars_only(term.target)
context = ""
# Store to the file
id_hash = calculate_hash(source, context)
offset = 0
while id_hash in id_hashes:
offset += 1
context = str(offset)
id_hash = calculate_hash(source, context)
id_hashes.add(id_hash)
if id_hash not in source_units:
source_units[id_hash] = source_translation.unit_set.create(
context=context,
source=source,
target=source,
state=STATE_READONLY,
position=position,
num_words=len(source.split()),
id_hash=id_hash,
)
source_units[id_hash].source_unit = source_units[id_hash]
source_units[id_hash].save()
store.new_unit(context, source, target)
# Migrate database
unit = translation.unit_set.create(
context=context,
source=source,
target=target,
state=STATE_TRANSLATED,
position=position,
num_words=len(source.split()),
id_hash=id_hash,
source_unit=source_units[id_hash],
)
                    # Adjust history entries (language and project should be already set)
term.change_set.update(
unit=unit,
translation=translation,
component=component,
)
store.save()
# Update translation hash
translation.revision = repo.get_object_hash(filename)
translation.save(update_fields=["revision"])
# Commit files
with repo.lock:
repo.execute(["add", repo_path])
if repo.needs_commit():
repo.commit("Migrate glossary content")
if total:
print(f"Migrating glossaries completed [{total}/{total}]")
class Migration(migrations.Migration):
dependencies = [
("trans", "0115_auto_20210201_1305"),
("glossary", "0005_set_source_language"),
]
operations = [migrations.RunPython(migrate_glossaries, elidable=True)]
| gpl-3.0 | 6,813,968,418,671,555,000 | 36.230366 | 89 | 0.513992 | false |
gragas/projecteuler | 0067/python3/solution.py | 1 | 1043 | """
By starting at the top of the triangle below and moving to adjacent numbers on the row below, the maximum total from top to bottom is 23.
3
7 4
2 4 6
8 5 9 3
That is, 3 + 7 + 4 + 9 = 23.
Find the maximum total from top to bottom in triangle.txt (right click and 'Save Link/Target As...'), a 15K text file containing a triangle with one-hundred rows.
NOTE: This is a much more difficult version of Problem 18. It is not possible to try every route to solve this problem, as there are 2^99 altogether! If you could check one trillion (10^12) routes every second it would take over twenty billion years to check them all. There is an efficient algorithm to solve it. ;o)
"""
rows = []
with open("triangle.txt", "r") as triangle_file:
for line in triangle_file:
rows.append([int(num) for num in line.strip().split()])
# Accumulate :)
for row in range(len(rows) - 2, -1, -1):
rows[row] = [max(rows[row+1][indx], rows[row+1][indx+1]) + rows[row][indx] \
for indx in range(len(rows[row]))]
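# Worked example with the 4-row triangle from the docstring:
# start: [3], [7, 4], [2, 4, 6], [8, 5, 9, 3]
# row 2 becomes [2+8, 4+9, 6+9] = [10, 13, 15]
# row 1 becomes [7+13, 4+15] = [20, 19]
# row 0 becomes [3+20] = [23], so the print below gives the maximum total 23.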
print(rows[0][0])
| mit | 5,297,930,964,856,123,000 | 39.115385 | 315 | 0.685523 | false |
slgobinath/SafeEyes | safeeyes/plugins/mediacontrol/plugin.py | 1 | 2495 | #!/usr/bin/env python
# Safe Eyes is a utility to remind you to take breaks frequently
# to protect your eyes from eye strain.
# Copyright (C) 2019 Gobinath
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Media Control plugin lets users pause the currently playing media player from the break screen.
"""
import logging
import os
import dbus
import re
import gi
from safeeyes.model import TrayAction
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
tray_icon_path = None
def __active_players():
"""
List of all media players which are playing now.
"""
players = []
bus = dbus.SessionBus()
for service in bus.list_names():
if re.match('org.mpris.MediaPlayer2.', service):
player = bus.get_object(service, "/org/mpris/MediaPlayer2")
interface = dbus.Interface(player, 'org.freedesktop.DBus.Properties')
status = str(interface.Get('org.mpris.MediaPlayer2.Player', 'PlaybackStatus')).lower()
if status == "playing":
players.append(player)
return players
def __pause_players(players):
"""
Pause all playing media players using dbus.
"""
for player in players:
interface = dbus.Interface(player, dbus_interface='org.mpris.MediaPlayer2.Player')
interface.Pause()
def init(ctx, safeeyes_config, plugin_config):
"""
    Initialize the media control plugin.
"""
global tray_icon_path
tray_icon_path = os.path.join(plugin_config['path'], "resource/pause.png")
def get_tray_action(break_obj):
"""
Return TrayAction only if there is a media player currently playing.
"""
players = __active_players()
if players:
return TrayAction.build("Pause media",
tray_icon_path,
Gtk.STOCK_MEDIA_PAUSE,
lambda: __pause_players(players))
| gpl-3.0 | -5,804,036,733,867,874,000 | 30.987179 | 98 | 0.668537 | false |
robmcmullen/peppy | peppy/editra/ed_style.py | 1 | 34887 | ###############################################################################
# Name: ed_style.py #
# Purpose: Editra's style management system. Implements the interpretation of #
# Editra Style Sheets to the StyledTextCtrl. #
# Author: Cody Precord <[email protected]> #
# Copyright: (c) 2008 Cody Precord <[email protected]> #
# License: wxWindows License #
###############################################################################
"""
Provides a system for managing styles in the text control. Compiles the data
in an Editra Style Sheet to a format that Scintilla can understand. The
specification of Editra Style Sheets that this module implements can be found
either in the _docs_ folder of the source distribution or on Editra's home page
U{http://editra.org/?page=docs&doc=ess_spec}.
@summary: Style management system for managing the syntax highlighting of all
buffers
"""
__author__ = "Cody Precord <[email protected]>"
__svnid__ = "$Id: ed_style.py 54472 2008-07-03 03:28:11Z CJP $"
__revision__ = "$Revision: 54472 $"
#--------------------------------------------------------------------------#
# Dependancies
import os
import re
import wx
import util
from profiler import Profile_Get, Profile_Set
# Globals
STY_ATTRIBUTES = u"face fore back size"
STY_EX_ATTRIBUTES = u"eol bold italic underline"
# Parser Values
RE_ESS_COMMENT = re.compile("\/\*[^*]*\*+([^/][^*]*\*+)*\/")
RE_ESS_SCALAR = re.compile("\%\([a-zA-Z0-9]+\)")
RE_HEX_STR = re.compile("#[0-9a-fA-F]{3,6}")
#--------------------------------------------------------------------------#
class StyleItem(object):
"""A storage class for holding styling information
@todo: The extra Attributes should be saved as a separate attribute in the
           StyleItem. This currently causes problems when customizing values in
the StyleEditor. Changing this is fairly easy in this class but it
will require changes to the StyleMgr and Editor as well.
"""
def __init__(self, fore=wx.EmptyString, back=wx.EmptyString,
face=wx.EmptyString, size=wx.EmptyString):
"""Initiliazes the Style Object.
@keyword fore: Specifies the forground color (hex string)
@keyword face: Specifies the font face (string face name)
@keyword back: Specifies the background color (hex string)
@keyword size: Specifies font point size (int/formatted string)
SPECIFICATION:
- DATA FORMATS:
- #123456 = hex color code
- #123456,bold = hex color code + extra style
- Monaco = Font Face Name
- %(primary)s = Format string to be swapped at runtime
- 10 = A font point size
- %(size)s = Format string to be swapped at runtime
"""
object.__init__(self)
self.null = False
self.fore = fore # Foreground color hex code
self.face = face # Font face name
self.back = back # Background color hex code
self.size = size # Font point size
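    # A hedged usage sketch (attribute values are illustrative, not taken from
    # an actual Editra style sheet):
    #
    #   item = StyleItem(fore="#000000", back="#FFFFFF",
    #                    face="%(primary)s", size="%(size)d")
    #   str(item) -> "fore:#000000,back:#FFFFFF,face:%(primary)s,size:%(size)d"
    #
    # The %(...) format strings are substituted with real font values at
    # runtime by StyleMgr using its font dictionary.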
def __eq__(self, si2):
"""Defines the == operator for the StyleItem Class
@param si2: style item to compare to
@return: whether the two items are equal
@rtype: bool
"""
return str(self) == str(si2)
def __str__(self):
"""Converts StyleItem to a string
        @note: the returned string is in a format that can be accepted by
Scintilla. No spaces may be in the string after the ':'.
@return: string representation of the StyleItem
"""
style_str = wx.EmptyString
if self.fore:
style_str = u"fore:%s," % self.fore
if self.back:
style_str += u"back:%s," % self.back
if self.face:
style_str += u"face:%s," % self.face
if self.size:
style_str += u"size:%s," % str(self.size)
if len(style_str) and style_str[-1] == u',':
style_str = style_str[0:-1]
return style_str
#---- Get Functions ----#
def GetAsList(self):
"""Returns a list of attr:value strings
this style item.
@return: list attribute values usable for building stc or ess values
"""
retval = list()
for attr in ('fore', 'back', 'face', 'size'):
val = getattr(self, attr, None)
if val not in ( None, wx.EmptyString ):
retval.append(attr + ':' + val)
return retval
def GetBack(self):
"""Returns the value of the back attribute
        @return: style item's background attribute
"""
return self.back.split(',')[0]
def GetFace(self):
"""Returns the value of the face attribute
        @return: style item's font face attribute
"""
return self.face.split(',')[0]
def GetFore(self):
"""Returns the value of the fore attribute
        @return: style item's foreground attribute
"""
return self.fore.split(',')[0]
def GetSize(self):
"""Returns the value of the size attribute as a string
        @return: style item's font size attribute
"""
return self.size.split(',')[0]
def GetNamedAttr(self, attr):
"""Get the value of the named attribute
@param attr: named attribute to get value of
"""
return getattr(self, attr, None)
#---- Utilities ----#
def IsNull(self):
"""Return whether the item is null or not
@return: bool
"""
return self.null
def IsOk(self):
"""Check if the style item is ok or not, if it has any of its
        attributes set it is perceived as ok.
@return: bool
"""
return len(self.__str__())
def Nullify(self):
"""Clear all values and set item as Null
@postcondition: item is turned into a NullStyleItem
"""
self.null = True
for attr in ('fore', 'face', 'back', 'size'):
setattr(self, attr, '')
#---- Set Functions ----#
def SetAttrFromStr(self, style_str):
"""Takes style string and sets the objects attributes
by parsing the string for the values. Only sets or
overwrites values does not zero out previously set values.
Returning True if value(s) are set or false otherwise.
@param style_str: style information string (i.e fore:#888444)
@type style_str: string
"""
self.null = False
last_set = wx.EmptyString
for atom in style_str.split(u','):
attrib = atom.split(u':')
if len(attrib) == 2 and attrib[0] in STY_ATTRIBUTES:
last_set = attrib[0]
setattr(self, attrib[0], attrib[1])
elif attrib[0] in STY_EX_ATTRIBUTES and last_set != wx.EmptyString:
l_val = getattr(self, last_set)
setattr(self, last_set, u",".join([l_val, attrib[0]]))
else:
pass
return last_set != wx.EmptyString
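    # Illustrative parse (assumed input, mirroring the ESS value format):
    # SetAttrFromStr(u"fore:#888444,bold,size:10") sets fore to "#888444,bold"
    # and size to "10"; unknown attribute names are silently skipped.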
def SetBack(self, back, ex=wx.EmptyString):
"""Sets the Background Value
@param back: hex color string, or None to clear attribute
@keyword ex: extra attribute (i.e bold, italic, underline)
"""
self.null = False
if back is None or ex == wx.EmptyString:
self.back = back
else:
self.back = u"%s,%s" % (back, ex)
def SetFace(self, face, ex=wx.EmptyString):
"""Sets the Face Value
@param face: font name string, or None to clear attribute
@keyword ex: extra attribute (i.e bold, italic, underline)
"""
if face is None or ex == wx.EmptyString:
self.face = face
else:
self.face = u"%s,%s" % (face, ex)
def SetFore(self, fore, ex=wx.EmptyString):
"""Sets the Foreground Value
@param fore: hex color string, or None to clear attribute
@keyword ex: extra attribute (i.e bold, italic, underline)
"""
self.null = False
if fore is None or ex == wx.EmptyString:
self.fore = fore
else:
self.fore = u"%s,%s" % (fore, ex)
def SetSize(self, size, ex=wx.EmptyString):
"""Sets the Font Size Value
@param size: font point size, or None to clear attribute
@type size: string or int
@keyword ex: extra attribute (i.e bold, italic, underline)
"""
self.null = False
if size is None or ex == wx.EmptyString:
self.size = size
else:
self.size = u"%s,%s" % (str(size), ex)
def SetExAttr(self, ex_attr, add=True):
"""Adds an extra text attribute to a StyleItem. Currently
(bold, eol, italic, underline) are supported. If the optional
add value is set to False the attribute will be removed from
the StyleItem.
@param ex_attr: extra style attribute (bold, eol, italic, underline)
@type ex_attr: string
@keyword add: Add a style (True) or remove a style (False)
"""
# Get currently set attributes
self.null = False
cur_str = self.__str__()
if not add:
cur_str = cur_str.replace(u',' + ex_attr, wx.EmptyString)
self.SetAttrFromStr(cur_str)
else:
if u',' + ex_attr not in cur_str:
attr_map = { u"fore" : self.GetFore(),
u"back" : self.GetBack(),
u"face" : self.GetFace(),
u"size" : self.GetSize()
}
for key in attr_map:
if len(attr_map[key]) and u"," not in attr_map[key]:
setattr(self, key, u",".join([attr_map[key], ex_attr]))
break
else:
pass
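    # Hedged example (hypothetical item): with add=True, SetExAttr("bold")
    # appends ",bold" to one of the already set attributes that carries no
    # extra modifier yet; with add=False it strips ",bold" from the item's
    # string form and re-parses the result.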
def SetNamedAttr(self, attr, value):
"""Sets a StyleItem attribute by named string.
@note: This is not intended to be used for setting extra
               attributes such as bold, eol, etc.
@param attr: a particular attribute to set (i.e fore, face, back, size)
@param value: value to set the attribute to contain
"""
self.null = False
cur_val = getattr(self, attr, None)
if cur_val is not None:
if u"," in cur_val:
tmp = cur_val.split(u",")
tmp[0] = value
value = u",".join(tmp)
setattr(self, attr, value)
#-----------------------------------------------------------------------------#
class StyleMgr(object):
"""Manages style definitions and provides them on request.
Also provides functionality for loading custom style sheets and
modifying styles during run time.
"""
STYLES = dict() # Cache for loaded style set(s)
FONT_PRIMARY = u"primary"
FONT_SECONDARY = u"secondary"
FONT_SIZE = u"size"
FONT_SIZE2 = u"size2"
FONT_SIZE3 = u"size3"
def __init__(self, custom=wx.EmptyString):
"""Initializes the Style Manager
@keyword custom: path to custom style sheet to use
"""
object.__init__(self)
# Attributes
self.fonts = self.GetFontDictionary()
self.style_set = custom
self.LOG = wx.GetApp().GetLog()
# Get the Style Set
if custom != wx.EmptyString and self.LoadStyleSheet(custom):
self.LOG("[ed_style][info] Loaded custom style sheet %s" % custom)
else:
self.LOG("[ed_style][err] Failed to import styles from %s" % custom)
def BlankStyleDictionary(self):
"""Returns a dictionary of unset style items based on the
tags defined in the current dictionary.
@return: dictionary of unset style items using the current tag set
as keys.
"""
sty_dict = dict()
for key in DefaultStyleDictionary().keys():
if key in ('select_style', 'whitespace_style'):
sty_dict[key] = NullStyleItem()
else:
sty_dict[key] = StyleItem("#000000", "#FFFFFF",
"%(primary)s", "%(size)d")
return sty_dict
def GetFontDictionary(self, default=True):
"""Does a system lookup to build a default set of fonts using
ten point fonts as the standard size.
@keyword default: return the default dictionary of fonts, else return
the current running dictionary of fonts if it exists.
@type default: bool
@return: font dictionary (primary, secondary) + (size, size2)
"""
if hasattr(self, 'fonts') and not default:
return self.fonts
font = Profile_Get('FONT1', 'font', None)
if font is not None:
mfont = font
else:
mfont = wx.Font(10, wx.FONTFAMILY_MODERN, wx.FONTSTYLE_NORMAL,
wx.FONTWEIGHT_NORMAL)
Profile_Set('FONT1', mfont, 'font')
primary = mfont.GetFaceName()
font = Profile_Get('FONT2', 'font', None)
if font is None:
font = wx.Font(10, wx.FONTFAMILY_SWISS, wx.FONTSTYLE_NORMAL,
wx.FONTWEIGHT_NORMAL)
Profile_Set('FONT2', font, 'font')
secondary = font.GetFaceName()
faces = {
self.FONT_PRIMARY : primary,
self.FONT_SECONDARY : secondary,
self.FONT_SIZE : mfont.GetPointSize(),
self.FONT_SIZE2 : font.GetPointSize(),
self.FONT_SIZE3 : mfont.GetPointSize() - 2
}
return faces
def GetDefaultFont(self):
"""Constructs and returns a wxFont object from the settings
of the default_style object.
@return: font object of default style
@rtype: wx.Font
"""
if self.HasNamedStyle('default_style'):
style_item = self.GetItemByName('default_style')
face = style_item.GetFace()
if face[0] == u"%":
face = face % self.fonts
size = style_item.GetSize()
if isinstance(size, basestring):
size = size % self.fonts
font = wx.FFont(int(size), wx.MODERN, face=face)
else:
font = wx.FFont(self.fonts[self.FONT_SIZE], wx.MODERN)
return font
def GetDefaultForeColour(self, as_hex=False):
"""Gets the foreground color of the default style and returns
a Colour object. Otherwise returns Black if the default
style is not found.
@keyword as_hex: return a hex string or colour object
@type as_hex: bool
@return: wx.Colour of default style foreground or hex value
@rtype: wx.Colour or string
"""
fore = self.GetItemByName('default_style').GetFore()
if fore == wx.EmptyString:
fore = u"#000000"
if not as_hex:
rgb = util.HexToRGB(fore[1:])
fore = wx.Colour(red=rgb[0], green=rgb[1], blue=rgb[2])
return fore
def GetCurrentStyleSetName(self):
"""Get the name of the currently set style
@return: string
"""
return self.style_set
def GetDefaultBackColour(self, as_hex=False):
"""Gets the background color of the default style and returns
a Colour object. Otherwise returns white if the default
style is not found.
        @keyword as_hex: return a hex string or colour object
        @type as_hex: bool
@return: wx.Colour of default style background or hex value
@rtype: wx.Colour or string
"""
back = self.GetItemByName('default_style').GetBack()
if back == wx.EmptyString:
back = u"#FFFFFF"
if not as_hex:
rgb = util.HexToRGB(back[1:])
back = wx.Colour(red=rgb[0], green=rgb[1], blue=rgb[2])
return back
def GetItemByName(self, name):
"""Gets and returns a style item using its name for the search
@param name: tag name of style item to get
@return: style item (may be empty/null style item)
@rtype: L{StyleItem}
"""
if self.HasNamedStyle(name):
if u"%" in unicode(StyleMgr.STYLES[self.style_set][name]):
val = unicode(StyleMgr.STYLES[self.style_set][name]) % self.fonts
item = StyleItem()
item.SetAttrFromStr(val)
return item
else:
return StyleMgr.STYLES[self.style_set][name]
else:
return StyleItem()
def GetStyleFont(self, primary=True):
"""Returns the primary font facename by default
@keyword primary: Get Primary(default) or Secondary Font
@return face name of current font in use
"""
if primary:
font = wx.FFont(self.fonts[self.FONT_SIZE], wx.DEFAULT,
face=self.fonts[self.FONT_PRIMARY])
else:
font = wx.FFont(self.fonts[self.FONT_SIZE2], wx.DEFAULT,
face=self.fonts[self.FONT_SECONDARY])
return font
def GetStyleByName(self, name):
"""Gets and returns a style string using its name for the search
@param name: tag name of style to get
@type name: string
@return: style item in string form
@rtype: string
"""
if self.HasNamedStyle(name):
return unicode(self.GetItemByName(name))
else:
return wx.EmptyString
def GetStyleSet(self):
"""Returns the current set of styles or the default set if
there is no current set.
@return: current style set dictionary
@rtype: dict
"""
return StyleMgr.STYLES.get(self.style_set, DefaultStyleDictionary())
def HasNamedStyle(self, name):
"""Checks if a style has been set/loaded or not
@param name: tag name of style to look for
@return: whether item is in style set or not
"""
return self.GetStyleSet().has_key(name)
def LoadStyleSheet(self, style_sheet, force=False):
"""Loads a custom style sheet and returns True on success
@param style_sheet: path to style sheet to load
@keyword force: Force reparse of style sheet, default is to use cached
data when available
@return: whether style sheet was loaded or not
@rtype: bool
"""
if isinstance(style_sheet, basestring) and \
os.path.exists(style_sheet) and \
((force or not StyleMgr.STYLES.has_key(style_sheet)) or \
style_sheet != self.style_set):
reader = util.GetFileReader(style_sheet)
if reader == -1:
self.LOG("[ed_style][err] Failed to open style sheet: %s" % style_sheet)
return False
ret_val = self.SetStyles(style_sheet, self.ParseStyleData(reader.read()))
reader.close()
return ret_val
elif not StyleMgr.STYLES.has_key(style_sheet):
self.LOG("[ed_style][warn] Style sheet %s does not exists" % style_sheet)
# Reset to default style
Profile_Set('SYNTHEME', 'default')
self.SetStyles('default', DefaultStyleDictionary())
return False
else:
self.LOG("[ed_style][info] Using cached style data")
return True
def PackStyleSet(self, style_set):
"""Checks the difference of each item in the style set as
compared to the default_style tag and packs any unset value
in the item to be equal to the default style.
@param style_set: style set to pack
@return: style_set with all unset attributes set to match default style
"""
if isinstance(style_set, dict) and style_set.has_key('default_style'):
default = style_set['default_style']
for tag in style_set:
if style_set[tag].IsNull():
continue
if style_set[tag].GetFace() == wx.EmptyString:
style_set[tag].SetFace(default.GetFace())
if style_set[tag].GetFore() == wx.EmptyString:
style_set[tag].SetFore(default.GetFore())
if style_set[tag].GetBack() == wx.EmptyString:
style_set[tag].SetBack(default.GetBack())
if style_set[tag].GetSize() == wx.EmptyString:
style_set[tag].SetSize(default.GetSize())
else:
pass
return style_set
def ParseStyleData(self, style_data, strict=False):
"""Parses a string style definitions read from an Editra
Style Sheet. If the parameter 'strict' isnt set then any
syntax errors are ignored and only good values are returned.
If 'strict' is set the parser will raise errors and bailout.
@param style_data: style sheet data string
@type style_data: string
@keyword strict: should the parser raise errors or ignore
@return: dictionary of StyleItems constructed from the style sheet
data.
"""
# Remove all comments
style_data = RE_ESS_COMMENT.sub(u'', style_data)
# Compact data into a contiguous string
style_data = style_data.replace(u"\r\n", u"").replace(u"\n", u"")
style_data = style_data.replace(u"\t", u"")
## Build style data tree
# Tree Level 1 split tag from data
style_tree = [style.split(u"{") for style in style_data.split(u'}')]
if len(style_tree) and style_tree[-1][0] == wx.EmptyString:
style_tree.pop()
# Check for Level 1 Syntax Errors
tmp = style_tree
for style in tmp:
if len(style) != 2:
self.LOG("[ed_style][err] There was an error parsing "
"the syntax data from " + self.style_set)
self.LOG("[ed_style][err] You are missing a { or } " +
"in Def: " + style[0].split()[0])
if strict:
raise SyntaxError, \
"Missing { or } near Def: %s" % style[0].split()[0]
else:
style_tree.remove(style)
# Tree Level 2 Build small trees of tag and style attributes
# Tree Level 3 Branch tree into TAG => Attr => Value String
for branch in style_tree:
tmp2 = [leaf.strip().split(u":")
for leaf in branch[1].strip().split(u";")]
if len(tmp2) and tmp2[-1][0] == wx.EmptyString:
tmp2.pop()
branch[1] = tmp2
# Check for L2/L3 Syntax errors and build a clean dictionary
# of Tags => Valid Attributes
style_dict = dict()
for branch in style_tree:
value = list()
tag = branch[0].replace(u" ", u"")
for leaf in branch[1]:
# Remove any remaining whitespace
leaf = [part.strip() for part in leaf]
if len(leaf) != 2:
self.LOG("[ed_style][err] Missing a : or ; in the "
"declaration of %s" % tag)
if strict:
raise SyntaxError, "Missing : or ; in def: %s" % tag
elif leaf[0] not in STY_ATTRIBUTES:
self.LOG(("[ed_style][warn] Unknown style attribute: %s"
", In declaration of %s") % (leaf[0], tag))
if strict:
raise SyntaxWarning, "Unknown attribute %s" % leaf[0]
else:
value.append(leaf)
style_dict[tag] = value
# Trim the leafless branches from the dictionary
tmp = style_dict.copy()
for style_def in tmp:
if len(tmp[style_def]) == 0:
style_dict.pop(style_def)
# Validate leaf values and format into style string
for style_def in style_dict:
            if not style_def[0].isalpha():
                self.LOG("[ed_style][err] The style def %s is not a "
                         "valid name" % style_def)
                if strict:
                    raise SyntaxError, "%s is an invalid name" % style_def
else:
style_str = wx.EmptyString
for attrib in style_dict[style_def]:
values = [ val for val in attrib[1].split(u" ")
if val != wx.EmptyString ]
if len(values) > 2:
self.LOG("[ed_style][warn] Only one extra " +
"attribute can be set per style. See " +
style_def + " => " + attrib[0])
if strict:
raise SyntaxWarning
# Validate values
v1ok = v2ok = False
if attrib[0] in "fore back" and RE_HEX_STR.match(values[0]):
v1ok = True
elif len(values) and attrib[0] == "size":
if RE_ESS_SCALAR.match(values[0]) or values[0].isdigit():
v1ok = True
else:
self.LOG("[ed_style][warn] Bad value in %s"
" the value %s is invalid." % \
(attrib[0], values[0]))
elif len(values) and attrib[0] == "face":
if len(values) == 2 and \
values[1] not in STY_EX_ATTRIBUTES:
values = [u' '.join(values)]
v1ok = True
if len(values) == 2 and values[1] in STY_EX_ATTRIBUTES:
v2ok = True
elif len(values) == 2:
self.LOG("[ed_style][warn] Unknown extra " + \
"attribute '" + values[1] + \
"' in attribute: " + attrib[0])
if v1ok and v2ok:
value = u",".join(values)
elif v1ok:
value = values[0]
else:
continue
style_str = u",".join([style_str,
u":".join([attrib[0], value])])
if style_str != wx.EmptyString:
style_dict[style_def] = style_str.strip(u",")
# Build a StyleItem Dictionary
for key, value in style_dict.iteritems():
new_item = StyleItem()
if isinstance(value, basestring):
new_item.SetAttrFromStr(value)
style_dict[key] = new_item
# For any undefined tags load them as empty items
# for key in DefaultStyleDictionary().keys():
# if key not in style_dict:
# style_dict[key] = StyleItem()
return style_dict
def SetGlobalFont(self, font_tag, fontface, size=-1):
"""Sets one of the fonts in the global font set by tag
and sets it to the named font. Returns true on success.
@param font_tag: fonttype identifier key
@param fontface: face name to set global font to
"""
if hasattr(self, 'fonts'):
self.fonts[font_tag] = fontface
if size > 0:
self.fonts[self.FONT_SIZE] = size
return True
else:
return False
def SetStyleFont(self, wx_font, primary=True):
"""Sets the\primary or secondary font and their respective
size values.
@param wx_font: font object to set styles font info from
@keyword primary: Set primary(default) or secondary font
"""
if primary:
self.fonts[self.FONT_PRIMARY] = wx_font.GetFaceName()
self.fonts[self.FONT_SIZE] = wx_font.GetPointSize()
else:
self.fonts[self.FONT_SECONDARY] = wx_font.GetFaceName()
self.fonts[self.FONT_SIZE2] = wx_font.GetPointSize()
def SetStyleTag(self, style_tag, value):
"""Sets the value of style tag by name
@param style_tag: desired tag name of style definition
@param value: style item to set tag to
"""
StyleMgr.STYLES[self.style_set][style_tag] = value
def SetStyles(self, name, style_dict, nomerge=False):
"""Sets the managers style data and returns True on success.
@param name: name to store dictionary in cache under
@param style_dict: dictionary of style items to use as managers style
set.
@keyword nomerge: merge against default set or not
@type nomerge: bool
"""
if nomerge:
self.style_set = name
StyleMgr.STYLES[name] = self.PackStyleSet(style_dict)
return True
# Merge the given style set with the default set to fill in any
# unset attributes/tags
if isinstance(style_dict, dict):
# Check for bad data
for style in style_dict.values():
if not isinstance(style, StyleItem):
self.LOG("[ed_style][err] Invalid data in style dictionary")
return False
self.style_set = name
defaultd = DefaultStyleDictionary()
dstyle = style_dict.get('default_style', None)
if dstyle is None:
style_dict['default_style'] = defaultd['default_style']
# Set any undefined styles to match the default_style
for tag, item in defaultd.iteritems():
if tag not in style_dict:
if tag in ['select_style', 'whitespace_style']:
style_dict[tag] = NullStyleItem()
else:
style_dict[tag] = style_dict['default_style']
StyleMgr.STYLES[name] = self.PackStyleSet(style_dict)
return True
else:
self.LOG("[ed_style][err] SetStyles expects a " \
"dictionary of StyleItems")
return False
#-----------------------------------------------------------------------------#
# Utility Functions
def DefaultStyleDictionary():
"""This is the default style values that are used for styling
documents. Its used as a fallback for undefined values in a
style sheet.
@note: incomplete style sheets are merged against this set to ensure
a full set of definitions is avaiable
"""
def_dict = \
{'brace_good' : StyleItem("#FFFFFF", "#0000FF,bold"),
'brace_bad' : StyleItem(back="#FF0000,bold"),
'calltip' : StyleItem("#404040", "#FFFFB8"),
'caret_line' : StyleItem(back="#D8F8FF"),
'ctrl_char' : StyleItem(),
'line_num' : StyleItem(back="#C0C0C0", face="%(secondary)s", \
size="%(size3)d"),
'array_style': StyleItem("#EE8B02,bold", face="%(secondary)s"),
'btick_style': StyleItem("#8959F6,bold", size="%(size)d"),
'default_style': StyleItem("#000000", "#F6F6F6", \
"%(primary)s", "%(size)d"),
'char_style' : StyleItem("#FF3AFF"),
'class_style' : StyleItem("#2E8B57,bold"),
'class2_style' : StyleItem("#2E8B57,bold"),
'comment_style' : StyleItem("#838383"),
'decor_style' : StyleItem("#BA0EEA italic", face="%(secondary)s"),
'directive_style' : StyleItem("#0000FF,bold", face="%(secondary)s"),
'dockey_style' : StyleItem("#0000FF"),
'error_style' : StyleItem("#DD0101,bold", face="%(secondary)s"),
'foldmargin_style' : StyleItem(back="#D1D1D1"),
'funct_style' : StyleItem("#008B8B,italic"),
'global_style' : StyleItem("#007F7F,bold", face="%(secondary)s"),
'guide_style' : StyleItem("#838383"),
'here_style' : StyleItem("#CA61CA,bold", face="%(secondary)s"),
'ideol_style' : StyleItem("#E0C0E0", face="%(secondary)s"),
'keyword_style' : StyleItem("#A52B2B,bold"),
'keyword2_style' : StyleItem("#2E8B57,bold"),
'keyword3_style' : StyleItem("#008B8B,bold"),
'keyword4_style' : StyleItem("#9D2424"),
'marker_style' : StyleItem("#FFFFFF", "#000000"),
'number_style' : StyleItem("#DD0101"),
'number2_style' : StyleItem("#DD0101,bold"),
'operator_style' : StyleItem("#000000", face="%(primary)s,bold"),
'pre_style' : StyleItem("#AB39F2,bold"),
'pre2_style' : StyleItem("#AB39F2,bold", "#FFFFFF"),
'regex_style' : StyleItem("#008B8B"),
'scalar_style' : StyleItem("#AB37F2,bold", face="%(secondary)s"),
'scalar2_style' : StyleItem("#AB37F2", face="%(secondary)s"),
'select_style' : NullStyleItem(), # Use system default colour
'string_style' : StyleItem("#FF3AFF,bold"),
'stringeol_style' : StyleItem("#000000,bold", "#EEC0EE,eol", \
"%(secondary)s"),
'unknown_style' : StyleItem("#FFFFFF,bold", "#DD0101,eol"),
'userkw_style' : StyleItem(),
'whitespace_style' : StyleItem('#838383')
}
return def_dict
def MergeFonts(style_dict, font_dict):
"""Does any string substitution that the style dictionary
may need to have fonts and their sizes set.
@param style_dict: dictionary of L{StyleItem}
@param font_dict: dictionary of font data
    @return: style dictionary with all font format strings substituted in
"""
for style in style_dict:
st_str = str(style_dict[style])
if u'%' in st_str:
style_dict[style].SetAttrFromStr(st_str % font_dict)
return style_dict
def MergeStyles(styles1, styles2):
"""Merges the styles from styles2 into styles1 overwriting
any duplicate values already set in styles1 with the new
data from styles2.
    @param styles1: dictionary of StyleItems to receive the merge
@param styles2: dictionary of StyleItems to merge from
@return: style1 with all values from styles2 merged into it
"""
for style in styles2:
styles1[style] = styles2[style]
return styles1
def NullStyleItem():
"""Create a null style item
@return: empty style item that cannot be merged
"""
item = StyleItem()
item.null = True
return item
| gpl-2.0 | 6,960,751,333,073,037,000 | 38.198876 | 88 | 0.539857 | false |
oser-cs/oser-website | core/management/commands/utils.py | 1 | 4368 | """Utils for populatedb."""
import os
import re
import string
from itertools import cycle
from contextlib import contextmanager
from django.core.files import File
from django.db.models.base import ModelBase
from factory.base import FactoryMetaClass
HERE = os.path.dirname(os.path.abspath(__file__))
def format_keys(s):
"""Return tuple of format keys in s.
    format_keys('Hi {person}. Eat {food}? {}') => ('person', 'food', '')
"""
formatter = string.Formatter()
return tuple(tup[1] for tup in formatter.parse(s) if tup[1] is not None)
assert format_keys('Hi {person}. Eat {food}? {}') == ('person', 'food', '')
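# The assert above is a lightweight self-check that runs at import time.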
class DataLoader:
"""Simple utility class to load data files."""
def __init__(self, path=None):
if path is None:
path = os.path.join(HERE, 'data')
self.path = path
@contextmanager
def load(self, filename):
"""Load a single file and return it as a Django File object."""
path = os.path.join(self.path, filename)
f = open(path, 'rb')
try:
wrapped_file = File(f)
# Default name of the file is the full path to file.
# Use only the filename.
wrapped_file.name = filename
yield wrapped_file
finally:
f.close()
class SeqDataLoader(DataLoader):
"""Iterable that yields filenames to a given amount of resources.
Resources are cycled through if the required amount exceeds
the amount of resources available.
Usage
-----
    for file_ in SeqDataLoader('resource-{i}.txt', 6):
        print(file_.read())
"""
def __init__(self, resource_format, amount, **kwargs):
super().__init__(**kwargs)
self.pattern = self._make_pattern(resource_format)
self.amount = amount
self.resources = self._find_resources()
@staticmethod
def _make_pattern(fmt):
if 'i' not in format_keys(fmt):
raise ValueError('Resource format {} must contain key "i"'
.format(fmt))
return re.compile('^' + fmt.replace('{i}', '.*') + '$')
def _find_resources(self):
return [f for f in os.listdir(self.path)
if self.pattern.match(f)]
def __iter__(self):
resources = cycle(self.resources)
for _ in range(self.amount):
filename = next(resources)
with self.load(filename) as file_:
yield file_
def get_model(element):
"""Convert element to a Django Model class.
Element can be a Model class or a DjangoModelFactory class.
"""
if isinstance(element, FactoryMetaClass):
return element._meta.model
if not isinstance(element, ModelBase):
raise ValueError(
'Expected Model or DjangoModelFactory, got {}'
.format(type(element)))
return element
def watcher(*watched):
"""Decorator to report changes in amounts of objects in a Command.
Counts number of objects per model before and after the decorated
function is executed, and shows the difference in nicely formatted
messages.
Usage
-----
class MyCommand(BaseCommand):
@watcher(MyModel)
def do_something(self):
... create or delete MyModel instances here ...
Parameters
----------
*watched :
List of Model-like (FactoryBoy DjangoModelFactory also accepted).
"""
watched = list(map(get_model, watched))
def get_counts():
return [(model._meta.verbose_name_plural, model.objects.all().count())
for model in watched]
def decorator(func):
def watched_func(self, *args, **kwargs):
counts_before = get_counts()
rv = func(self, *args, **kwargs)
counts_after = get_counts()
diffs = ((name, after - before)
for (name, after), (name, before)
in zip(counts_after, counts_before))
for name, diff in diffs:
if diff > 0:
self.stdout.write(
'Created {} {}'.format(diff, name))
elif diff < 0:
self.stdout.write(
'Deleted {} {}'.format(-diff, name))
return rv
return watched_func
return decorator
| gpl-3.0 | 1,531,607,801,651,571,700 | 28.714286 | 78 | 0.57967 | false |
sklam/pymothoa | test/testbitwise.py | 1 | 2259 | import logging
logging.basicConfig(level=logging.DEBUG)
from pymothoa.jit import default_module, function
from pymothoa.types import *
from pymothoa.dialect import *
@function(ret=Int, args=[Int, Int])
def test_bitwise_and(A, B):
return A & B
@function(ret=Int, args=[Int, Int])
def test_bitwise_or(A, B):
return A | B
@function(ret=Int, args=[Int, Int])
def test_bitwise_xor(A, B):
return A ^ B
@function(ret=Int, args=[Int, Int])
def test_lshift(A, n):
return A << n
@function(ret=Int, args=[Int, Int])
def test_rshift(A, n):
return A >> n # arithmetic shift
default_module.optimize()
#-------------------------------------------------------------------------------
import unittest
from random import random, randint
class Test(unittest.TestCase):
def setUp(self):
self.REP = 100
def test_bitwise_and(self):
for _ in xrange(self.REP):
args = (randint(0, 100), randint(0, 100))
self.assertEqual(test_bitwise_and(*args), test_bitwise_and.run_py(*args))
def test_bitwise_or(self):
for _ in xrange(self.REP):
args = (randint(0, 100), randint(0, 100))
self.assertEqual(test_bitwise_or(*args), test_bitwise_or.run_py(*args))
def test_bitwise_xor(self):
for _ in xrange(self.REP):
args = (randint(0, 100), randint(0, 100))
self.assertEqual(test_bitwise_xor(*args), test_bitwise_xor.run_py(*args))
def test_lshift(self):
for amt in xrange(31):
args = (1, amt)
self.assertEqual(test_lshift(*args), test_lshift.run_py(*args))
for _ in xrange(self.REP):
args = (randint(-0xfff, 0xfff), randint(0, 16))
self.assertEqual(test_lshift(*args), test_lshift.run_py(*args))
def test_rshift(self):
for amt in xrange(31):
args = (-1, amt)
self.assertEqual(test_rshift(*args), test_rshift.run_py(*args))
for _ in xrange(self.REP):
args = (randint(-0xfff, 0xfff), randint(0, 31))
self.assertEqual(test_rshift(*args), test_rshift.run_py(*args))
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(Test)
unittest.TextTestRunner(verbosity=2).run(suite)
| bsd-2-clause | -4,654,066,581,966,717,000 | 29.12 | 85 | 0.593183 | false |
mysql/mysql-utilities | scripts/mysqlrplms.py | 1 | 15549 | #!/usr/bin/env python
#
# Copyright (c) 2014, 2016 Oracle and/or its affiliates. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
"""
This file contains the replicate utility. It is used to establish a
multi-source replication topology.
"""
import os.path
import sys
import logging
from mysql.utilities.common.tools import check_python_version
from mysql.utilities import VERSION_STRING
from mysql.utilities.exception import FormatError, UtilError, UtilRplError
from mysql.utilities.common.ip_parser import parse_connection
from mysql.utilities.common.messages import (
PARSE_ERR_OPTS_REQ_GREATER_OR_EQUAL,
PARSE_ERR_OPTS_REQ,
MSG_UTILITIES_VERSION
)
from mysql.utilities.common.options import (setup_common_options,
add_verbosity, add_rpl_user,
add_format_option,
add_ssl_options,
get_ssl_dict,
check_password_security)
from mysql.utilities.common.server import check_hostname_alias
from mysql.utilities.common.tools import check_connector_python
from mysql.utilities.common.my_print_defaults import MyDefaultsReader
from mysql.utilities.command.rpl_admin import purge_log
from mysql.utilities.command.setup_rpl import start_ms_replication
# Check Python version compatibility
check_python_version()
# Constants
NAME = "MySQL Utilities - mysqlrplms "
DESCRIPTION = "mysqlrplms - establish multi-source replication"
USAGE = ("%prog --slave=root@localhost:3306 --masters=root@localhost:3310,"
"root@localhost:3311 --rpl-user=rpl:passwd")
DATE_FORMAT = '%Y-%m-%d %H:%M:%S %p'
EXTENDED_HELP = """
Introduction
------------
The mysqlrplms utility is used to set up round-robin multi-source replication.
This technique can be a solution for aggregating streams of data from multiple
masters for a single slave.
The mysqlrplms utility follows these assumptions:
- All servers have GTIDs enabled.
- There are no conflicts between transactions from different sources/masters.
For example, there are no updates to the same object from multiple masters.
- Replication is asynchronous.
Round-robin scheduling is used to set up replication among the masters and
the slave.
The utility can be run as a daemon on POSIX systems.
# Basic multi-source replication setup.
$ mysqlrplms --slave=root:pass@host1:3306 \\
--masters=root:pass@host2:3306,root:pass@host3:3306
# Multi-source replication setup using a different report values.
$ mysqlrplms --slave=root:pass@host1:3306 \\
--masters=root:pass@host2:3306,root:pass@host3:3306 \\
--report-values=health,gtid,uuid
# Start multi-source replication running as a daemon. (POSIX only)
$ mysqlrplms --slave=root:pass@host1:3306 \\
--masters=root:pass@host2:3306,root:pass@host3:3306 \\
--log=rplms_daemon.log --pidfile=rplms_daemon.pid \\
--daemon=start
# Restart a multi-source replication running as a daemon.
$ mysqlrplms --slave=root:pass@host1:3306 \\
--masters=root:pass@host2:3306,root:pass@host3:3306 \\
--log=rplms_daemon.log --pidfile=rplms_daemon.pid \\
--daemon=restart
# Stop a multi-source replication running as a daemon.
$ mysqlrplms --slave=root:pass@host1:3306 \\
--masters=root:pass@host2:3306,root:pass@host3:3306 \\
--log=rplms_daemon.log --pidfile=rplms_daemon.pid \\
--daemon=stop
Helpful Hints
-------------
- The default report value is 'health'.
This value can be changed with the --report-values option. It can be
'health', 'gtid' or 'uuid'. Multiple values can be used separated by
commas.
- The default output for reporting health is 'grid'.
This value can be changed with the --format option. It can be 'grid',
'tab', 'csv' or 'vertical' format.
- The default interval for reporting health is 15 seconds.
This value can be changed with the --interval option.
- The default interval for switching masters is 60 seconds.
This value can be changed with the --switchover-interval option.
"""
# Check for connector/python
if not check_connector_python():
sys.exit(1)
if __name__ == '__main__':
# Setup the command parser
program = os.path.basename(sys.argv[0]).replace(".py", "")
parser = setup_common_options(os.path.basename(sys.argv[0]),
DESCRIPTION, USAGE, True, False,
extended_help=EXTENDED_HELP)
# Setup utility-specific options:
# Interval for reporting health
parser.add_option("--interval", "-i", action="store", dest="interval",
type="int", default="15", help="interval in seconds for "
"reporting health. Default = 15 seconds. "
"Lowest value is 5 seconds.")
# Interval for switching masters
parser.add_option("--switchover-interval", action="store",
dest="switchover_interval",
type="int", default="60", help="interval in seconds for "
"switching masters. Default = 60 seconds. "
"Lowest value is 30 seconds.")
# Connection information for the sink server
parser.add_option("--slave", action="store", dest="slave",
type="string", default=None,
help="connection information for slave server in "
"the form: <user>[:<password>]@<host>[:<port>]"
"[:<socket>] or <login-path>[:<port>][:<socket>]"
" or <config-path>[<[group]>]")
# Connection information for the masters servers
parser.add_option("--masters", action="store", dest="masters",
type="string", default=None, help="connection "
"information for master servers in the form: "
"<user>[:<password>]@<host>[:<port>][:<socket>] or "
"<login-path>[:<port>][:<socket>]"
" or <config-path>[<[group]>]. List multiple master "
"in comma-separated list.")
# Replication user and password
add_rpl_user(parser)
# Add start from beginning option
parser.add_option("-b", "--start-from-beginning", action="store_true",
default=False, dest="from_beginning",
help="start replication from the first event recorded "
"in the binary logging of the masters.")
# Add report values
parser.add_option("--report-values", action="store", dest="report_values",
type="string", default="health",
help="report values used in multi-source replication. "
"It can be health, gtid or uuid. Multiple values can be "
"used separated by commas. The default is health.")
# Add output format
add_format_option(parser, "display the output in either grid (default), "
"tab, csv, or vertical format", None)
# Add option to run as daemon
parser.add_option("--daemon", action="store", dest="daemon", default=None,
help="run on daemon mode. It can be start, stop, "
"restart or nodetach.", type="choice",
choices=("start", "stop", "restart", "nodetach"))
# Add pidfile for the daemon option
parser.add_option("--pidfile", action="store", dest="pidfile",
type="string", default=None, help="pidfile for running "
"mysqlrplms as a daemon.")
# Add a log file to use for logging messages
parser.add_option("--log", action="store", dest="log_file", default=None,
type="string", help="specify a log file to use for "
"logging messages")
# Add the maximum age of log entries in days for the logging system
parser.add_option("--log-age", action="store", dest="log_age", default=7,
type="int", help="specify maximum age of log entries in "
"days. Entries older than this will be purged on "
"startup. Default = 7 days.")
# Add ssl options
add_ssl_options(parser)
# Add verbosity
add_verbosity(parser)
# Now we process the rest of the arguments.
opt, args = parser.parse_args()
# Check security settings
check_password_security(opt, args)
# Check if the values specified for the --report-values option are valid.
for report in opt.report_values.split(","):
if report.lower() not in ("health", "gtid", "uuid"):
parser.error("The value for the option --report-values is not "
"valid: '{0}', the values allowed are 'health', "
"'gitd' or 'uuid'".format(opt.report_values))
# Check for errors
if int(opt.interval) < 5:
parser.error(PARSE_ERR_OPTS_REQ_GREATER_OR_EQUAL.format(
opt="--interval", value=5))
if int(opt.switchover_interval) < 30:
parser.error(PARSE_ERR_OPTS_REQ_GREATER_OR_EQUAL.format(
opt="--switchover-interval", value=30))
# option --slave is required (mandatory)
if not opt.slave:
parser.error(PARSE_ERR_OPTS_REQ.format(opt="--slave"))
# option --masters is required (mandatory)
if not opt.masters:
parser.error(PARSE_ERR_OPTS_REQ.format(opt="--masters"))
# option --rpl-user is required (mandatory)
if not opt.rpl_user:
parser.error(PARSE_ERR_OPTS_REQ.format(opt="--rpl-user"))
config_reader = MyDefaultsReader(opt, False)
# Parse slave connection values
try:
slave_vals = parse_connection(opt.slave, config_reader, opt)
except FormatError:
_, err, _ = sys.exc_info()
parser.error("Slave connection values invalid: {0}.".format(err))
except UtilError:
_, err, _ = sys.exc_info()
parser.error("Slave connection values invalid: {0}."
"".format(err.errmsg))
# Parse masters connection values
masters_vals = []
masters = opt.masters.split(",")
if len(masters) == 1:
parser.error("At least two masters are required for multi-source "
"replication.")
for master in masters:
try:
masters_vals.append(parse_connection(master, config_reader, opt))
except FormatError as err:
msg = ("Masters connection values invalid or cannot be parsed: "
"{0} ({1})".format(master, err))
raise UtilRplError(msg)
except UtilError as err:
msg = ("Masters connection values invalid or cannot be parsed: "
"{0} ({1})".format(master, err.errmsg))
raise UtilRplError(msg)
# Check hostname alias
for master_vals in masters_vals:
if check_hostname_alias(slave_vals, master_vals):
parser.error("The master and slave are the same host and port.")
# Check the daemon options
if opt.daemon:
# Check if a POSIX system
if os.name != "posix":
parser.error("Running mysqlfailover with --daemon is only "
"available for POSIX systems.")
# Check the presence of --log
if opt.daemon != "stop" and not opt.log_file:
parser.error("The option --log is required when using --daemon.")
# Test pidfile
if opt.daemon != "nodetach":
pidfile = opt.pidfile or "./rplms_daemon.pid"
pidfile = os.path.realpath(os.path.normpath(pidfile))
if opt.daemon == "start":
# Test if pidfile exists
if os.path.exists(pidfile):
parser.error("pidfile {0} already exists. The daemon is "
"already running?".format(pidfile))
# Test if pidfile is writable
try:
with open(pidfile, "w") as f:
f.write("{0}\n".format(0))
# Delete temporary pidfile
os.remove(pidfile)
except IOError as err:
parser.error("Unable to write pidfile: {0}"
"".format(err.strerror))
else:
# opt.daemon == stop/restart, test if pidfile is readable
pid = None
try:
if not os.path.exists(pidfile):
parser.error("pidfile {0} does not exist."
"".format(pidfile))
with open(pidfile, "r") as f:
pid = int(f.read().strip())
except IOError:
pid = None
except ValueError:
pid = None
# Test pid presence
if not pid:
parser.error("Can not read pid from pidfile.")
if opt.pidfile and not opt.daemon:
parser.error("The option --daemon is required when using --pidfile.")
# Purge log file of old data
if opt.log_file is not None and not purge_log(opt.log_file, opt.log_age):
parser.error("Error purging log file.")
# Setup log file
try:
logging.basicConfig(filename=opt.log_file, level=logging.INFO,
format="%(asctime)s %(levelname)s %(message)s",
datefmt=DATE_FORMAT)
except IOError:
_, e, _ = sys.exc_info()
parser.error("Error opening log file: {0}".format(str(e.args[1])))
# Create dictionary of options
options = {
"verbosity": opt.verbosity,
"quiet": opt.quiet,
"interval": opt.interval,
"switchover_interval": opt.switchover_interval,
"from_beginning": opt.from_beginning,
"report_values": opt.report_values,
'format': opt.format,
"rpl_user": opt.rpl_user,
"daemon": opt.daemon,
"pidfile": opt.pidfile,
"logging": opt.log_file is not None,
"log_file": opt.log_file,
}
# Add ssl values to options instead of connection.
options.update(get_ssl_dict(opt))
# Log MySQL Utilities version string
if opt.log_file:
logging.info(MSG_UTILITIES_VERSION.format(utility=program,
version=VERSION_STRING))
try:
start_ms_replication(slave_vals, masters_vals, options)
except UtilError:
_, e, _ = sys.exc_info()
errmsg = e.errmsg.strip(" ")
if opt.log_file:
logging.log(logging.CRITICAL, errmsg)
print("ERROR: {0}".format(errmsg))
sys.exit(1)
sys.exit(0)
| gpl-2.0 | -7,878,587,118,950,629,000 | 39.282383 | 79 | 0.592707 | false |
zsiciarz/django-briefcase | briefcase/migrations/0002_auto__chg_field_document_file.py | 1 | 5068 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Document.file'
db.alter_column('briefcase_document', 'file', self.gf('django.db.models.fields.files.FileField')(max_length=100))
def backwards(self, orm):
# Changing field 'Document.file'
db.alter_column('briefcase_document', 'file', self.gf('django.db.models.fields.files.FileField')(max_length=100, null=True))
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'briefcase.document': {
'Meta': {'ordering': "['-added_at']", 'object_name': 'Document'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'added_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['briefcase.DocumentStatus']", 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'briefcase.documentstatus': {
'Meta': {'ordering': "['-name']", 'object_name': 'DocumentStatus'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '250', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '150', 'db_index': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['briefcase']
| mit | 4,813,694,187,323,698,000 | 65.684211 | 182 | 0.557616 | false |
jason-neal/companion_simulations | tests/utilities/test_simulation_utils.py | 1 | 2347 | import numpy as np
import pytest
from spectrum_overload import Spectrum
from mingle.utilities.simulation_utilities import (check_inputs, max_delta,
spec_max_delta)
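# Speed of light in km/s (the rv and gamma velocities in these tests are
# treated as km/s as well).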
c = 299792.458
@pytest.mark.parametrize("xaxis, rv, gamma", [
([1, 2, 3, 4, 5], 3, 5),
([1.1, 1.2, 1.3, 1.4, 1.5], 0, -7.1)
])
def test_spec_max_delta_applies_max_delta_on_xaxis(xaxis, rv, gamma):
spec = Spectrum(xaxis=xaxis, flux=np.ones(len(xaxis)))
assert spec_max_delta(spec, rv, gamma) == max_delta(xaxis, rv, gamma)
@pytest.mark.parametrize("wav, rv, gamma, expected", [
([1], 3, 5, 5 / c),
([2], 1, -7.1, 2 * 7.1 / c),
(2, 1, 7.1, 2 * 7.1 / c),
([1, 2], 0, 0, 0)
])
def test_spec_max_delta(wav, rv, gamma, expected):
assert 2 * round(expected, 3) == max_delta(wav, rv, gamma)
@pytest.mark.parametrize("rv, gamma", [
(np.array([1, 2, 3, 4]), np.array([])),
(np.array([]), np.array([1, 2, 3, 4])),
([], np.array([1, 2, 3, 4])),
(np.array([1, 2, 3, 4]), [])])
def test_max_delta_with_empty_arrays(rv, gamma):
wav = np.arange(20)
with pytest.raises(ValueError) as excinfo:
max_delta(wav, rv, gamma)
assert 'Empty variable vector' in str(excinfo.value)
@pytest.mark.parametrize("inputs, expected", [
(range(5), np.array([0, 1, 2, 3, 4])),
("None", np.ndarray([0])),
(None, np.ndarray([0])),
([0], np.array([0])),
(1, np.array([1])),
(0, np.array([0]))
])
def test_check_inputs(inputs, expected):
assert np.allclose(check_inputs(inputs), expected)
@pytest.mark.parametrize("inputs", [[], np.array([]), {}, ()])
def test_check_inputs_raises_empty_error(inputs):
with pytest.raises(ValueError) as excinfo:
check_inputs(inputs)
assert "Empty variable" in str(excinfo.value)
from mingle.utilities.simulation_utilities import add_noise
@pytest.mark.xfail()
@pytest.mark.parametrize("noise", [
(5),
(500),
(50),
])
def test_add_noise(noise):
x = np.ones(10000)
assert np.allclose(np.std(add_noise(x, noise)), 1. / noise, 1e-2)
@pytest.mark.xfail()
@pytest.mark.parametrize("mu, noise", [
(20, 5),
(5, 500),
(1, 50),
])
def test_add_noise_with_mu(mu, noise):
x = mu * np.ones(10000)
assert np.allclose(np.std(add_noise(x, noise, use_mu=True)), mu / noise, 1e-2)
| mit | 349,818,910,205,054,500 | 26.611765 | 82 | 0.587985 | false |
ctk3b/InterMol | intermol/forces/improper_harmonic_dihedral_type.py | 1 | 1544 | import simtk.unit as units
from intermol.decorators import accepts_compatible_units
from intermol.forces.abstract_dihedral_type import AbstractDihedralType
class ImproperHarmonicDihedralType(AbstractDihedralType):
__slots__ = ['xi', 'k', 'improper']
@accepts_compatible_units(None, None, None, None,
xi=units.degrees,
k=units.kilojoules_per_mole * units.radians **(-2),
improper=None)
def __init__(self, bondingtype1, bondingtype2, bondingtype3, bondingtype4,
xi=0.0 * units.degrees,
k=0.0 * units.kilojoules_per_mole * units.radians **(-2),
improper=False):
AbstractDihedralType.__init__(self, bondingtype1, bondingtype2, bondingtype3, bondingtype4, improper)
self.xi = xi
self.k = k
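# Illustrative usage of the concrete class defined below (hypothetical atom
# indices and parameter values):
#
#   dihedral = ImproperHarmonicDihedral(
#       1, 2, 3, 4,
#       xi=180.0 * units.degrees,
#       k=10.0 * units.kilojoules_per_mole * units.radians ** (-2))
#
# ImproperHarmonicDihedralType carries only bonding types and parameters,
# while ImproperHarmonicDihedral additionally binds four specific atoms.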
class ImproperHarmonicDihedral(ImproperHarmonicDihedralType):
"""
stub documentation
"""
def __init__(self, atom1, atom2, atom3, atom4, bondingtype1=None, bondingtype2=None, bondingtype3=None, bondingtype4=None,
xi=0.0 * units.degrees,
k=0.0 * units.kilojoules_per_mole * units.radians **(-2),
improper=False):
self.atom1 = atom1
self.atom2 = atom2
self.atom3 = atom3
self.atom4 = atom4
ImproperHarmonicDihedralType.__init__(self, bondingtype1, bondingtype2, bondingtype3, bondingtype4,
xi=xi,
k=k,
improper=improper) | mit | 6,281,790,483,259,286,000 | 39.657895 | 127 | 0.600389 | false |
ndparker/rjsmin | tasks/clean.py | 1 | 1478 | # -*- encoding: ascii -*-
"""
Cleanup tasks
~~~~~~~~~~~~~
"""
import invoke as _invoke
@_invoke.task()
def py(ctx):
""" Wipe *.py[co] files """
for name in ctx.shell.files('.', '*.py[co]'):
ctx.shell.rm(name)
for name in ctx.shell.dirs('.', '__pycache__'):
ctx.shell.rm_rf(name)
@_invoke.task(py)
def dist(ctx):
""" Wipe all """
clean(ctx, so=True, cache=True)
@_invoke.task(py, default=True)
def clean(ctx, so=False, cache=False):
""" Wipe *.py[co] files and test leftovers """
for name in ctx.shell.files('.', '.coverage*', recursive=False):
ctx.shell.rm(name)
for name in ctx.shell.files('bench', '.out.*', recursive=False):
ctx.shell.rm(name)
ctx.shell.rm_rf(
'docs/coverage',
'docs/gcov',
'build',
'dist',
'wheel/dist',
ctx.doc.userdoc,
'docs/_userdoc/_build',
ctx.doc.website.source,
ctx.doc.website.target,
)
if cache:
cacheclean(ctx)
if so:
soclean(ctx)
@_invoke.task()
def cacheclean(ctx):
""" Wipe Cache files """
ctx.shell.rm_rf(
'.tox',
'bench/.tox',
'.cache',
'tests/.cache',
'tests/.pytest_cache',
'.mypy_cache',
)
@_invoke.task()
def soclean(ctx):
""" Wipe *.so files """
for name in ctx.shell.files('.', '*.pyd'):
ctx.shell.rm(name)
for name in ctx.shell.files('.', '*.so'):
ctx.shell.rm(name)
| apache-2.0 | -2,030,500,452,548,452,400 | 20.42029 | 68 | 0.523004 | false |
pkoutsias/SickRage | tests/sickrage_tests/helper/common_tests.py | 1 | 15228 | # coding=utf-8
# This file is part of SickRage.
#
# URL: https://SickRage.GitHub.io
# Git: https://github.com/SickRage/SickRage.git
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
# pylint: disable=line-too-long
"""
Test sickrage.common
"""
from __future__ import print_function
import unittest
import os
import sys
sys.path.insert(1, os.path.abspath(os.path.join(os.path.dirname(__file__), '../../../lib')))
sys.path.insert(1, os.path.abspath(os.path.join(os.path.dirname(__file__), '../../..')))
import sickbeard
from sickrage.helper.common import http_code_description, is_sync_file, is_torrent_or_nzb_file, pretty_file_size
from sickrage.helper.common import remove_extension, replace_extension, sanitize_filename, try_int, convert_size
class CommonTests(unittest.TestCase):
"""
Test common
"""
def test_http_code_description(self):
test_cases = {
None: None,
'': None,
'123': None,
'12.3': None,
'-123': None,
'-12.3': None,
'300': None,
0: None,
123: None,
12.3: None,
-123: None,
-12.3: None,
300: 'Multiple Choices',
451: '(Redirect, Unavailable For Legal Reasons)',
497: 'HTTP to HTTPS',
499: '(Client Closed Request, Token required)',
600: None,
}
unicode_test_cases = {
u'': None,
u'123': None,
u'12.3': None,
u'-123': None,
u'-12.3': None,
u'300': None,
}
for test in test_cases, unicode_test_cases:
for (http_code, result) in test.iteritems():
self.assertEqual(http_code_description(http_code), result)
def test_is_sync_file(self):
"""
Test is sync file
"""
sickbeard.SYNC_FILES = '!sync,lftp-pget-status,part'
test_cases = {
None: False,
42: False,
'': False,
'filename': False,
'.syncthingfilename': True,
'.syncthing.filename': True,
'.syncthing-filename': True,
'.!sync': True,
'file.!sync': True,
'file.!sync.ext': False,
'.lftp-pget-status': True,
'file.lftp-pget-status': True,
'file.lftp-pget-status.ext': False,
'.part': True,
'file.part': True,
'file.part.ext': False,
}
unicode_test_cases = {
u'': False,
u'filename': False,
u'.syncthingfilename': True,
u'.syncthing.filename': True,
u'.syncthing-filename': True,
u'.!sync': True,
u'file.!sync': True,
u'file.!sync.ext': False,
u'.lftp-pget-status': True,
u'file.lftp-pget-status': True,
u'file.lftp-pget-status.ext': False,
u'.part': True,
u'file.part': True,
u'file.part.ext': False,
}
for tests in test_cases, unicode_test_cases:
for (filename, result) in tests.iteritems():
self.assertEqual(is_sync_file(filename), result)
def test_is_torrent_or_nzb_file(self):
"""
Test is torrent or nzb file
"""
test_cases = {
None: False,
42: False,
'': False,
'filename': False,
'.nzb': True,
'file.nzb': True,
'file.nzb.part': False,
'.torrent': True,
'file.torrent': True,
'file.torrent.part': False,
}
unicode_test_cases = {
u'': False,
u'filename': False,
u'.nzb': True,
u'file.nzb': True,
u'file.nzb.part': False,
u'.torrent': True,
u'file.torrent': True,
u'file.torrent.part': False,
}
for tests in test_cases, unicode_test_cases:
for (filename, result) in tests.iteritems():
self.assertEqual(is_torrent_or_nzb_file(filename), result)
def test_pretty_file_size(self):
"""
Test pretty file size
"""
test_cases = {
None: '0.00 B',
'': '0.00 B',
'1024': '1.00 KB',
'1024.5': '1.00 KB',
-42.5: '0.00 B',
-42: '0.00 B',
0: '0.00 B',
25: '25.00 B',
25.5: '25.50 B',
2 ** 10: '1.00 KB',
50 * 2 ** 10 + 25: '50.02 KB',
2 ** 20: '1.00 MB',
100 * 2 ** 20 + 50 * 2 ** 10 + 25: '100.05 MB',
2 ** 30: '1.00 GB',
200 * 2 ** 30 + 100 * 2 ** 20 + 50 * 2 ** 10 + 25: '200.10 GB',
2 ** 40: '1.00 TB',
400 * 2 ** 40 + 200 * 2 ** 30 + 100 * 2 ** 20 + 50 * 2 ** 10 + 25: '400.20 TB',
2 ** 50: '1.00 PB',
800 * 2 ** 50 + 400 * 2 ** 40 + 200 * 2 ** 30 + 100 * 2 ** 20 + 50 * 2 ** 10 + 25: '800.39 PB',
2 ** 60: 2 ** 60,
}
unicode_test_cases = {
u'': '0.00 B',
u'1024': '1.00 KB',
u'1024.5': '1.00 KB',
}
for tests in test_cases, unicode_test_cases:
for (size, result) in tests.iteritems():
self.assertEqual(pretty_file_size(size), result)
def test_remove_extension(self):
"""
Test remove extension
"""
test_cases = {
None: None,
42: 42,
'': '',
'.': '.',
'filename': 'filename',
'.bashrc': '.bashrc',
'.nzb': '.nzb',
'file.nzb': 'file',
'file.name.nzb': 'file.name',
'.torrent': '.torrent',
'file.torrent': 'file',
'file.name.torrent': 'file.name',
'.avi': '.avi',
'file.avi': 'file',
'file.name.avi': 'file.name',
}
unicode_test_cases = {
u'': u'',
u'.': u'.',
u'filename': u'filename',
u'.bashrc': u'.bashrc',
u'.nzb': u'.nzb',
u'file.nzb': u'file',
u'file.name.nzb': u'file.name',
u'.torrent': u'.torrent',
u'file.torrent': u'file',
u'file.name.torrent': u'file.name',
u'.avi': u'.avi',
u'file.avi': u'file',
u'file.name.avi': u'file.name',
}
for tests in test_cases, unicode_test_cases:
for (extension, result) in tests.iteritems():
self.assertEqual(remove_extension(extension), result)
def test_replace_extension(self):
"""
Test replace extension
"""
test_cases = {
(None, None): None,
(None, ''): None,
(42, None): 42,
(42, ''): 42,
('', None): '',
('', ''): '',
('.', None): '.',
('.', ''): '.',
('.', 'avi'): '.',
('filename', None): 'filename',
('filename', ''): 'filename',
('filename', 'avi'): 'filename',
('.bashrc', None): '.bashrc',
('.bashrc', ''): '.bashrc',
('.bashrc', 'avi'): '.bashrc',
('file.mkv', None): 'file.None',
('file.mkv', ''): 'file.',
('file.mkv', 'avi'): 'file.avi',
('file.name.mkv', None): 'file.name.None',
('file.name.mkv', ''): 'file.name.',
('file.name.mkv', 'avi'): 'file.name.avi',
}
unicode_test_cases = {
(None, u''): None,
(42, u''): 42,
('', u''): '',
(u'', None): u'',
(u'', ''): u'',
(u'', u''): u'',
('.', u''): '.',
('.', u'avi'): '.',
(u'.', None): u'.',
(u'.', ''): u'.',
(u'.', u''): u'.',
(u'.', 'avi'): u'.',
(u'.', u'avi'): u'.',
('filename', u''): 'filename',
('filename', u'avi'): 'filename',
(u'filename', None): u'filename',
(u'filename', ''): u'filename',
(u'filename', u''): u'filename',
(u'filename', 'avi'): u'filename',
(u'filename', u'avi'): u'filename',
('.bashrc', u''): '.bashrc',
('.bashrc', u'avi'): '.bashrc',
(u'.bashrc', None): u'.bashrc',
(u'.bashrc', ''): u'.bashrc',
(u'.bashrc', u''): u'.bashrc',
(u'.bashrc', 'avi'): u'.bashrc',
(u'.bashrc', u'avi'): u'.bashrc',
('file.mkv', u''): 'file.',
('file.mkv', u'avi'): 'file.avi',
(u'file.mkv', None): u'file.None',
(u'file.mkv', ''): u'file.',
(u'file.mkv', u''): u'file.',
(u'file.mkv', 'avi'): u'file.avi',
(u'file.mkv', u'avi'): u'file.avi',
('file.name.mkv', u''): 'file.name.',
('file.name.mkv', u'avi'): 'file.name.avi',
(u'file.name.mkv', None): u'file.name.None',
(u'file.name.mkv', ''): u'file.name.',
(u'file.name.mkv', u''): u'file.name.',
(u'file.name.mkv', 'avi'): u'file.name.avi',
(u'file.name.mkv', u'avi'): u'file.name.avi',
}
for tests in test_cases, unicode_test_cases:
for ((filename, extension), result) in tests.iteritems():
self.assertEqual(replace_extension(filename, extension), result)
def test_sanitize_filename(self):
"""
Test sanitize filename
"""
test_cases = {
None: '',
42: '',
'': '',
'filename': 'filename',
'fi\\le/na*me': 'fi-le-na-me',
'fi:le"na<me': 'filename',
'fi>le|na?me': 'filename',
' . file\u2122name. .': 'file-u2122name', # pylint: disable=anomalous-unicode-escape-in-string
}
unicode_test_cases = {
u'': u'',
u'filename': u'filename',
u'fi\\le/na*me': u'fi-le-na-me',
u'fi:le"na<me': u'filename',
u'fi>le|na?me': u'filename',
u' . file\u2122name. .': u'filename',
}
for tests in test_cases, unicode_test_cases:
for (filename, result) in tests.iteritems():
self.assertEqual(sanitize_filename(filename), result)
def test_try_int(self):
"""
Test try int
"""
test_cases = {
None: 0,
'': 0,
'123': 123,
'-123': -123,
'12.3': 0,
'-12.3': 0,
0: 0,
123: 123,
-123: -123,
12.3: 12,
-12.3: -12,
}
unicode_test_cases = {
u'': 0,
u'123': 123,
u'-123': -123,
u'12.3': 0,
u'-12.3': 0,
}
for test in test_cases, unicode_test_cases:
for (candidate, result) in test.iteritems():
self.assertEqual(try_int(candidate), result)
def test_try_int_with_default(self):
"""
Test try int
"""
default_value = 42
test_cases = {
None: default_value,
'': default_value,
'123': 123,
'-123': -123,
'12.3': default_value,
'-12.3': default_value,
0: 0,
123: 123,
-123: -123,
12.3: 12,
-12.3: -12,
}
unicode_test_cases = {
u'': default_value,
u'123': 123,
u'-123': -123,
u'12.3': default_value,
u'-12.3': default_value,
}
for test in test_cases, unicode_test_cases:
for (candidate, result) in test.iteritems():
self.assertEqual(try_int(candidate, default_value), result)
def test_convert_size(self):
# converts pretty file sizes to integers
self.assertEqual(convert_size('1 B'), 1)
self.assertEqual(convert_size('1 KB'), 1024)
self.assertEqual(convert_size('1 kb', use_decimal=True), 1000) # can use decimal units (e.g. KB = 1000 bytes instead of 1024)
# returns integer sizes for integers
self.assertEqual(convert_size(0, -1), 0)
self.assertEqual(convert_size(100, -1), 100)
self.assertEqual(convert_size(1.312, -1), 1) # returns integer sizes for floats too
# without a default value, failures return None
self.assertEqual(convert_size('pancakes'), None)
# default value can be anything
self.assertEqual(convert_size(None, -1), -1)
self.assertEqual(convert_size('', 3.14), 3.14)
self.assertEqual(convert_size('elephant', 'frog'), 'frog')
# negative sizes return 0
self.assertEqual(convert_size(-1024, -1), 0)
self.assertEqual(convert_size('-1 GB', -1), 0)
# can also use `or` for a default value
self.assertEqual(convert_size(None) or 100, 100)
self.assertEqual(convert_size(None) or 1.61803, 1.61803) # default doesn't have to be integer
self.assertEqual(convert_size(None) or '100', '100') # default doesn't have to be numeric either
self.assertEqual(convert_size('-1 GB') or -1, -1) # can use `or` to provide a default when size evaluates to 0
# default units can be kwarg'd
self.assertEqual(convert_size('1', default_units='GB'), convert_size('1 GB'))
# separator can be kwarg'd
self.assertEqual(convert_size('1?GB', sep='?'), convert_size('1 GB'))
# can use custom dictionary to support internationalization
french = ['O', 'KO', 'MO', 'GO', 'TO', 'PO']
self.assertEqual(convert_size('1 o', units=french), 1)
self.assertEqual(convert_size('1 go', use_decimal=True, units=french), 1000000000)
self.assertEqual(convert_size('1 o'), None) # Wrong units so result is None
# custom units need to be uppercase or they won't match
oops = ['b', 'kb', 'Mb', 'Gb', 'tB', 'Pb']
self.assertEqual(convert_size('1 b', units=oops), None)
self.assertEqual(convert_size('1 B', units=oops), None)
self.assertEqual(convert_size('1 Mb', units=oops), None)
self.assertEqual(convert_size('1 MB', units=oops), None)
if __name__ == '__main__':
print('=====> Testing %s' % __file__)
SUITE = unittest.TestLoader().loadTestsFromTestCase(CommonTests)
unittest.TextTestRunner(verbosity=2).run(SUITE)
| gpl-3.0 | 657,268,527,341,937,400 | 32.690265 | 134 | 0.472157 | false |
alex-bauer/kelvin-power-challenge | src/config.py | 1 | 1408 | """Configuration of directories, target column names, cross-validation folds"""
import os
import numpy as np
from datetime import datetime
class ParamConfig:
def __init__(self):
self.target_cols = ['NPWD2372', 'NPWD2401', 'NPWD2402', 'NPWD2451', 'NPWD2471', 'NPWD2472', 'NPWD2481',
'NPWD2482', 'NPWD2491', 'NPWD2501', 'NPWD2531', 'NPWD2532', 'NPWD2551', 'NPWD2552',
'NPWD2561', 'NPWD2562', 'NPWD2691', 'NPWD2692', 'NPWD2721', 'NPWD2722',
'NPWD2742', 'NPWD2771', 'NPWD2791', 'NPWD2792', 'NPWD2801', 'NPWD2802', 'NPWD2821',
'NPWD2851', 'NPWD2852', 'NPWD2871', 'NPWD2872', 'NPWD2881', 'NPWD2882']
## path
self.data_folder = "../../data"
self.features_folder = "%s/features" % self.data_folder
self.featuresets_folder = "%s/featuresets" % self.data_folder
self.models_folder = "%s/models" % self.data_folder
self.level1_models_folder = "%s/level1" % self.models_folder
self.model_config_folder = "../../model_config"
self.log_folder = "../../log/"
self.folds = [(datetime(2011, 01, 01), datetime(2011, 12, 31)),
(datetime(2012, 01, 01), datetime(2012, 12, 31)),
(datetime(2013, 01, 01), datetime(2013, 12, 31))]
## initialize a param config
config = ParamConfig()
| mit | -2,190,644,668,626,966,500 | 43 | 111 | 0.568892 | false |
google/it-cert-automation | Course2/snippets/C2M3.py | 1 | 8908 | #!/usr/bin/env python3
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# These are the snippets shown during the demo videos in C2M3
# Each snippet is followed by the corresponding output when executed in the
# Python interpreter.
log = "July 31 07:51:48 mycomputer bad_process[12345]: ERROR Performing package upgrade"
# >>> log = "July 31 07:51:48 mycomputer bad_process[12345]: ERROR Performing package upgrade"
# >>> index = log.index("[")
# >>> print(log[index+1:index+6])
# 12345
import re
log = "July 31 07:51:48 mycomputer bad_process[12345]: ERROR Performing package upgrade"
regex = r"\[(\d+)\]"
result = re.search(regex, log)
print(result[1])
# >>> import re
# >>> log = "July 31 07:51:48 mycomputer bad_process[12345]: ERROR Performing package upgrade"
# >>> regex = r"\[(\d+)\]"
# >>> result = re.search(regex, log)
# >>> print(result[1])
# 12345
#
# >>> import re
# >>> result = re.search(r"aza", "plaza")
#
# >>> print(result)
# <re.Match object; span=(2, 5), match='aza'>
#
# >>> result = re.search(r"aza", "bazaar")
# >>> print(result)
# <re.Match object; span=(1, 4), match='aza'>
#
# >>> result = re.search(r"aza", "maze")
# >>> print(result)
# None
#
# >>> print(re.search(r"^x", "xenon"))
# <re.Match object; span=(0, 1), match='x'>
#
# >>> print(re.search(r"p.ng", "penguin"))
# <re.Match object; span=(0, 4), match='peng'>
#
# >>> print(re.search(r"p.ng", "clapping"))
# <re.Match object; span=(4, 8), match='ping'>
# >>> print(re.search(r"p.ng", "sponge"))
# <re.Match object; span=(1, 5), match='pong'>
#
# >>> print(re.search(r"p.ng", "Pangaea", re.IGNORECASE))
# <re.Match object; span=(0, 4), match='Pang'>
#
# >>> print(re.search(r"[Pp]ython", "Python"))
# <re.Match object; span=(0, 6), match='Python'>
#
# >>> print(re.search(r"[a-z]way", "The end of the highway"))
# <re.Match object; span=(18, 22), match='hway'>
#
# >>> print(re.search(r"[a-z]way", "What a way to go"))
# None
#
# >>> print(re.search("cloud[a-zA-Z0-9]", "cloudy"))
# <re.Match object; span=(0, 6), match='cloudy'>
# >>> print(re.search("cloud[a-zA-Z0-9]", "cloud9"))
# <re.Match object; span=(0, 6), match='cloud9'>
#
# >>> print(re.search(r"[^a-zA-Z]", "This is a sentence with spaces."))
# <re.Match object; span=(4, 5), match=' '>
#
# >>> print(re.search(r"[^a-zA-Z ]", "This is a sentence with spaces."))
# <re.Match object; span=(30, 31), match='.'>
#
# >>> print(re.search(r"cat|dog", "I like cats."))
# <re.Match object; span=(7, 10), match='cat'>
#
# >>> print(re.search(r"cat|dog", "I love dogs!"))
# <re.Match object; span=(7, 10), match='dog'>
#
# >>> print(re.search(r"cat|dog", "I like both dogs and cats."))
# <re.Match object; span=(12, 15), match='dog'>
#
# >>> print(re.findall(r"cat|dog", "I like both dogs and cats."))
# ['dog', 'cat']
#
# >>> print(re.search(r"Py.*n", "Pygmalion"))
# <re.Match object; span=(0, 9), match='Pygmalion'>
#
# >>> print(re.search(r"Py.*n", "Python Programming"))
# <re.Match object; span=(0, 17), match='Python Programmin'>
#
# >>> print(re.search(r"Py[a-z]*n", "Python Programming"))
# <re.Match object; span=(0, 6), match='Python'>
#
# >>> print(re.search(r"Py[a-z]*n", "Pyn"))
# <re.Match object; span=(0, 3), match='Pyn'>
#
# >>> print(re.search(r"o+l+", "goldfish"))
# <re.Match object; span=(1, 3), match='ol'>
#
# >>> print(re.search(r"o+l+", "woolly"))
# <re.Match object; span=(1, 5), match='ooll'>
#
# >>> print(re.search(r"o+l+", "boil"))
# None
#
# >>> print(re.search(r"p?each", "To each their own"))
# <re.Match object; span=(3, 7), match='each'>
#
# >>> print(re.search(r"p?each", "I like peaches"))
# <re.Match object; span=(7, 12), match='peach'>
#
#
# >>> print(re.search(r".com", "welcome"))
# <re.Match object; span=(2, 6), match='lcom'>
#
# >>> print(re.search(r"\.com", "welcome"))
# None
#
# >>> print(re.search(r"\.com", "mydomain.com"))
# <re.Match object; span=(8, 12), match='.com'>
#
# >>> print(re.search(r"\w*", "This is an example"))
# <re.Match object; span=(0, 4), match='This'>
#
# >>> print(re.search(r"\w*", "And_this_is_another"))
# <re.Match object; span=(0, 19), match='And_this_is_another'>
#
# >>> print(re.search(r"A.*a", "Argentina"))
# <re.Match object; span=(0, 9), match='Argentina'>
#
# >>> print(re.search(r"A.*a", "Azerbaijan"))
# <re.Match object; span=(0, 9), match='Azerbaija'>
#
# >>> print(re.search(r"^A.*a$", "Azerbaijan"))
# None
#
# >>> print(re.search(r"^A.*a$", "Australia"))
# <re.Match object; span=(0, 9), match='Australia'>
#
# >>> pattern = r"^[a-zA-Z_][a-zA-Z0-9_]*$"
#
# >>> print(re.search(pattern, "_this_is_a_valid_variable_name"))
# <re.Match object; span=(0, 30), match='_this_is_a_valid_variable_name'>
#
# >>> print(re.search(pattern, "this isn't a valid variable"))
# None
#
# >>> print(re.search(pattern, "my_variable1"))
# <re.Match object; span=(0, 12), match='my_variable1'>
#
# >>> print(re.search(pattern, "2my_variable1"))
# None
#
# >>> result = re.search(r"^(\w*), (\w*)$", "Lovelace, Ada")
# >>> print(result)
# <re.Match object; span=(0, 13), match='Lovelace, Ada'>
#
# >>> print(result.groups())
# ('Lovelace', 'Ada')
#
# >>> print(result[0])
# Lovelace, Ada
#
# >>> print(result[1])
# Lovelace
# >>> print(result[2])
# Ada
#
# >>> "{} {}".format(result[2], result[1])
# 'Ada Lovelace'
#
# >>> def rearrange_name(name):
# ... result = re.search(r"^(\w*), (\w*)$", name)
# ... if result == None:
# ... return name
# ... return "{} {}".format(result[2], result[1])
# ...
#
# >>> rearrange_name("Lovelace, Ada")
# 'Ada Lovelace'
#
# >>> rearrange_name("Richie, Dennis")
# 'Dennis Richie'
#
# >>> rearrange_name("Hopper, Grace M.")
# 'Hopper, Grace M.'
def rearrange_name(name):
result = re.search(r"^([\w .-]*), ([\w .-]*)$", name)
if result == None:
return name
return "{} {}".format(result[2], result[1])
# >>> def rearrange_name(name):
# ... result = re.search(r"^([\w .-]*), ([\w .-]*)$", name)
# ... if result == None:
# ... return name
# ... return "{} {}".format(result[2], result[1])
#
# >>> rearrange_name("Hopper, Grace M.")
# 'Grace M. Hopper'
#
# >>> print(re.search(r"[a-zA-Z]{5}", "a ghost"))
# <re.Match object; span=(2, 7), match='ghost'>
#
# >>> print(re.search(r"[a-zA-Z]{5}", "a scary ghost appeared"))
# <re.Match object; span=(2, 7), match='scary'>
#
# >>> print(re.findall(r"[a-zA-Z]{5}", "a scary ghost appeared"))
# ['scary', 'ghost', 'appea']
#
# >>> print(re.findall(r"\b[a-zA-Z]{5}\b", "A scary ghost appeared"))
# ['scary', 'ghost']
#
# >>> print(re.findall(r"\w{5,10}", "I really like strawberries"))
# ['really', 'strawberri']
#
# >>> print(re.findall(r"\w{5,}", "I really like strawberries"))
# ['really', 'strawberries']
#
# >>> print(re.search(r"s\w{,20}", "I really like strawberries"))
# <re.Match object; span=(14, 26), match='strawberries'>
import re
log = "July 31 07:51:48 mycomputer bad_process[12345]: ERROR Performing package upgrade"
regex = r"\[(\d+)\]"
result = re.search(regex, log)
print(result[1])
# >>> import re
# >>> log = "July 31 07:51:48 mycomputer bad_process[12345]: ERROR Performing package upgrade"
# >>> regex = r"\[(\d+)\]"
# >>> result = re.search(regex, log)
# >>> print(result[1])
# 12345
#
# >>> result = re.search(regex, "A completely different string that also has numbers [34567]")
# >>> print(result[1])
# 34567
#
# >>> result = re.search(regex, "99 elephants in a [cage]")
# >>> print(result[1])
# Traceback (most recent call last):
# File "<stdin>", line 1, in <module>
# TypeError: 'NoneType' object is not subscriptable
#
# >>> def extract_pid(log_line):
# ... regex = r"\[(\d+)\]"
# ... result = re.search(regex, log_line)
# ... if result is None:
# ... return None
# ... return result[1]
# ...
#
# >>> print(extract_pid(log))
# 12345
#
# >>> print(extract_pid("99 elephants in a [cage]"))
#
# >>> re.split(r"[.?!]", "One sentence. Another one? And the last one!")
# ['One sentence', ' Another one', ' And the last one', '']
#
# >>> re.split(r"([.?!])", "One sentence. Another one? And the last one!")
# ['One sentence', '.', ' Another one', '?', ' And the last one', '!', '']
#
# >>> re.sub(r"[\w.%+-]+@[\w.-]+", "[REDACTED]", "Received an email for [email protected]")
# 'Received an email for [REDACTED]'
#
# >>> re.sub(r"^([\w .-]*), ([\w .-]*)$", r"\2 \1", "Lovelace, Ada")
# 'Ada Lovelace'
| apache-2.0 | -5,175,144,962,532,922,000 | 30.038328 | 98 | 0.584306 | false |
wangzheng0822/algo | python/31_bfs_dfs/bfs_dfs.py | 1 | 2500 | """
Breadth-first search and depth-first search.
Author: Wenru Dong
"""
from typing import List, Optional, Generator, IO
from collections import deque
class Graph:
"""Undirected graph."""
def __init__(self, num_vertices: int):
self._num_vertices = num_vertices
self._adjacency = [[] for _ in range(num_vertices)]
def add_edge(self, s: int, t: int) -> None:
self._adjacency[s].append(t)
self._adjacency[t].append(s)
def _generate_path(self, s: int, t: int, prev: List[Optional[int]]) -> Generator[str, None, None]:
if prev[t] or s != t:
yield from self._generate_path(s, prev[t], prev)
yield str(t)
def bfs(self, s: int, t: int) -> IO[str]:
"""Print out the path from Vertex s to Vertex t
using bfs.
"""
if s == t: return
visited = [False] * self._num_vertices
visited[s] = True
q = deque()
q.append(s)
prev = [None] * self._num_vertices
while q:
v = q.popleft()
for neighbour in self._adjacency[v]:
if not visited[neighbour]:
prev[neighbour] = v
if neighbour == t:
print("->".join(self._generate_path(s, t, prev)))
return
visited[neighbour] = True
q.append(neighbour)
def dfs(self, s: int, t: int) -> IO[str]:
"""Print out a path from Vertex s to Vertex t
using dfs.
"""
found = False
visited = [False] * self._num_vertices
prev = [None] * self._num_vertices
def _dfs(from_vertex: int) -> None:
nonlocal found
if found: return
visited[from_vertex] = True
if from_vertex == t:
found = True
return
for neighbour in self._adjacency[from_vertex]:
if not visited[neighbour]:
prev[neighbour] = from_vertex
_dfs(neighbour)
_dfs(s)
print("->".join(self._generate_path(s, t, prev)))
if __name__ == "__main__":
graph = Graph(8)
graph.add_edge(0, 1)
graph.add_edge(0, 3)
graph.add_edge(1, 2)
graph.add_edge(1, 4)
graph.add_edge(2, 5)
graph.add_edge(3, 4)
graph.add_edge(4, 5)
graph.add_edge(4, 6)
graph.add_edge(5, 7)
graph.add_edge(6, 7)
graph.bfs(0, 7)
graph.dfs(0, 7)
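    # Expected output, per a manual trace of the adjacency-list insertion order
    # built above (illustrative):
    #   0->1->2->5->7
    #   0->1->2->5->4->6->7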
| apache-2.0 | 5,939,056,272,822,307,000 | 27.409091 | 102 | 0.5048 | false |
angelcoto/armoni | armoni.py | 1 | 18111 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# armoni.py
#
# Copyright 2012-2014 Ángel Coto <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details (http://www.gnu.org/licenses/gpl.txt)
#
# Description:
#   This program periodically checks the files defined in the configuration
#   file and raises alerts depending on whether they have changed or not.
#   By default, alerts are raised when files change, but a command-line
#   parameter can optionally request alerts when files have NOT changed.
#   Alerts are written to a log and are also sent by e-mail.
# Version history
#   1.0.1:  Adds the recipients to the message stored in the event log for
#           the rule-compliance notification
#   1.1.0:  * Simplifies the comparison method
#           * Refreshes the file list on every monitoring round.
#             This applies to directory mode.
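# An illustrative armoni.ini layout (a sketch: the section and option names follow the
# Parametros class below, while the concrete values are made-up placeholders):
#
#   [datos_monitoreo]
#   directorios = /var/www; /etc/ssh
#   archivos = /etc/passwd; /etc/hosts
#   minutos_intervalo = 60
#
#   [datos_servidor_correo]
#   servidor = smtp.example.com
#   requiere_autenticacion = no
#   puerto = 25
#   cuenta = [email protected]
#
#   [datos_correo]
#   de = [email protected]
#   para = [email protected]; [email protected]
#   para_admin = [email protected]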
import os
import smtplib
from artamiz import calcsum, enllavado
from ConfigParser import SafeConfigParser
from time import sleep, localtime, strftime
from sys import argv
from getpass import getpass
from base64 import standard_b64decode, standard_b64encode
from email.mime.text import MIMEText
from email.Header import Header
from socket import gethostname
### Program version definition
Programa = 'armoni'
Ver = '1.1.0 (beta)'
Copyright = 'Copyright (c) 2012-2014 Angel Coto <[email protected]>'
Maquina = gethostname()
### Initialize message variables
Error1 = "* Error 1: Error al leer archivo '{0}'."
Error2 = "* Error 2: El campo '{0}' no tiene formáto válido."
Error3 = "* Error 3: '{0}' no es directorio."
Error4 = "* Error 4: '{0}' no es un archivo."
Error5 = "* Error 5: '{0}' no es valor esperado para '{1}'."
MensajeLog1 = "{0}\t{1}\t{2}\t{3}" # Message written to the monitoring log
ErrorLog1 = "{0}\tERROR\tError en la comunicación o autenticación con el servidor de correo"
ErrorLog2 = "{0}\tERROR\tError al intentar enviar el mensaje luego de contactar exitosamente al servidor de correo"
ErrorLog3 = "{0}\tERROR\t{1} finalizó debido a errores en archivo de ini"
ErrorLog4 = "{0}\tERROR\t{1} finalizó porque ninguno de los archivos se puede analizar"
ErrorLog5 = "{0}\tERROR\tNo se pudo verificar archivos\t{1}"
EventoLog0 = "{0}\tINFORMATIVO\t{1} inició con éxito con parámetros\t{2}\t{3}\t{4}\t{5}"
EventoLog1 = "{0}\tINFORMATIVO\tSe notificó cumplimiento de la regla\t{1}\t{2}"
EventoLog2 = "{0}\tINFORMATIVO\tNo fue posible notificar cumplimiento de la regla\t{1}"
EventoLog3 = "{0}\tINFORMATIVO\tSe notificó el inicio de {1}\t{2}"
EventoLog4 = "{0}\tINFORMATIVO\tNo fue posible notificar el inicio de {1}"
EventoLog5 = "{0}\tINFORMATIVO\tSe excluyen archivos del monitoreo\t{1}"
EventoLog6 = "{0}\tINFORMATIVO\tInicio de ciclo de verificación"
EventoLog7 = "{0}\tINFORMATIVO\tFin de ciclo de verificación"
EventoLog100 = "{0}\tINFORMATIVO\t{1} fue detenido"
class Correo:
def __init__(self, Servidor, Puerto, Cuenta, Pwd = None):
self.Cuenta = Cuenta
self.Pwd = Pwd
self.Servidor = Servidor
self.Puerto = Puerto
self.Asunto = ''
self.Mensaje = ''
  def CreaMensaje(self, Mensaje): # Generic method for any pre-built message
self.Mensaje = Mensaje
  def CreaAsunto(self, Asunto): # Generic method for any pre-built subject
self.Asunto = Asunto
  def CreaAsuntoLog(self, CausaAlerta): # Specific method to build the alert e-mail subject
if CausaAlerta == 'cambio':
self.Asunto = Programa + '@' + Maquina + ': ** Reportando cambios en archivos'
else:
self.Asunto = Programa + '@' + Maquina + ': ** Reportando archivos que no han cambiado'
  def CreaMensajeLog(self, Archivos, CausaAlerta, Intervalo, Hora): # Specific method to build the alert e-mail message
self.Mensaje = '------------ Reporte de ' + Programa + '@' + Maquina + ' en fecha ' + Hora + ' ------------\n\n'
if CausaAlerta == 'cambio':
self.Mensaje = self.Mensaje + 'Se detectó que los siguientes archivos se modificaron en los últimos ' + str(Intervalo) + ' minutos:\n\n'
else:
self.Mensaje = self.Mensaje + 'Se detectó que los siguientes archivos no han cambiado en los últimos ' + str(Intervalo) + ' minutos:\n\n'
Parrafo = ''
for Archivo in Archivos:
Parrafo = Parrafo + ' * ' + Archivo + '\n'
self.Mensaje = self.Mensaje + Parrafo + '\n' + Programa + '-' + Ver
  def EnviarCorreo(self, Remitente, Destinatarios): # Generic method to send e-mail
    # Build the simple message (plain text, no attachment)
Asunto = self.Asunto.decode('utf-8')
Asunto = Header(Asunto,'utf-8')
Mensaje = MIMEText(self.Mensaje,'plain','utf-8')
Mensaje['From'] = Remitente
Mensaje['To'] = Remitente
Mensaje['Subject'] = Asunto
Mensaje = Mensaje.as_string()
    # Connect to the mail server
if self.Servidor == 'smtp.gmail.com':
try:
mailServer = smtplib.SMTP(self.Servidor,self.Puerto)
mailServer.starttls()
mailServer.login(self.Cuenta, standard_b64decode(self.Pwd))
except:
return 1
else:
try:
mailServer = smtplib.SMTP(self.Servidor, self.Puerto)
        # mailServer.set_debuglevel(True) # Use if you need to inspect the conversation with the server
except:
return 1
    # Send the message
try:
mailServer.sendmail(Remitente, Destinatarios, Mensaje)
return 0
except:
return 2
finally:
mailServer.quit()
class Log:
def __init__(self, Archivo):
self.Archivo = Archivo
self.TamanoMaximo = 1048576
def GrabaRegistroLog(self, Registro):
ArchivoLog = open(self.Archivo, 'a')
ArchivoLog.write(Registro + '\n')
ArchivoLog.close()
if self.VerificaTamano():
self.RenombraLog()
def VerificaTamano(self):
if os.path.getsize(self.Archivo) >= self.TamanoMaximo:
return True
else:
return False
def RenombraLog(self):
Parte1 = os.path.splitext(os.path.basename(self.Archivo))[0]
Extension = os.path.splitext(os.path.basename(self.Archivo))[1]
Complemento = hora = strftime("_%Y%m%d_%H%M%S", localtime())
Nuevonombre = Parte1 + Complemento + Extension
os.rename(self.Archivo,Nuevonombre)
class Parametros:
def __init__(self, Ini, TipoObjeto):
self.ArchivoIni = Ini
self.Error = False
if os.path.isfile(self.ArchivoIni):
if TipoObjeto == 'directorio':
self.Directorios = self.LeeLista('datos_monitoreo','directorios')
if self.Directorios <> False:
self.ValidaDirectorios()
else:
self.Archivos = self.LeeLista('datos_monitoreo','archivos')
if self.Archivos <> False:
self.ValidaArchivos()
self.MinutosIntervalo = self.LeeNumerico('datos_monitoreo','minutos_intervalo')
self.Intervalo = self.MinutosIntervalo * 60
self.Servidor = self.LeeString('datos_servidor_correo', 'servidor')
self.RequiereAutenticacion = self.LeeString('datos_servidor_correo','requiere_autenticacion', ['si', 'no'])
self.Puerto = self.LeeNumerico('datos_servidor_correo', 'puerto')
self.Cuenta = self.LeeString('datos_servidor_correo', 'cuenta')
self.De = self.LeeString('datos_correo', 'de')
self.Para = self.LeeLista('datos_correo', 'para')
self.ParaAdmin = self.LeeLista('datos_correo', 'para_admin')
else:
      print(Error1.format(self.ArchivoIni))
self.Error = True
def ValidaDirectorios(self):
for Directorio in self.Directorios:
if not os.path.isdir(Directorio):
print(Error3.format(Directorio))
self.Error = True
if self.Error:
return False
else:
return True
def ValidaArchivos(self):
for Archivo in self.Archivos:
if not os.path.isfile(Archivo):
print(Error4.format(Archivo))
self.Error = True
if self.Error:
return False
else:
return True
def LeeLista(self, seccion, opcion):
parser = SafeConfigParser()
parser.read(self.ArchivoIni)
valor = parser.get(seccion,opcion).strip()
cadena = ''
Lista = []
if valor.strip() <> '':
for caracter in valor:
if caracter <> ';':
cadena = cadena + caracter
else:
Lista.append(cadena.strip())
cadena = ''
Lista.append(cadena.strip())
return Lista
else:
print(Error2.format(opcion))
self.Error = True
return False
def LeeString(self, seccion, opcion, valores = None):
parser = SafeConfigParser()
parser.read(self.ArchivoIni)
MiString = parser.get(seccion,opcion)
MiString = MiString.strip()
if MiString <> '':
ValorValido = True
if valores <> None:
if MiString not in valores:
ValorValido = False
if ValorValido:
return MiString
else:
print(Error5.format(MiString,opcion))
self.Error = True
return False
else:
print(Error2.format(opcion))
self.Error = True
return False
def LeeNumerico(self, seccion, opcion):
parser = SafeConfigParser()
parser.read(self.ArchivoIni)
Numero = 0
try:
Numero = int(parser.get(seccion,opcion))
return Numero
except:
print(Error2.format(opcion))
self.Error = True
return False
class Monitor:
def __init__(self):
self.Archivos = []
self.ArchivosError = []
def ArchivoVerificable(self, Archivo):
if os.path.isfile(Archivo):
if os.access(Archivo, os.R_OK):
if not enllavado(Archivo):
Verificable = True
else:
Verificable = False
self.ArchivosError.append([Archivo, 'enllavado'])
else:
Verificable = False
self.ArchivosError.append([Archivo, 'sinpermisolectura'])
else:
Verificable = False
self.ArchivosError.append([Archivo, 'noexiste'])
return Verificable
  def CargaArchivos(self, TipoObjeto, Objetos): # Initial load of the files and their sha1 hashes
self.Archivos = []
Resultado = False
for Archivo in Objetos:
RegistroArchivo = []
RegistroArchivoError = []
      if os.path.isfile(Archivo): # If the file exists
        if os.access(Archivo,os.R_OK): # If we have read permission
          if not enllavado(Archivo): # If it is not locked (checked with the artamiz function)
            RegistroArchivo.append(Archivo)
            RegistroArchivo.append(calcsum(Archivo,'a','sha1')) # Store the file's sha1 hash
self.Archivos.append(RegistroArchivo)
else:
RegistroArchivoError.append(Archivo)
RegistroArchivoError.append('enllavado')
self.ArchivosError.append(RegistroArchivoError)
else:
RegistroArchivoError.append(Archivo)
RegistroArchivoError.append('sinpermisolectura')
self.ArchivosError.append(RegistroArchivoError)
if self.Archivos:
Resultado = True
return Resultado
def VerificaArchivos(self, CausaAlerta):
Indice = 0
Alerta = False
Alertas = []
self.ArchivosError = []
    for Archivo in self.Archivos: # Walk the list of files
if self.ArchivoVerificable(Archivo[0]):
NuevoHash = calcsum(Archivo[0], 'a', 'sha1')
if CausaAlerta == 'nocambio':
if Archivo[1] == NuevoHash:
Alerta = True
Alertas.append(Archivo[0])
elif CausaAlerta == 'cambio':
if Archivo[1] <> NuevoHash:
Alerta = True
Alertas.append(Archivo[0])
else:
None
self.Archivos[Indice] = [Archivo[0], NuevoHash]
Indice = Indice + 1
return Alerta, Alertas
def main():
def HintDeUso():
print(' Monitorea la variación de archivos.\n')
print(' Uso: python {0} [?,-nC, -a]\n'.format(Programa))
print(' Opciones:')
print(' <ninguna>: Alerta si hay cambios en directorios.')
print(' -nC: Alerta cuando no hay cambios en los objetos monitoreados.')
print(' -a: Monitorea archivos en lugar de directorios completos.')
print(' ?: Muestra esta ayuda.\n')
print(' Este programa es software libre bajo licencia GPLv3.\n')
def PantallaInicial():
if os.name == 'posix':
os.system('clear')
elif os.name == 'nt':
os.system('cls')
else:
None
print('{0} {1}. {2}\n'.format(Programa,Ver,Copyright))
def LeeParametrosLc():
CausaAlerta = 'cambio'
TipoObjeto = 'directorio'
ParametroOk = True
try:
ar1 = argv[1]
if argv[1] == '-nC':
CausaAlerta = 'nocambio'
elif argv[1] == '-a':
TipoObjeto = 'archivo'
else:
ParametroOk = False
except:
None
if ParametroOk:
try:
ar2 = argv[2]
if ar2 == '-nC':
CausaAlerta = 'nocambio'
elif ar2 == '-a':
TipoObjeto = 'archivo'
else:
ParametroOk = False
except:
None
return ParametroOk, CausaAlerta, TipoObjeto
def HoraTexto():
return strftime('%Y-%m-%d %H:%M:%S', localtime())
def ImprimeLinea():
print('------------------------------------------------------------------------------')
def CargaInicial():
if TipoObjeto == 'directorio':
Archivos = []
for Directorio in ParametrosIni.Directorios:
ListaArchivos = os.listdir(Directorio)
for Archivo in ListaArchivos:
Archivos.append(os.path.join(Directorio, Archivo))
ResultadoCarga = MiMonitor.CargaArchivos(TipoObjeto, Archivos)
else:
ResultadoCarga = MiMonitor.CargaArchivos(TipoObjeto, ParametrosIni.Archivos)
if MiMonitor.ArchivosError:
PreparaRegistroErr(EventoLog5.format(HoraTexto(),MiMonitor.ArchivosError))
return ResultadoCarga
def PreparaRegistroErr(Registro):
LogServicio.GrabaRegistroLog(Registro)
print(Registro)
def PreparaRegistroLog(Archivo, Hora, Causa):
RegistroLog = MensajeLog1.format(Hora,Causa,Archivo,ParametrosIni.MinutosIntervalo)
LogMonitoreo.GrabaRegistroLog(RegistroLog)
print(RegistroLog)
def PreparaCorreoLog(Alertas, CausaAlerta, Hora):
MiCorreo.CreaAsuntoLog(CausaAlerta)
MiCorreo.CreaMensajeLog(Alertas, CausaAlerta, ParametrosIni.MinutosIntervalo, Hora)
ResultadoEnvio = MiCorreo.EnviarCorreo(ParametrosIni.De, ParametrosIni.Para)
    Hora = HoraTexto() # Refresh the time for the event-log entry
if ResultadoEnvio == 0:
PreparaRegistroErr(EventoLog1.format(Hora,CausaAlerta,ParametrosIni.Para))
elif ResultadoEnvio == 1:
PreparaRegistroErr(EventoLog2.format(Hora,CausaAlerta))
PreparaRegistroErr(ErrorLog1.format(Hora))
else:
PreparaRegistroErr(EventoLog2.format(Hora,CausaAlerta))
PreparaRegistroErr(ErrorLog2.format(Hora))
def InformaInicio(Hora):
if TipoObjeto == 'directorio':
Objetos = str(ParametrosIni.Directorios)
else:
Objetos = str(ParametrosIni.Archivos)
PreparaRegistroErr(EventoLog0.format(Hora,Programa,CausaAlerta,ParametrosIni.MinutosIntervalo,TipoObjeto,Objetos))
Texto = Programa + '@' + Maquina + ': ** Se inició el servicio'
MiCorreo.CreaAsunto(Texto)
Texto = 'El servicio ' + Programa + '-' + Ver + ' inició.\n\n'
Texto = Texto + 'Equipo : ' + Maquina + '\n'
Texto = Texto + 'Hora : ' + Hora + '\n'
Texto = Texto + 'Regla : ' + CausaAlerta + '\n'
Texto = Texto + 'Tipo objeto: ' + TipoObjeto + '\n'
if TipoObjeto == 'directorio':
Texto = Texto + 'Directorios: ' + str(ParametrosIni.Directorios) + '\n\n'
else:
Texto = Texto + 'Archivos : ' + str(ParametrosIni.Archivos) + '\n\n'
Texto = Texto + 'La actividad del monitoreo se puede consultar en los log del servicio.'
MiCorreo.CreaMensaje(Texto)
ResultadoEnvio = MiCorreo.EnviarCorreo(ParametrosIni.De, ParametrosIni.ParaAdmin)
    Hora = HoraTexto() # Refresh the time for the event log
if ResultadoEnvio == 0:
PreparaRegistroErr(EventoLog3.format(Hora, Programa, ParametrosIni.ParaAdmin))
elif ResultadoEnvio == 1:
PreparaRegistroErr(EventoLog4.format(Hora, Programa))
PreparaRegistroErr(ErrorLog1.format(Hora))
else:
PreparaRegistroErr(EventoLog4.format(Hora, Programa))
PreparaRegistroErr(ErrorLog2.format(Hora))
def MonitoreaArchivos():
PreparaRegistroErr(EventoLog6.format(HoraTexto()))
HayAlerta, Alertas = MiMonitor.VerificaArchivos(CausaAlerta)
Hora = HoraTexto()
for ArchivoError in MiMonitor.ArchivosError:
PreparaRegistroLog(ArchivoError[0], Hora, ArchivoError[1])
if HayAlerta:
for Archivo in Alertas:
PreparaRegistroLog(Archivo, Hora, CausaAlerta)
PreparaCorreoLog(Alertas, CausaAlerta, Hora)
# if HayAlerta or MiMonitor.ArchivosError:
# ImprimeLinea()
PreparaRegistroErr(EventoLog7.format(HoraTexto()))
ImprimeLinea()
try:
PantallaInicial()
ParametrosLcOk, CausaAlerta, TipoObjeto = LeeParametrosLc()
if ParametrosLcOk:
      ParametrosIni = Parametros('armoni.ini', TipoObjeto) # Create the parameters object
      LogServicio = Log('armoni.err') # Records service events
      if not ParametrosIni.Error:
        LogMonitoreo = Log('armoni.log') # Records the monitoring activity
        MiMonitor = Monitor() # Create the monitor object
if ParametrosIni.RequiereAutenticacion == 'si':
Pwd = standard_b64encode(getpass("Password de '" + ParametrosIni.Cuenta + "': "))
MiCorreo = Correo(ParametrosIni.Servidor, ParametrosIni.Puerto, ParametrosIni.Cuenta, Pwd)
else:
MiCorreo = Correo(ParametrosIni.Servidor, ParametrosIni.Puerto, ParametrosIni.Cuenta)
print("\nIniciando el servicio de verificación archivos con la regla '"+ CausaAlerta + "'...")
if CargaInicial():
print("\nServicio iniciado")
ImprimeLinea()
InformaInicio(HoraTexto())
ImprimeLinea()
Error = False
sleep(ParametrosIni.Intervalo)
while not Error:
MonitoreaArchivos()
if TipoObjeto == 'directorio':
if not CargaInicial():
None
#Error = True
sleep(ParametrosIni.Intervalo)
else:
PreparaRegistroErr(ErrorLog4.format(HoraTexto(),Programa))
else:
PreparaRegistroErr(ErrorLog3.format(HoraTexto(),Programa))
else:
HintDeUso()
except(KeyboardInterrupt, SystemExit):
pass
PreparaRegistroErr(EventoLog100.format(HoraTexto(), Programa))
if __name__ == '__main__':
main()
else:
None
| gpl-3.0 | 3,174,296,808,603,974,700 | 33.212121 | 141 | 0.700066 | false |
MSFTOSSMgmt/WPSDSCLinux | Providers/Scripts/2.4x-2.5x/Scripts/nxIPAddress.py | 2 | 31170 | #!/usr/bin/env python
#============================================================================
# Copyright (c) Microsoft Corporation. All rights reserved. See license.txt for license information.
#============================================================================
import os
import sys
import tempfile
import re
import platform
import imp
import socket
protocol=imp.load_source('protocol','../protocol.py')
"""
MOF:
[ClassVersion("1.0.0"), FriendlyName("nxIPAddress")]
class MSFT_nxIPAddress : OMI_BaseResource
{
[write] string IPAddress;
[Key] string InterfaceName;
[write,ValueMap{"Automatic", "Static"},Values{"Automatic", "Static"}] string BootProtocol;
[write] string DefaultGateway;
[write,ValueMap{"Present", "Absent"}, Values{"Present", "Absent"}] string Ensure;
[write] integer PrefixLength;
[Key,write,ValueMap{"IPv4", "IPv6"},Values{"IPv4", "IPv6"}] string AddressFamily;
};
"""
def Print(s,file=sys.stdout):
file.write(s+'\n')
def ValidateAddresses(IPAddress,AddressFamily,PrefixLength):
if 'IPv4' in AddressFamily:
ptype=socket.AF_INET
elif 'IPv6' in AddressFamily:
ptype=socket.AF_INET6
else:
return False
try:
socket.inet_pton(ptype,IPAddress)
except:
Print('Error: IPAddress "'+IPAddress+'" is invalid.',file=sys.stderr)
return False
if type(PrefixLength) == int or type(PrefixLength) == long :
if 'IPv4' in AddressFamily and ( PrefixLength < 0 or PrefixLength > 32) :
Print('Error: PrefixLength "'+ str(PrefixLength) +'" is invalid. Values are 0-32.',file=sys.stderr)
return False
if 'IPv6' in AddressFamily and ( PrefixLength < 0 or PrefixLength > 128) :
Print('Error: PrefixLength "'+ str(PrefixLength) +'" is invalid. Values are 0-128.',file=sys.stderr)
return False
return True
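# For example (a sketch), both of these return True with the checks above:
#   ValidateAddresses('192.168.0.10', 'IPv4', 24)
#   ValidateAddresses('2001:db8::1', 'IPv6', 64)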
def bitNetmaskConversion(PrefixLength):
if PrefixLength == '':
return ''
if type(PrefixLength) != long and type(PrefixLength) != int :
N = int(PrefixLength)
else :
N = PrefixLength
M = int(N / 8) #number of 255 sections (full octets)
MASK = 255
netmaskIP = ""
count = 0
while count < M:
netmaskIP = netmaskIP + "255."
count += 1
if N % 8 != 0:
netmaskIP += str((MASK << (8 - N%8)) & MASK) + "."
count += 1
while count < 4:
netmaskIP = netmaskIP + "0."
count += 1
if netmaskIP[-1] == ".":
netmaskIP = netmaskIP[:-1]
return netmaskIP
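# For example (a sketch): bitNetmaskConversion(24) -> '255.255.255.0',
# bitNetmaskConversion(20) -> '255.255.240.0'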
def netmaskBitConversion(netmask):
if netmask==None or netmask=='' :
return 0
arrTmp = netmask.strip("'")
arr = arrTmp.split(".")
sumT = 0
for i in arr:
i = int(i)
if i == 255:
sumT += 8
else:
j = 0
while j < 8:
sumT += (i >> j) & 1
j+=1
return sumT
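# For example (a sketch): netmaskBitConversion('255.255.255.0') -> 24,
# netmaskBitConversion('255.255.240.0') -> 20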
def init_vars(IPAddress,InterfaceName,BootProtocol,DefaultGateway,Ensure,PrefixLength,AddressFamily):
if PrefixLength == None:
PrefixLength=''
if BootProtocol == None or len(BootProtocol)<1:
BootProtocol='Automatic'
else :
BootProtocol=BootProtocol[0].upper()+BootProtocol[1:].lower()
if Ensure == None or len(Ensure)<1:
Ensure='Present'
else :
Ensure=Ensure[0].upper()+Ensure[1:].lower()
if AddressFamily == None or len(AddressFamily)<1:
AddressFamily='IPv4'
else :
AddressFamily=AddressFamily[0].upper()+AddressFamily[1].upper()+AddressFamily[2].lower()+AddressFamily[3:]
if IPAddress == None:
IPAddress=''
if len(IPAddress)>0:
if ValidateAddresses(IPAddress,AddressFamily,PrefixLength) == False:
return False,IPAddress,InterfaceName,BootProtocol,DefaultGateway,Ensure,PrefixLength,AddressFamily
elif BootProtocol != 'Automatic' and Ensure == 'Present':
Print('ERROR: BootProtocol != Automatic. IPAdress is required.',file=sys.stdout)
return False,IPAddress,InterfaceName,BootProtocol,DefaultGateway,Ensure,PrefixLength,AddressFamily
if DefaultGateway == None:
DefaultGateway=''
if len(DefaultGateway) > 0 and ValidateAddresses(DefaultGateway,AddressFamily,'') == False:
return False,IPAddress,InterfaceName,BootProtocol,DefaultGateway,Ensure,PrefixLength,AddressFamily
return True,IPAddress,InterfaceName,BootProtocol,DefaultGateway,Ensure,PrefixLength,AddressFamily
def Set_Marshall(IPAddress,InterfaceName,BootProtocol,DefaultGateway,Ensure,PrefixLength,AddressFamily):
ret,IPAddress,InterfaceName,BootProtocol,DefaultGateway,Ensure,PrefixLength,AddressFamily = \
init_vars(IPAddress,InterfaceName,BootProtocol,DefaultGateway,Ensure,PrefixLength,AddressFamily)
if ret is False :
return [-1]
MyDistro=GetMyDistro()
retval = MyDistro.Set(IPAddress,InterfaceName,BootProtocol,DefaultGateway,Ensure,PrefixLength,AddressFamily)
return retval
def Test_Marshall(IPAddress,InterfaceName,BootProtocol,DefaultGateway,Ensure,PrefixLength,AddressFamily):
ret,IPAddress,InterfaceName,BootProtocol,DefaultGateway,Ensure,PrefixLength,AddressFamily = \
init_vars(IPAddress,InterfaceName,BootProtocol,DefaultGateway,Ensure,PrefixLength,AddressFamily)
if ret is False :
return [-1]
MyDistro=GetMyDistro()
return MyDistro.Test(IPAddress,InterfaceName,BootProtocol,DefaultGateway,Ensure,PrefixLength,AddressFamily)
def Get_Marshall(IPAddress,InterfaceName,BootProtocol,DefaultGateway,Ensure,PrefixLength,AddressFamily):
arg_names=list(locals().keys())
ret,IPAddress,InterfaceName,BootProtocol,DefaultGateway,Ensure,PrefixLength,AddressFamily = \
init_vars(IPAddress,InterfaceName,BootProtocol,DefaultGateway,Ensure,PrefixLength,AddressFamily)
if ret is False :
return [-1,IPAddress,InterfaceName,BootProtocol,DefaultGateway,Ensure,PrefixLength,AddressFamily]
retval = 0
MyDistro=GetMyDistro()
(retval, IPAddress,InterfaceName,BootProtocol,DefaultGateway,Ensure,PrefixLength,AddressFamily) = MyDistro.Get(IPAddress,InterfaceName,BootProtocol,DefaultGateway,Ensure,PrefixLength,AddressFamily)
Ensure = protocol.MI_String(Ensure.encode("utf-8"))
IPAddress = protocol.MI_String(IPAddress.encode("utf-8"))
AddressFamily= protocol.MI_String(AddressFamily.encode("utf-8"))
InterfaceName = protocol.MI_String(InterfaceName.encode("utf-8"))
BootProtocol = protocol.MI_String(BootProtocol.encode("utf-8"))
DefaultGateway = protocol.MI_String(DefaultGateway.encode("utf-8"))
if type(PrefixLength) == int or type(PrefixLength) == long :
PrefixLength=protocol.MI_Uint32(PrefixLength)
else:
PrefixLength=protocol.MI_Uint32(int(PrefixLength))
retd={}
ld=locals()
for k in arg_names :
retd[k]=ld[k]
return retval, retd
def ReplaceFileContentsAtomic(filepath, contents):
"""
Write 'contents' to 'filepath' by creating a temp file, and replacing original.
"""
handle, temp = tempfile.mkstemp(dir = os.path.dirname(filepath))
if type(contents) == str :
contents=contents.encode('latin-1')
try:
os.write(handle, contents)
except IOError, e:
        Print('ReplaceFileContentsAtomic: Writing to file ' + filepath + ' Exception is ' + str(e),file=sys.stderr)
return None
os.close(handle)
try:
os.rename(temp, filepath)
return None
except IOError, e:
        Print('ReplaceFileContentsAtomic: Renaming ' + temp + ' to ' + filepath + ' Exception is ' + str(e),file=sys.stderr)
try:
os.remove(filepath)
except IOError, e:
            Print('ReplaceFileContentsAtomic: Removing ' + filepath + ' Exception is ' + str(e),file=sys.stderr)
try:
os.rename(temp,filepath)
except IOError, e:
            Print('ReplaceFileContentsAtomic: Renaming ' + temp + ' to ' + filepath + ' Exception is ' + str(e),file=sys.stderr)
return 1
return 0
def GetMyDistro(dist_class_name=''):
"""
Return MyDistro object.
NOTE: Logging is not initialized at this point.
"""
if dist_class_name == '':
if 'Linux' in platform.system():
Distro=platform.dist()[0]
else : # I know this is not Linux!
if 'FreeBSD' in platform.system():
Distro=platform.system()
Distro=Distro.strip('"')
Distro=Distro.strip(' ')
dist_class_name=Distro+'Distro'
else:
Distro=dist_class_name
if not globals().has_key(dist_class_name):
Print(Distro+' is not a supported distribution.')
return None
return globals()[dist_class_name]() # the distro class inside this module.
class AbstractDistro(object):
def __init__(self):
self.gateway_file='/etc/sysconfig/network'
self.gateway_prefix=''
self.ifcfg_prefix='/etc/sysconfig/network-scripts/ifcfg-'
def init_re_dict(self,src_dict):
re_dict=dict()
for k in src_dict:
re_dict[k]=re.compile(r'\s*'+k+'.*')
return re_dict
def init_src_dicts(self,IPAddress,InterfaceName,BootProtocol,DefaultGateway,Ensure,PrefixLength,AddressFamily):
self.gateway_dict=dict()
self.ifcfg_v4_dict=dict()
self.ifcfg_v4_dict['ONBOOT=']='yes'
self.ifcfg_v4_dict['DEVICE=']=InterfaceName
if BootProtocol.lower() == 'static':
self.ifcfg_v4_dict['BOOTPROTO=']='none'
else:
self.ifcfg_v4_dict['BOOTPROTO=']='dhcp'
self.ifcfg_v4_dict['DHCPCLASS=']=''
self.ifcfg_v4_dict['IPADDR=']=IPAddress
if PrefixLength != 0 and PrefixLength != '':
self.ifcfg_v4_dict['NETMASK=']=bitNetmaskConversion(PrefixLength)
else:
self.ifcfg_v4_dict['NETMASK=']=''
self.ifcfg_v6_dict=dict()
self.ifcfg_v6_dict['ONBOOT=']='yes'
self.ifcfg_v6_dict['DEVICE=']=InterfaceName
if BootProtocol.lower() == 'static':
self.ifcfg_v6_dict['BOOTPROTO=']='none'
else:
self.ifcfg_v6_dict['BOOTPROTO=']='dhcp'
self.ifcfg_v6_dict['DHCPCLASS=']=''
if BootProtocol.lower() == 'static':
self.ifcfg_v6_dict['IPV6INIT=']='yes'
self.ifcfg_v6_dict['IPV6_AUTOCONF=']='no'
else :
self.ifcfg_v6_dict['IPV6INIT=']='yes'
self.ifcfg_v6_dict['IPV6_AUTOCONF=']='yes'
if PrefixLength != 0 and PrefixLength != '':
self.ifcfg_v6_dict['IPV6ADDR=']=IPAddress+'/'+str(PrefixLength)
else:
self.ifcfg_v6_dict['IPV6ADDR=']=IPAddress
self.gateway_dict['GATEWAY=']=DefaultGateway
if AddressFamily == 'IPv4':
self.ifcfg_dict=self.ifcfg_v4_dict
self.addr_key='IPADDR='
else :
self.ifcfg_dict=self.ifcfg_v6_dict
self.addr_key='IPV6ADDR='
self.gateway_dict['NETWORKING_IPV6=']='yes'
def src_dicts_to_params(self,IPAddress,InterfaceName,BootProtocol,DefaultGateway,Ensure,PrefixLength,AddressFamily):
if AddressFamily=='IPv4':
if 'NETMASK=' in self.ifcfg_dict.keys() and len(self.ifcfg_dict['NETMASK=']) > 0 :
PrefixLength=netmaskBitConversion(self.ifcfg_dict['NETMASK='])
elif PrefixLength != '' and PrefixLength > 0 and '/' in self.ifcfg_dict[self.addr_key] :
PrefixLength=int(self.ifcfg_dict[self.addr_key].split('/')[1])
self.ifcfg_dict[self.addr_key]=self.ifcfg_dict[self.addr_key].split('/')[0]
bootproto=''
if BootProtocol != None and len(BootProtocol) > 0 :
if self.ifcfg_dict['BOOTPROTO='] == 'dhcp':
bootproto='Automatic'
else:
bootproto='Static'
gateway=''
if len(self.gateway_dict['GATEWAY=']) >0:
gateway=self.gateway_dict['GATEWAY=']
return self.ifcfg_dict[self.addr_key],self.ifcfg_dict['DEVICE='],bootproto,gateway,Ensure,PrefixLength,AddressFamily
def restart_network(self,Interface):
os.system('ifdown ' + Interface)
os.system('ifup ' + Interface)
return [0]
def interface_down(self,Interface):
os.system('ifconfig ' + Interface + ' down')
return [0]
def UpdateValuesInFile(self,fname,src_dict,re_dict,Ensure):
updated=''
if os.path.exists(fname) != True:
# if this file is not here - we will create it
try:
F = open(fname,'w+')
F.write('# Created by Microsoft DSC nxIPAddress Provider\n')
F.close()
except:
raise
try:
F = open(fname,'r')
for l in F.readlines():
if l[0]=='#':
updated+=l
continue
for k in re_dict:
if re_dict[k]!=None:
if re.match(re_dict[k],l): # re.match is anchored to the line start.
if len(src_dict[k])==0 :
l=''
re_dict[k]=None
break
else:
l=re.sub(re_dict[k],k+src_dict[k],l)
re_dict[k]=None
if len(l)>2:
updated+=l
for k in re_dict:
if re_dict[k] != None and len(src_dict[k]) > 0 :
l=k+src_dict[k]+'\n'
updated+=l
except:
raise
ReplaceFileContentsAtomic(fname,updated)
return [0]
def GetValuesFromFile(self,fname,src_dict,re_dict):
if os.path.exists(fname) != True:
return
try:
F = open(fname,'r')
for l in F.readlines():
for k in re_dict:
if re_dict[k]!=None:
if re.match(re_dict[k],l): # re.match is anchored to the line start.
src_dict[k]=l.split(k[-1])[1].strip('\n')
re_dict[k]=None
F.close()
except:
raise
def Set(self,IPAddress,InterfaceName,BootProtocol,DefaultGateway,Ensure,PrefixLength,AddressFamily):
retval=[-1]
if len(self.ifcfg_prefix)>0:
self.ifcfg_file=self.ifcfg_prefix+InterfaceName
if len(self.gateway_prefix)>0:
self.gateway_file=self.gateway_prefix+InterfaceName
self.init_src_dicts(IPAddress,InterfaceName,BootProtocol,DefaultGateway,Ensure,PrefixLength,AddressFamily)
gateway_re_dict=self.init_re_dict(self.gateway_dict)
ifcfg_re_dict=self.init_re_dict(self.ifcfg_dict)
if Ensure == 'Absent':
if len(self.ifcfg_prefix)>0:
if os.path.exists(self.ifcfg_file):
os.remove(self.ifcfg_file)
retval=[0]
else:
retval=self.UpdateValuesInFile(self.ifcfg_file,self.ifcfg_dict,ifcfg_re_dict,Ensure)
if len(self.gateway_prefix)>0:
if os.path.exists(self.gateway_file):
os.remove(self.gateway_file)
retval=[0]
else:
retval=self.UpdateValuesInFile(self.gateway_file,self.gateway_dict,gateway_re_dict,Ensure)
self.interface_down(InterfaceName)
else:
retval=self.UpdateValuesInFile(self.gateway_file,self.gateway_dict,gateway_re_dict,Ensure)
retval=self.UpdateValuesInFile(self.ifcfg_file,self.ifcfg_dict,ifcfg_re_dict,Ensure)
retval=self.restart_network(InterfaceName)
return retval
def Test(self,IPAddress,InterfaceName,BootProtocol,DefaultGateway,Ensure,PrefixLength,AddressFamily):
if len(self.ifcfg_prefix)>0:
self.ifcfg_file=self.ifcfg_prefix+InterfaceName
if len(self.gateway_prefix)>0:
self.gateway_file=self.gateway_prefix+InterfaceName
self.init_src_dicts(IPAddress,InterfaceName,BootProtocol,DefaultGateway,Ensure,PrefixLength,AddressFamily)
test_gateway=dict(self.gateway_dict)
for k in test_gateway:
test_gateway[k]=''
test_gateway_re_dict=self.init_re_dict(self.gateway_dict)
self.GetValuesFromFile(self.gateway_file,test_gateway,test_gateway_re_dict)
for k in self.gateway_dict:
if k == 'default ' and len(self.gateway_dict[k]) >0: # SuSE
self.gateway_dict[k]=self.gateway_dict[k].split(' ')[0]
if self.gateway_dict[k] != test_gateway[k]:
return [-1]
test_ifcfg=dict(self.ifcfg_dict)
for k in test_ifcfg:
if k != 'iface ':
test_ifcfg[k]=''
test_ifcfg_re_dict=self.init_re_dict(self.ifcfg_dict)
self.GetValuesFromFile(self.ifcfg_file,test_ifcfg,test_ifcfg_re_dict)
if Ensure == 'Absent':
if 'iface ' in test_ifcfg.keys() and test_ifcfg['iface ']!=None and len(test_ifcfg['iface '])>0:
return [-1]
elif len(self.ifcfg_prefix)>0 and os.path.exists(self.ifcfg_file) :
return [-1]
else:
return [0]
for k in self.ifcfg_dict:
if self.ifcfg_dict[k] != test_ifcfg[k]:
return [-1]
return [0]
def Get(self,IPAddress,InterfaceName,BootProtocol,DefaultGateway,Ensure,PrefixLength,AddressFamily):
# calling Test here will fill the dicts with values
self.Test(IPAddress,InterfaceName,BootProtocol,DefaultGateway,Ensure,PrefixLength,AddressFamily)
IPAddress,InterfaceName,BootProtocol,DefaultGateway,Ensure,PrefixLength,AddressFamily = \
self.src_dicts_to_params(IPAddress,InterfaceName,BootProtocol,DefaultGateway,Ensure,PrefixLength,AddressFamily)
if PrefixLength=='':
PrefixLength=0
return 0,IPAddress,InterfaceName,BootProtocol,DefaultGateway,Ensure,PrefixLength,AddressFamily
class SuSEDistro(AbstractDistro):
def __init__(self):
super(SuSEDistro,self).__init__()
self.gateway_prefix='/etc/sysconfig/network/ifroute-'
self.ifcfg_prefix='/etc/sysconfig/network/ifcfg-'
def init_src_dicts(self,IPAddress,InterfaceName,BootProtocol,DefaultGateway,Ensure,PrefixLength,AddressFamily):
self.gateway_v4_dict=dict()
self.gateway_v6_dict=dict()
if BootProtocol.lower() != 'static' or len(DefaultGateway) == 0:
self.gateway_v4_dict['default ']=''
self.gateway_v6_dict['default ']=''
else:
self.gateway_v4_dict['default ']=DefaultGateway+' '+bitNetmaskConversion(PrefixLength)+' '+InterfaceName
self.gateway_v6_dict['default ']=DefaultGateway+' '+InterfaceName
self.ifcfg_v4_dict=dict()
if BootProtocol.lower() != 'static':
self.ifcfg_v4_dict['BOOTPROTO=']='dhcp'
else:
self.ifcfg_v4_dict['BOOTPROTO=']='static'
self.ifcfg_v4_dict['STARTMODE=']='auto'
self.ifcfg_v4_dict['IPADDR=']=IPAddress
self.ifcfg_v4_dict['NETMASK=']=bitNetmaskConversion(PrefixLength)
self.ifcfg_v6_dict=dict()
if BootProtocol.lower() != 'static':
self.ifcfg_v6_dict['BOOTPROTO=']='autoip'
else:
self.ifcfg_v6_dict['BOOTPROTO=']='static'
self.ifcfg_v6_dict['STARTMODE=']='auto'
if PrefixLength != 0 and PrefixLength != '':
self.ifcfg_v6_dict['IPADDR=']=IPAddress+'/'+str(PrefixLength)
else:
self.ifcfg_v6_dict['IPADDR=']=IPAddress
if AddressFamily == 'IPv4':
self.ifcfg_dict=self.ifcfg_v4_dict
self.addr_key='IPADDR='
self.gateway_dict=self.gateway_v4_dict
else :
self.ifcfg_dict=self.ifcfg_v6_dict
self.addr_key='IPADDR='
self.gateway_dict=self.gateway_v6_dict
def src_dicts_to_params(self,IPAddress,InterfaceName,BootProtocol,DefaultGateway,Ensure,PrefixLength,AddressFamily):
if AddressFamily=='IPv4':
if 'NETMASK=' in self.ifcfg_dict.keys() and len(self.ifcfg_dict['NETMASK=']) > 0 :
PrefixLength=netmaskBitConversion(self.ifcfg_dict['NETMASK='])
elif PrefixLength != '' and PrefixLength > 0 and '/' in self.ifcfg_dict[self.addr_key] :
PrefixLength=int(self.ifcfg_dict[self.addr_key].split('/')[1])
self.ifcfg_dict[self.addr_key]=self.ifcfg_dict[self.addr_key].split('/')[0]
bootproto=''
if BootProtocol != '' and 'BOOTPROTO=' in self.ifcfg_v4_dict.keys() and len(self.ifcfg_v4_dict['BOOTPROTO=']) >0 :
if self.ifcfg_v4_dict['BOOTPROTO='] != 'static':
bootproto='Automatic'
else:
bootproto='Static'
gateway=''
# The gateway line here for SuSE is 'default <addr> <interface>'.
# Remove the <interface> so it can match <addr>.
if len(self.gateway_dict['default ']) >0:
gateway=self.gateway_dict['default '].split(' ')[0]
return self.ifcfg_dict['IPADDR='],self.ifcfg_file.split('-')[-1],bootproto,gateway,Ensure,PrefixLength,AddressFamily
def restart_network(self,Interface):
os.system('ifdown ' + Interface)
os.system('ifup ' + Interface)
return [0]
class debianDistro(AbstractDistro):
def __init__(self):
super(debianDistro,self).__init__()
self.ifcfg_prefix=''
self.gateway_prefix=''
self.ifcfg_file='/etc/network/interfaces'
self.gateway_file='/etc/network/interfaces'
def init_re_dict(self,src_dict):
re_dict=dict()
for k in src_dict:
re_dict[k]=re.compile(r'\s*'+k+'.*')
if 'iface ' in re_dict:
re_dict['iface ']=re.compile(r'\s*iface '+src_dict['iface '])
if 'inet ' in re_dict:
re_dict['inet ']=re.compile(r'\s*iface '+src_dict['iface '] + ' inet .*')
if 'inet6 ' in re_dict:
re_dict['inet6 ']=re.compile(r'\s*iface '+src_dict['iface '] + ' inet6 .*')
return re_dict
def init_src_dicts(self,IPAddress,InterfaceName,BootProtocol,DefaultGateway,Ensure,PrefixLength,AddressFamily):
self.ifcfg_v4_dict={}
self.ifcfg_v6_dict={}
self.gateway_dict={}
if BootProtocol.lower() == 'static' :
self.ifcfg_v4_dict['inet '] = 'static'
elif BootProtocol.lower() == 'automatic':
self.ifcfg_v4_dict['inet '] = 'dhcp'
else:
self.ifcfg_v4_dict['inet '] = ''
self.ifcfg_v4_dict['iface ']=InterfaceName
self.ifcfg_v4_dict['autoconf ']=''
self.ifcfg_v4_dict['network ']=''
self.ifcfg_v4_dict['address ']=IPAddress
if PrefixLength !=0 and PrefixLength != '':
self.ifcfg_v4_dict['netmask ']=bitNetmaskConversion(PrefixLength)
self.ifcfg_v6_dict['netmask ']=str(PrefixLength)
else:
self.ifcfg_v4_dict['netmask ']=''
self.ifcfg_v6_dict['netmask ']=''
self.ifcfg_v4_dict['gateway ']=DefaultGateway
if len(BootProtocol) > 0:
self.ifcfg_v6_dict['inet6 ']='static' # static is used for autoconf as well
else:
self.ifcfg_v6_dict['inet6 ']=''
self.ifcfg_v6_dict['iface ']=InterfaceName
if PrefixLength !=0 and PrefixLength != '':
self.ifcfg_v6_dict['address ']=IPAddress
else:
self.ifcfg_v6_dict['address ']=IPAddress
self.ifcfg_v6_dict['gateway ']=DefaultGateway
if AddressFamily == "IPv4":
self.ifcfg_dict=self.ifcfg_v4_dict
self.inet='inet '
else:
if BootProtocol.lower() != 'static':
self.ifcfg_v6_dict['autoconf ']='1'
else:
self.ifcfg_v6_dict['autoconf ']='0'
self.ifcfg_dict=self.ifcfg_v6_dict
self.inet='inet6 '
if Ensure == "Absent":
auto='auto '+InterfaceName
self.ifcfg_dict[auto]=''
def src_dicts_to_params(self,IPAddress,InterfaceName,BootProtocol,DefaultGateway,Ensure,PrefixLength,AddressFamily):
inet=''
if BootProtocol != None and len(BootProtocol) > 0 :
if AddressFamily=='IPv6':
if self.ifcfg_dict['autoconf '] == '1' :
inet = 'Automatic'
else:
inet = 'Static'
else:
if self.ifcfg_dict[self.inet] == 'dhcp':
inet = 'Automatic'
else:
inet = 'Static'
if AddressFamily=='IPv4':
if 'netmask' in self.ifcfg_dict.keys() and len(self.ifcfg_dict['netmask']) > 0 :
PrefixLength=netmaskBitConversion(self.ifcfg_dict['netmask'])
elif PrefixLength != '' and PrefixLength > 0 and '/' in self.ifcfg_dict['address '] :
PrefixLength=int(self.ifcfg_dict['address '].split('/')[1])
self.ifcfg_dict['address ']=self.ifcfg_dict['address '].split('/')[0]
gateway=''
if len(self.ifcfg_dict['gateway ']) >0:
gateway=self.ifcfg_dict['gateway ']
return self.ifcfg_dict['address '],self.ifcfg_dict['iface '],inet,gateway,Ensure,PrefixLength,AddressFamily
def restart_network(self,Interface):
os.system('ifdown --exclude=lo ' + Interface +'; ifup --exclude=lo '+ Interface)
return [0]
def UpdateValuesInFile(self,fname,src_dict,re_dict,Ensure):
if len(src_dict) == 0:
return [0]
removing=False
if self.inet in src_dict.keys() and Ensure=='Absent': # we are trying to remove
removing=True
if removing == False and os.path.exists(fname) != True:
# if this file is not here - we will create it
try:
F = open(fname,'w+')
                F.write('# Created by nxIPAddress DSC Provider\n')
F.close()
except:
raise
try:
F = open(fname,'r')
txt=F.read()
if 'iface ' in src_dict.keys():
srch=r'(^auto '+src_dict['iface ']+'$.*?^iface '+src_dict['iface ']+'.*?$|^iface '+src_dict['iface ']+'.*?$).*?((^auto )|(^iface )|(^$))'
updated=''
r=re.search(srch,txt,flags=re.S|re.M)
if r == None:
if removing: #nothing to remove
return [0]
else : # append values to the end
l='auto ' + src_dict['iface '] + '\niface '+src_dict['iface '] + ' ' + self.inet+src_dict[self.inet] + '\n'
if len(updated) > 0 and updated[-1] != '\n':
updated+='\n'
updated+=l
re_dict['iface ']=None
re_dict[self.inet]=None
for k in re_dict:
if re_dict[k] != None and len(src_dict[k]) > 0 :
l=k+src_dict[k]+'\n'
updated+=l
txt=txt+updated
else: #matched
if removing:
tail=''
rpl=re.compile(r.group(0),flags=re.S|re.M)
txt=rpl.sub(tail,txt)
if txt[-2:] == '\n\n':
txt=txt[:-1]
else : # replace tags - preserve unknown tags
t=r.group(0)
for l in t.splitlines():
if len(l)>1:
l+='\n'
else:
continue
if 'iface ' in re_dict.keys() and re_dict['iface '] != None :
if re.match(re_dict['iface '],l) :
l='iface '+src_dict['iface '] + ' ' + self.inet+src_dict[self.inet] + '\n'
re_dict['iface ']=None
re_dict[self.inet]=None
updated+=l
continue
for k in re_dict.keys():
if re_dict[k]!=None:
if re.match(re_dict[k],l): # re.match is anchored to the line start.
if len(src_dict[k])==0 :
l=''
else:
l=re.sub(re_dict[k],k+src_dict[k],l)
if len(l)>0 and l[-1]!='\n':
l+='\n'
re_dict[k]=None
break
if len(l)>2:
updated+=l
for k in re_dict:
if re_dict[k] != None and len(src_dict[k]) > 0 :
l=k+src_dict[k]+'\n'
updated+=l
tail=''
if updated[-1] != '\n':
tail='\n'
updated+=tail
rpl=re.compile(r.group(0),flags=re.S|re.M)
txt=rpl.sub(updated,txt)
if txt[-2:] == '\n\n':
txt=txt[:-1]
F.close()
except:
raise
ReplaceFileContentsAtomic(fname,txt)
return [0]
def GetValuesFromFile(self,fname,src_dict,re_dict):
if os.path.exists(fname) != True:
return
try:
F = open(fname,'r')
txt=F.read()
if 'iface ' in src_dict.keys():
srch=r'(^auto '+src_dict['iface ']+'$.*?^iface '+src_dict['iface ']+'.*?$|^iface '+src_dict['iface ']+'.*?$).*?((^auto )|(^iface )|(^$))'
r=re.search(srch,txt,flags=re.S|re.M)
if r == None:
return
txt=r.group(0)
for l in txt.splitlines():
for k in re_dict:
if re_dict[k]!=None:
if re.match(re_dict[k],l): # re.match is anchored to the line start.
if k == self.inet:
src_dict[k]=l.split(k[-1])[3].strip('\n')
else:
src_dict[k]=l.split(k[-1])[1].strip('\n')
re_dict[k]=None
F.close()
except:
raise
class redhatDistro(AbstractDistro):
def __init__(self):
super(redhatDistro,self).__init__()
class centosDistro(redhatDistro):
def __init__(self):
super(centosDistro,self).__init__()
class UbuntuDistro(debianDistro):
def __init__(self):
super(UbuntuDistro,self).__init__()
class LinuxMintDistro(UbuntuDistro):
def __init__(self):
super(LinuxMintDistro,self).__init__()
class fedoraDistro(redhatDistro):
def __init__(self):
super(fedoraDistro,self).__init__()
| mit | 2,256,311,520,288,610,000 | 42.291667 | 201 | 0.559801 | false |
MPIBGC-TEE/CompartmentalSystems | tests/Test_pwc_model_run_14C.py | 1 | 5264 | import unittest
import numpy as np
from sympy import Function, Matrix, symbols
from CompartmentalSystems.smooth_reservoir_model import SmoothReservoirModel
from CompartmentalSystems.smooth_model_run import SmoothModelRun
from CompartmentalSystems.pwc_model_run import PWCModelRun
from CompartmentalSystems.smooth_model_run_14C import SmoothModelRun_14C
from CompartmentalSystems.pwc_model_run_14C import PWCModelRun_14C
class TestPWCModelRun_14C(unittest.TestCase):
def setUp(self):
x, y, t, k = symbols("x y t k")
u_1 = Function('u_1')(x, t)
state_vector = Matrix([x, y])
B = Matrix([[-1, 1.5],
[k, -2]])
u = Matrix(2, 1, [u_1, 1])
self.srm = SmoothReservoirModel.from_B_u(
state_vector,
t,
B,
u
)
start_values = np.array([10, 40])
t_0 = 0
t_max = 10
times = np.linspace(t_0, t_max, 11)
disc_times = [5]
parameter_dicts = [{k: 1}, {k: 0.5}]
func_dicts = [{u_1: lambda x_14C, t: 9}, {u_1: lambda x_14C, t: 3}]
pwc_mr = PWCModelRun(
self.srm,
parameter_dicts,
start_values,
times,
disc_times,
func_dicts
)
self.alpha = 0.5
start_values_14C = start_values * self.alpha
def Fa_func(t): return self.alpha
decay_rate = 1.0
self.pwc_mr_14C = PWCModelRun_14C(
pwc_mr,
start_values_14C,
Fa_func,
decay_rate
)
timess = [
np.linspace(t_0, disc_times[0], 6),
np.linspace(disc_times[0], t_max, 6)
]
smrs_14C = []
tmp_start_values = start_values
tmp_start_values_14C = start_values_14C
for i in range(len(disc_times)+1):
smr = SmoothModelRun(
self.srm,
parameter_dict=parameter_dicts[i],
start_values=tmp_start_values,
times=timess[i],
func_set=func_dicts[i]
)
tmp_start_values = smr.solve()[-1]
smrs_14C.append(
SmoothModelRun_14C(
smr,
tmp_start_values_14C,
Fa_func,
decay_rate
)
)
tmp_start_values_14C = smrs_14C[i].solve()[-1]
self.smrs_14C = smrs_14C
def test_solve(self):
soln_smrs_14C = [smr_14C.solve() for smr_14C in self.smrs_14C]
L = [soln[:-1] for soln in soln_smrs_14C[:-1]]
L += [soln_smrs_14C[-1]]
soln_14C_ref = np.concatenate(L, axis=0)
self.assertTrue(
np.allclose(
soln_14C_ref,
self.pwc_mr_14C.solve()
)
)
def test_acc_gross_external_output_vector(self):
ageov_smrs_14C = [smr_14C.acc_gross_external_output_vector()
for smr_14C in self.smrs_14C]
ageov_14C_ref = np.concatenate(ageov_smrs_14C, axis=0)
self.assertTrue(
np.allclose(
ageov_14C_ref,
self.pwc_mr_14C.acc_gross_external_output_vector()
)
)
def test_acc_net_external_output_vector(self):
aneov_smrs_14C = [smr_14C.acc_net_external_output_vector()
for smr_14C in self.smrs_14C]
aneov_14C_ref = np.concatenate(aneov_smrs_14C, axis=0)
self.assertTrue(
np.allclose(
aneov_14C_ref,
self.pwc_mr_14C.acc_net_external_output_vector()
)
)
# Delta 14C methods
def test_solve_Delta_14C(self):
soln_smrs_Delta_14C = [
smr_14C.solve_Delta_14C(alpha=self.alpha)
for smr_14C in self.smrs_14C
]
L = [soln[:-1] for soln in soln_smrs_Delta_14C[:-1]]
L += [soln_smrs_Delta_14C[-1]]
Delta_14C_ref = np.concatenate(L, axis=0)
self.assertTrue(
np.allclose(
Delta_14C_ref,
self.pwc_mr_14C.solve_Delta_14C(alpha=self.alpha),
equal_nan=True
)
)
def test_Delta_14C(self):
methods = [
"acc_gross_external_input_vector_Delta_14C",
"acc_net_external_input_vector",
"acc_gross_external_output_vector",
"acc_net_external_output_vector",
"acc_gross_internal_flux_matrix",
"acc_net_internal_flux_matrix"
]
for method in methods:
with self.subTest():
Delta_14C = [getattr(smr_14C, method)()
for smr_14C in self.smrs_14C]
Delta_14C_ref = np.concatenate(Delta_14C, axis=0)
self.assertTrue(
np.allclose(
Delta_14C_ref,
getattr(self.pwc_mr_14C, method)(),
equal_nan=True
)
)
###############################################################################
if __name__ == '__main__':
suite = unittest.defaultTestLoader.discover(".", pattern=__file__)
unittest.main()
| mit | -2,940,710,178,508,901,000 | 29.427746 | 79 | 0.494111 | false |
kurik/uPython-DHT22 | DHT22.py | 1 | 3132 | import pyb
from pyb import Pin
from pyb import ExtInt
# We need to use global properties here as any allocation of a memory (aka declaration of a variable)
# during the read cycle causes non-acceptable delay and we are loosing data than
nc = None
gnd = None
vcc = None
data = None
timer = None
micros = None
FALL_EDGES = 42 # we have 42 falling edges during data receive
times = list(range(FALL_EDGES))
index = 0
# The interrupt handler
def edge(line):
global index
global times
global micros
times[index] = micros.counter()
if index < (FALL_EDGES - 1): # Avoid overflow of the buffer in case of any noise on the line
index += 1
def init(timer_id = 2, nc_pin = 'Y3', gnd_pin = 'Y4', vcc_pin = 'Y1', data_pin = 'Y2'):
global nc
global gnd
global vcc
global data
global micros
global timer
# Leave the pin unconnected
if nc_pin is not None:
nc = Pin(nc_pin)
nc.init(Pin.OUT_OD)
nc.high()
# Make the pin work as GND
if gnd_pin is not None:
gnd = Pin(gnd_pin)
gnd.init(Pin.OUT_PP)
gnd.low()
# Make the pin work as power supply
if vcc_pin is not None:
vcc = Pin(vcc_pin)
vcc.init(Pin.OUT_PP)
vcc.high()
# Configure the pid for data communication
data = Pin(data_pin)
# Save the ID of the timer we are going to use
timer = timer_id
# setup the 1uS timer
micros = pyb.Timer(timer, prescaler=83, period=0x3fffffff) # 1MHz ~ 1uS
# Prepare interrupt handler
ExtInt(data, ExtInt.IRQ_FALLING, Pin.PULL_UP, None)
ExtInt(data, ExtInt.IRQ_FALLING, Pin.PULL_UP, edge)
# Start signal
def do_measurement():
global nc
global gnd
global vcc
global data
global micros
global timer
global index
# Send the START signal
data.init(Pin.OUT_PP)
data.low()
micros.counter(0)
while micros.counter() < 25000:
pass
data.high()
micros.counter(0)
while micros.counter() < 20:
pass
# Activate reading on the data pin
index = 0
data.init(Pin.IN, Pin.PULL_UP)
# Till 5mS the measurement must be over
pyb.delay(5)
# Parse the data read from the sensor
def process_data():
global times
    i = 2 # We ignore the first two falling edges as they are the response to the start signal
result_i = 0
result = list([0, 0, 0, 0, 0])
while i < FALL_EDGES:
result[result_i] <<= 1
if times[i] - times[i - 1] > 100:
result[result_i] += 1
if (i % 8) == 1:
result_i += 1
i += 1
[int_rh, dec_rh, int_t, dec_t, csum] = result
humidity = ((int_rh * 256) + dec_rh)/10
temperature = (((int_t & 0x7F) * 256) + dec_t)/10
if (int_t & 0x80) > 0:
temperature *= -1
comp_sum = int_rh + dec_rh + int_t + dec_t
if (comp_sum & 0xFF) != csum:
raise ValueError('Checksum does not match')
return (humidity, temperature)
def measure():
do_measurement()
if index != (FALL_EDGES -1):
raise ValueError('Data transfer failed: %s falling edges only' % str(index))
return process_data()
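# Illustrative usage on a pyboard (a sketch using the default wiring arguments of init()
# above; adjust the pins and timer to your own setup):
#
#   import DHT22
#   DHT22.init(timer_id=2, data_pin='Y2')
#   humidity, temperature = DHT22.measure()
#   print(humidity, temperature)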
| mit | 4,539,732,337,879,561,000 | 26.716814 | 101 | 0.611111 | false |
pepetreshere/odoo | addons/account/tests/common.py | 1 | 31193 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import fields
from odoo.tests.common import SavepointCase, HttpSavepointCase, tagged, Form
import time
import base64
from lxml import etree
@tagged('post_install', '-at_install')
class AccountTestInvoicingCommon(SavepointCase):
@classmethod
def copy_account(cls, account):
suffix_nb = 1
while True:
new_code = '%s (%s)' % (account.code, suffix_nb)
if account.search_count([('company_id', '=', account.company_id.id), ('code', '=', new_code)]):
suffix_nb += 1
else:
return account.copy(default={'code': new_code})
@classmethod
def setUpClass(cls, chart_template_ref=None):
super(AccountTestInvoicingCommon, cls).setUpClass()
if chart_template_ref:
chart_template = cls.env.ref(chart_template_ref)
else:
chart_template = cls.env.ref('l10n_generic_coa.configurable_chart_template', raise_if_not_found=False)
if not chart_template:
cls.tearDownClass()
# skipTest raises exception
cls.skipTest(cls, "Accounting Tests skipped because the user's company has no chart of accounts.")
# Create user.
user = cls.env['res.users'].create({
'name': 'Because I am accountman!',
'login': 'accountman',
'password': 'accountman',
'groups_id': [(6, 0, cls.env.user.groups_id.ids), (4, cls.env.ref('account.group_account_user').id)],
})
user.partner_id.email = '[email protected]'
# Shadow the current environment/cursor with one having the report user.
# This is mandatory to test access rights.
cls.env = cls.env(user=user)
cls.cr = cls.env.cr
cls.company_data_2 = cls.setup_company_data('company_2_data', chart_template=chart_template)
cls.company_data = cls.setup_company_data('company_1_data', chart_template=chart_template)
user.write({
'company_ids': [(6, 0, (cls.company_data['company'] + cls.company_data_2['company']).ids)],
'company_id': cls.company_data['company'].id,
})
cls.currency_data = cls.setup_multi_currency_data()
# ==== Taxes ====
cls.tax_sale_a = cls.company_data['default_tax_sale']
cls.tax_sale_b = cls.company_data['default_tax_sale'].copy()
cls.tax_purchase_a = cls.company_data['default_tax_purchase']
cls.tax_purchase_b = cls.company_data['default_tax_purchase'].copy()
cls.tax_armageddon = cls.setup_armageddon_tax('complex_tax', cls.company_data)
# ==== Products ====
cls.product_a = cls.env['product.product'].create({
'name': 'product_a',
'uom_id': cls.env.ref('uom.product_uom_unit').id,
'lst_price': 1000.0,
'standard_price': 800.0,
'property_account_income_id': cls.company_data['default_account_revenue'].id,
'property_account_expense_id': cls.company_data['default_account_expense'].id,
'taxes_id': [(6, 0, cls.tax_sale_a.ids)],
'supplier_taxes_id': [(6, 0, cls.tax_purchase_a.ids)],
})
cls.product_b = cls.env['product.product'].create({
'name': 'product_b',
'uom_id': cls.env.ref('uom.product_uom_dozen').id,
'lst_price': 200.0,
'standard_price': 160.0,
'property_account_income_id': cls.copy_account(cls.company_data['default_account_revenue']).id,
'property_account_expense_id': cls.copy_account(cls.company_data['default_account_expense']).id,
'taxes_id': [(6, 0, (cls.tax_sale_a + cls.tax_sale_b).ids)],
'supplier_taxes_id': [(6, 0, (cls.tax_purchase_a + cls.tax_purchase_b).ids)],
})
# ==== Fiscal positions ====
cls.fiscal_pos_a = cls.env['account.fiscal.position'].create({
'name': 'fiscal_pos_a',
'tax_ids': [
(0, None, {
'tax_src_id': cls.tax_sale_a.id,
'tax_dest_id': cls.tax_sale_b.id,
}),
(0, None, {
'tax_src_id': cls.tax_purchase_a.id,
'tax_dest_id': cls.tax_purchase_b.id,
}),
],
'account_ids': [
(0, None, {
'account_src_id': cls.product_a.property_account_income_id.id,
'account_dest_id': cls.product_b.property_account_income_id.id,
}),
(0, None, {
'account_src_id': cls.product_a.property_account_expense_id.id,
'account_dest_id': cls.product_b.property_account_expense_id.id,
}),
],
})
# ==== Payment terms ====
cls.pay_terms_a = cls.env.ref('account.account_payment_term_immediate')
cls.pay_terms_b = cls.env['account.payment.term'].create({
'name': '30% Advance End of Following Month',
'note': 'Payment terms: 30% Advance End of Following Month',
'line_ids': [
(0, 0, {
'value': 'percent',
'value_amount': 30.0,
'sequence': 400,
'days': 0,
'option': 'day_after_invoice_date',
}),
(0, 0, {
'value': 'balance',
'value_amount': 0.0,
'sequence': 500,
'days': 31,
'option': 'day_following_month',
}),
],
})
# ==== Partners ====
cls.partner_a = cls.env['res.partner'].create({
'name': 'partner_a',
'property_payment_term_id': cls.pay_terms_a.id,
'property_supplier_payment_term_id': cls.pay_terms_a.id,
'property_account_receivable_id': cls.company_data['default_account_receivable'].id,
'property_account_payable_id': cls.company_data['default_account_payable'].id,
'company_id': False,
})
cls.partner_b = cls.env['res.partner'].create({
'name': 'partner_b',
'property_payment_term_id': cls.pay_terms_b.id,
'property_supplier_payment_term_id': cls.pay_terms_b.id,
'property_account_position_id': cls.fiscal_pos_a.id,
'property_account_receivable_id': cls.company_data['default_account_receivable'].copy().id,
'property_account_payable_id': cls.company_data['default_account_payable'].copy().id,
'company_id': False,
})
# ==== Cash rounding ====
cls.cash_rounding_a = cls.env['account.cash.rounding'].create({
'name': 'add_invoice_line',
'rounding': 0.05,
'strategy': 'add_invoice_line',
'profit_account_id': cls.company_data['default_account_revenue'].copy().id,
'loss_account_id': cls.company_data['default_account_expense'].copy().id,
'rounding_method': 'UP',
})
cls.cash_rounding_b = cls.env['account.cash.rounding'].create({
'name': 'biggest_tax',
'rounding': 0.05,
'strategy': 'biggest_tax',
'rounding_method': 'DOWN',
})
@classmethod
def setup_company_data(cls, company_name, chart_template=None, **kwargs):
        ''' Create a new company with the name passed as parameter.
        A chart of accounts will be installed on this company: the same as the
        current company's one, unless another chart template is passed explicitly.
        The current user will get access to this company.
        :param company_name: The name of the company.
        :param chart_template: The chart template to be used on this new company.
        :return: A dictionary containing all relevant accounting data for testing.
'''
def search_account(company, chart_template, field_name, domain):
template_code = chart_template[field_name].code
domain = [('company_id', '=', company.id)] + domain
account = None
if template_code:
account = cls.env['account.account'].search(domain + [('code', '=like', template_code + '%')], limit=1)
if not account:
account = cls.env['account.account'].search(domain, limit=1)
return account
chart_template = chart_template or cls.env.company.chart_template_id
company = cls.env['res.company'].create({
'name': company_name,
**kwargs,
})
cls.env.user.company_ids |= company
chart_template.try_loading(company=company)
# The currency could be different after the installation of the chart template.
if kwargs.get('currency_id'):
company.write({'currency_id': kwargs['currency_id']})
return {
'company': company,
'currency': company.currency_id,
'default_account_revenue': cls.env['account.account'].search([
('company_id', '=', company.id),
('user_type_id', '=', cls.env.ref('account.data_account_type_revenue').id)
], limit=1),
'default_account_expense': cls.env['account.account'].search([
('company_id', '=', company.id),
('user_type_id', '=', cls.env.ref('account.data_account_type_expenses').id)
], limit=1),
'default_account_receivable': search_account(company, chart_template, 'property_account_receivable_id', [
('user_type_id.type', '=', 'receivable')
]),
'default_account_payable': cls.env['account.account'].search([
('company_id', '=', company.id),
('user_type_id.type', '=', 'payable')
], limit=1),
'default_account_assets': cls.env['account.account'].search([
('company_id', '=', company.id),
('user_type_id', '=', cls.env.ref('account.data_account_type_current_assets').id)
], limit=1),
'default_account_tax_sale': company.account_sale_tax_id.mapped('invoice_repartition_line_ids.account_id'),
'default_account_tax_purchase': company.account_purchase_tax_id.mapped('invoice_repartition_line_ids.account_id'),
'default_journal_misc': cls.env['account.journal'].search([
('company_id', '=', company.id),
('type', '=', 'general')
], limit=1),
'default_journal_sale': cls.env['account.journal'].search([
('company_id', '=', company.id),
('type', '=', 'sale')
], limit=1),
'default_journal_purchase': cls.env['account.journal'].search([
('company_id', '=', company.id),
('type', '=', 'purchase')
], limit=1),
'default_journal_bank': cls.env['account.journal'].search([
('company_id', '=', company.id),
('type', '=', 'bank')
], limit=1),
'default_journal_cash': cls.env['account.journal'].search([
('company_id', '=', company.id),
('type', '=', 'cash')
], limit=1),
'default_tax_sale': company.account_sale_tax_id,
'default_tax_purchase': company.account_purchase_tax_id,
}
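    # Illustrative usage (a sketch, not part of the original helper): test classes that
    # need a second company typically call this from setUpClass and keep the returned
    # mapping around; the attribute name `company_data_2` below is an assumption.
    #
    #     cls.company_data_2 = cls.setup_company_data('company_2_data')
    #     second_company = cls.company_data_2['company']
    #     sale_journal = cls.company_data_2['default_journal_sale']
    #
    # Later lookups (accounts, journals, default taxes) then go through the dictionary
    # instead of re-searching the database in every test.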
@classmethod
def setup_multi_currency_data(cls, default_values={}, rate2016=3.0, rate2017=2.0):
foreign_currency = cls.env['res.currency'].create({
'name': 'Gold Coin',
            'symbol': '☺',
'rounding': 0.001,
'position': 'after',
'currency_unit_label': 'Gold',
'currency_subunit_label': 'Silver',
**default_values,
})
rate1 = cls.env['res.currency.rate'].create({
'name': '2016-01-01',
'rate': rate2016,
'currency_id': foreign_currency.id,
'company_id': cls.env.company.id,
})
rate2 = cls.env['res.currency.rate'].create({
'name': '2017-01-01',
'rate': rate2017,
'currency_id': foreign_currency.id,
'company_id': cls.env.company.id,
})
return {
'currency': foreign_currency,
'rates': rate1 + rate2,
}
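    # A minimal usage sketch (assumed, not taken from the original file): the helper is
    # normally invoked once in setUpClass so tests can post moves in a foreign currency
    # with known 2016/2017 rates.
    #
    #     cls.currency_data = cls.setup_multi_currency_data()
    #     gold = cls.currency_data['currency']    # the 'Gold Coin' currency
    #     rates = cls.currency_data['rates']      # rate 3.0 from 2016, 2.0 from 2017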
@classmethod
def setup_armageddon_tax(cls, tax_name, company_data):
return cls.env['account.tax'].create({
'name': '%s (group)' % tax_name,
'amount_type': 'group',
'amount': 0.0,
'children_tax_ids': [
(0, 0, {
'name': '%s (child 1)' % tax_name,
'amount_type': 'percent',
'amount': 20.0,
'price_include': True,
'include_base_amount': True,
'tax_exigibility': 'on_invoice',
'invoice_repartition_line_ids': [
(0, 0, {
'factor_percent': 100,
'repartition_type': 'base',
}),
(0, 0, {
'factor_percent': 40,
'repartition_type': 'tax',
'account_id': company_data['default_account_tax_sale'].id,
}),
(0, 0, {
'factor_percent': 60,
'repartition_type': 'tax',
# /!\ No account set.
}),
],
'refund_repartition_line_ids': [
(0, 0, {
'factor_percent': 100,
'repartition_type': 'base',
}),
(0, 0, {
'factor_percent': 40,
'repartition_type': 'tax',
'account_id': company_data['default_account_tax_sale'].id,
}),
(0, 0, {
'factor_percent': 60,
'repartition_type': 'tax',
# /!\ No account set.
}),
],
}),
(0, 0, {
'name': '%s (child 2)' % tax_name,
'amount_type': 'percent',
'amount': 10.0,
'tax_exigibility': 'on_payment',
'cash_basis_transition_account_id': company_data['default_account_tax_sale'].copy().id,
'invoice_repartition_line_ids': [
(0, 0, {
'factor_percent': 100,
'repartition_type': 'base',
}),
(0, 0, {
'factor_percent': 100,
'repartition_type': 'tax',
'account_id': company_data['default_account_tax_sale'].id,
}),
],
'refund_repartition_line_ids': [
(0, 0, {
'factor_percent': 100,
'repartition_type': 'base',
}),
(0, 0, {
'factor_percent': 100,
'repartition_type': 'tax',
'account_id': company_data['default_account_tax_sale'].id,
}),
],
}),
],
})
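    # Hedged example of how this group tax could be wired into a test class (the
    # attribute name is an assumption):
    #
    #     cls.tax_armageddon = cls.setup_armageddon_tax('complex_tax', cls.company_data)
    #
    # The resulting 'group' tax combines a 20% price-included child (40/60 repartition,
    # one tax line without an account) with a 10% cash-basis child, which makes it a
    # good stress test for tax computation and repartition.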
@classmethod
def init_invoice(cls, move_type, partner=None, invoice_date=None, post=False, products=[], amounts=[], taxes=None):
move_form = Form(cls.env['account.move'].with_context(default_move_type=move_type))
move_form.invoice_date = invoice_date or fields.Date.from_string('2019-01-01')
move_form.partner_id = partner or cls.partner_a
for product in products:
with move_form.invoice_line_ids.new() as line_form:
line_form.product_id = product
if taxes:
line_form.tax_ids.clear()
line_form.tax_ids.add(taxes)
for amount in amounts:
with move_form.invoice_line_ids.new() as line_form:
line_form.price_unit = amount
if taxes:
line_form.tax_ids.clear()
line_form.tax_ids.add(taxes)
rslt = move_form.save()
if post:
rslt.action_post()
return rslt
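    # Sketch of typical calls (assumed usage, not part of the original file):
    #
    #     draft_invoice = cls.init_invoice('out_invoice', products=cls.product_a)
    #     posted_bill = cls.init_invoice(
    #         'in_invoice',
    #         partner=cls.partner_b,
    #         invoice_date=fields.Date.from_string('2019-06-01'),
    #         post=True,
    #         amounts=[100.0, 200.0],
    #         taxes=cls.tax_purchase_a,
    #     )
    #
    # `products` adds product lines, `amounts` adds bare price_unit lines, and `taxes`
    # replaces the default taxes on every created line.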
def assertInvoiceValues(self, move, expected_lines_values, expected_move_values):
def sort_lines(lines):
return lines.sorted(lambda line: (line.exclude_from_invoice_tab, not bool(line.tax_line_id), line.name or '', line.balance))
self.assertRecordValues(sort_lines(move.line_ids.sorted()), expected_lines_values)
self.assertRecordValues(sort_lines(move.invoice_line_ids.sorted()), expected_lines_values[:len(move.invoice_line_ids)])
self.assertRecordValues(move, [expected_move_values])
####################################################
# Xml Comparison
####################################################
def _turn_node_as_dict_hierarchy(self, node):
        ''' Turn the node into a python dictionary to be compared later with another one.
        This allows namespace handling to be ignored.
:param node: A node inside an xml tree.
:return: A python dictionary.
'''
tag_split = node.tag.split('}')
tag_wo_ns = tag_split[-1]
attrib_wo_ns = {k: v for k, v in node.attrib.items() if '}' not in k}
return {
'tag': tag_wo_ns,
'namespace': None if len(tag_split) < 2 else tag_split[0],
'text': (node.text or '').strip(),
'attrib': attrib_wo_ns,
'children': [self._turn_node_as_dict_hierarchy(child_node) for child_node in node.getchildren()],
}
def assertXmlTreeEqual(self, xml_tree, expected_xml_tree):
''' Compare two lxml.etree.
:param xml_tree: The current tree.
:param expected_xml_tree: The expected tree.
'''
def assertNodeDictEqual(node_dict, expected_node_dict):
''' Compare nodes created by the `_turn_node_as_dict_hierarchy` method.
:param node_dict: The node to compare with.
:param expected_node_dict: The expected node.
'''
# Check tag.
self.assertEqual(node_dict['tag'], expected_node_dict['tag'])
# Check attributes.
node_dict_attrib = {k: '___ignore___' if expected_node_dict['attrib'].get(k) == '___ignore___' else v
for k, v in node_dict['attrib'].items()}
expected_node_dict_attrib = {k: v for k, v in expected_node_dict['attrib'].items() if v != '___remove___'}
self.assertDictEqual(
node_dict_attrib,
expected_node_dict_attrib,
"Element attributes are different for node %s" % node_dict['tag'],
)
# Check text.
if expected_node_dict['text'] != '___ignore___':
self.assertEqual(
node_dict['text'],
expected_node_dict['text'],
"Element text are different for node %s" % node_dict['tag'],
)
# Check children.
self.assertEqual(
[child['tag'] for child in node_dict['children']],
[child['tag'] for child in expected_node_dict['children']],
"Number of children elements for node %s is different." % node_dict['tag'],
)
for child_node_dict, expected_child_node_dict in zip(node_dict['children'], expected_node_dict['children']):
assertNodeDictEqual(child_node_dict, expected_child_node_dict)
assertNodeDictEqual(
self._turn_node_as_dict_hierarchy(xml_tree),
self._turn_node_as_dict_hierarchy(expected_xml_tree),
)
def with_applied_xpath(self, xml_tree, xpath):
''' Applies the xpath to the xml_tree passed as parameter.
:param xml_tree: An instance of etree.
:param xpath: The xpath to apply as a string.
:return: The resulting etree after applying the xpaths.
'''
diff_xml_tree = etree.fromstring('<data>%s</data>' % xpath)
return self.env['ir.ui.view'].apply_inheritance_specs(xml_tree, diff_xml_tree)
def get_xml_tree_from_attachment(self, attachment):
''' Extract an instance of etree from an ir.attachment.
:param attachment: An ir.attachment.
:return: An instance of etree.
'''
return etree.fromstring(base64.b64decode(attachment.with_context(bin_size=False).datas))
def get_xml_tree_from_string(self, xml_tree_str):
''' Convert the string passed as parameter to an instance of etree.
:param xml_tree_str: A string representing an xml.
:return: An instance of etree.
'''
return etree.fromstring(xml_tree_str)
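    # Putting the XML helpers together (an illustrative sketch; the attachment variable
    # and the xpath below are made up for the example):
    #
    #     tree = self.get_xml_tree_from_attachment(invoice_attachment)
    #     expected = self.get_xml_tree_from_string('<Invoice><Amount>100</Amount></Invoice>')
    #     expected = self.with_applied_xpath(
    #         expected,
    #         "<xpath expr='//Amount' position='replace'><Amount>___ignore___</Amount></xpath>",
    #     )
    #     self.assertXmlTreeEqual(tree, expected)
    #
    # '___ignore___' skips the comparison of a text or attribute value, while
    # '___remove___' drops an attribute from the expected set, as handled in
    # assertNodeDictEqual above.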
@tagged('post_install', '-at_install')
class AccountTestInvoicingHttpCommon(AccountTestInvoicingCommon, HttpSavepointCase):
pass
class TestAccountReconciliationCommon(AccountTestInvoicingCommon):
"""Tests for reconciliation (account.tax)
Test used to check that when doing a sale or purchase invoice in a different currency,
the result will be balanced.
"""
@classmethod
def setUpClass(cls, chart_template_ref=None):
super().setUpClass(chart_template_ref=chart_template_ref)
cls.company = cls.company_data['company']
cls.company.currency_id = cls.env.ref('base.EUR')
cls.partner_agrolait = cls.env['res.partner'].create({
'name': 'Deco Addict',
'is_company': True,
'country_id': cls.env.ref('base.us').id,
})
cls.partner_agrolait_id = cls.partner_agrolait.id
cls.currency_swiss_id = cls.env.ref("base.CHF").id
cls.currency_usd_id = cls.env.ref("base.USD").id
cls.currency_euro_id = cls.env.ref("base.EUR").id
cls.account_rcv = cls.company_data['default_account_receivable']
cls.account_rsa = cls.company_data['default_account_payable']
cls.product = cls.env['product.product'].create({
'name': 'Product Product 4',
'standard_price': 500.0,
'list_price': 750.0,
'type': 'consu',
'categ_id': cls.env.ref('product.product_category_all').id,
})
cls.bank_journal_euro = cls.env['account.journal'].create({'name': 'Bank', 'type': 'bank', 'code': 'BNK67'})
cls.account_euro = cls.bank_journal_euro.default_account_id
cls.bank_journal_usd = cls.env['account.journal'].create({'name': 'Bank US', 'type': 'bank', 'code': 'BNK68', 'currency_id': cls.currency_usd_id})
cls.account_usd = cls.bank_journal_usd.default_account_id
cls.fx_journal = cls.company.currency_exchange_journal_id
cls.diff_income_account = cls.company.income_currency_exchange_account_id
cls.diff_expense_account = cls.company.expense_currency_exchange_account_id
cls.inbound_payment_method = cls.env['account.payment.method'].create({
'name': 'inbound',
'code': 'IN',
'payment_type': 'inbound',
})
cls.expense_account = cls.company_data['default_account_expense']
# cash basis intermediary account
cls.tax_waiting_account = cls.env['account.account'].create({
'name': 'TAX_WAIT',
'code': 'TWAIT',
'user_type_id': cls.env.ref('account.data_account_type_current_liabilities').id,
'reconcile': True,
'company_id': cls.company.id,
})
# cash basis final account
cls.tax_final_account = cls.env['account.account'].create({
'name': 'TAX_TO_DEDUCT',
'code': 'TDEDUCT',
'user_type_id': cls.env.ref('account.data_account_type_current_assets').id,
'company_id': cls.company.id,
})
cls.tax_base_amount_account = cls.env['account.account'].create({
'name': 'TAX_BASE',
'code': 'TBASE',
'user_type_id': cls.env.ref('account.data_account_type_current_assets').id,
'company_id': cls.company.id,
})
cls.company.account_cash_basis_base_account_id = cls.tax_base_amount_account.id
# Journals
cls.purchase_journal = cls.company_data['default_journal_purchase']
cls.cash_basis_journal = cls.env['account.journal'].create({
'name': 'CABA',
'code': 'CABA',
'type': 'general',
})
cls.general_journal = cls.company_data['default_journal_misc']
# Tax Cash Basis
cls.tax_cash_basis = cls.env['account.tax'].create({
'name': 'cash basis 20%',
'type_tax_use': 'purchase',
'company_id': cls.company.id,
'amount': 20,
'tax_exigibility': 'on_payment',
'cash_basis_transition_account_id': cls.tax_waiting_account.id,
'invoice_repartition_line_ids': [
(0,0, {
'factor_percent': 100,
'repartition_type': 'base',
}),
(0,0, {
'factor_percent': 100,
'repartition_type': 'tax',
'account_id': cls.tax_final_account.id,
}),
],
'refund_repartition_line_ids': [
(0,0, {
'factor_percent': 100,
'repartition_type': 'base',
}),
(0,0, {
'factor_percent': 100,
'repartition_type': 'tax',
'account_id': cls.tax_final_account.id,
}),
],
})
cls.env['res.currency.rate'].create([
{
'currency_id': cls.env.ref('base.EUR').id,
'name': '2010-01-02',
'rate': 1.0,
}, {
'currency_id': cls.env.ref('base.USD').id,
'name': '2010-01-02',
'rate': 1.2834,
}, {
'currency_id': cls.env.ref('base.USD').id,
'name': time.strftime('%Y-06-05'),
'rate': 1.5289,
}
])
def _create_invoice(self, move_type='out_invoice', invoice_amount=50, currency_id=None, partner_id=None, date_invoice=None, payment_term_id=False, auto_validate=False):
date_invoice = date_invoice or time.strftime('%Y') + '-07-01'
invoice_vals = {
'move_type': move_type,
'partner_id': partner_id or self.partner_agrolait_id,
'invoice_date': date_invoice,
'date': date_invoice,
'invoice_line_ids': [(0, 0, {
'name': 'product that cost %s' % invoice_amount,
'quantity': 1,
'price_unit': invoice_amount,
'tax_ids': [(6, 0, [])],
})]
}
if payment_term_id:
invoice_vals['invoice_payment_term_id'] = payment_term_id
if currency_id:
invoice_vals['currency_id'] = currency_id
        invoice = self.env['account.move'].with_context(default_move_type=move_type).create(invoice_vals)
if auto_validate:
invoice.action_post()
return invoice
def create_invoice(self, move_type='out_invoice', invoice_amount=50, currency_id=None):
return self._create_invoice(move_type=move_type, invoice_amount=invoice_amount, currency_id=currency_id, auto_validate=True)
def create_invoice_partner(self, move_type='out_invoice', invoice_amount=50, currency_id=None, partner_id=False, payment_term_id=False):
return self._create_invoice(
move_type=move_type,
invoice_amount=invoice_amount,
currency_id=currency_id,
partner_id=partner_id,
payment_term_id=payment_term_id,
auto_validate=True
)
def make_payment(self, invoice_record, bank_journal, amount=0.0, amount_currency=0.0, currency_id=None, reconcile_param=[]):
bank_stmt = self.env['account.bank.statement'].create({
'journal_id': bank_journal.id,
'date': time.strftime('%Y') + '-07-15',
'name': 'payment' + invoice_record.name,
'line_ids': [(0, 0, {
'payment_ref': 'payment',
'partner_id': self.partner_agrolait_id,
'amount': amount,
'amount_currency': amount_currency,
'foreign_currency_id': currency_id,
})],
})
bank_stmt.button_post()
bank_stmt.line_ids[0].reconcile(reconcile_param)
return bank_stmt
def make_customer_and_supplier_flows(self, invoice_currency_id, invoice_amount, bank_journal, amount, amount_currency, transaction_currency_id):
#we create an invoice in given invoice_currency
invoice_record = self.create_invoice(move_type='out_invoice', invoice_amount=invoice_amount, currency_id=invoice_currency_id)
#we encode a payment on it, on the given bank_journal with amount, amount_currency and transaction_currency given
line = invoice_record.line_ids.filtered(lambda line: line.account_id.user_type_id.type in ('receivable', 'payable'))
bank_stmt = self.make_payment(invoice_record, bank_journal, amount=amount, amount_currency=amount_currency, currency_id=transaction_currency_id, reconcile_param=[{'id': line.id}])
customer_move_lines = bank_stmt.line_ids.line_ids
#we create a supplier bill in given invoice_currency
invoice_record = self.create_invoice(move_type='in_invoice', invoice_amount=invoice_amount, currency_id=invoice_currency_id)
#we encode a payment on it, on the given bank_journal with amount, amount_currency and transaction_currency given
line = invoice_record.line_ids.filtered(lambda line: line.account_id.user_type_id.type in ('receivable', 'payable'))
bank_stmt = self.make_payment(invoice_record, bank_journal, amount=-amount, amount_currency=-amount_currency, currency_id=transaction_currency_id, reconcile_param=[{'id': line.id}])
supplier_move_lines = bank_stmt.line_ids.line_ids
return customer_move_lines, supplier_move_lines
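    # Illustrative call (a sketch; the figures are assumptions): reconcile a 50 USD
    # customer invoice and a 50 USD supplier bill against bank statement lines of
    # 40 (journal currency) carrying 50 USD as the transaction currency.
    #
    #     customer_lines, supplier_lines = self.make_customer_and_supplier_flows(
    #         self.currency_usd_id, 50, self.bank_journal_euro, 40, 50, self.currency_usd_id)
    #
    # The returned move lines are the journal items generated by each statement line,
    # ready for assertions on debit/credit and exchange-difference entries.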
| agpl-3.0 | -5,320,034,106,050,978,000 | 43.622318 | 189 | 0.523292 | false |
lipschultz/gnumeric-py | gnumeric/expression.py | 1 | 5340 | """
Gnumeric-py: Reading and writing gnumeric files with python
Copyright (C) 2017 Michael Lipschultz
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from typing import Set, Union, Tuple
from gnumeric import utils, expression_evaluation
from gnumeric.evaluation_errors import EvaluationError
class Expression:
# TODO: what to do if originating cell's text changes or is deleted?
def __init__(self, id, worksheet, cell):
"""
:param cell: The cell this Expression was created from. It could be the originating cell or just a cell that
uses this expression. It's used when `id` is `None` (i.e. the expression isn't shared between cells).
"""
self.__exprid = id
self.__worksheet = worksheet
self.__cell = cell
def __get_raw_originating_cell(self):
"""
Returns a (int, int, str) tuple, where the values are (row, column, text of the cell).
"""
if self.__exprid is not None:
coords, text = self.__worksheet.get_expression_map()[self.__exprid]
return int(coords[0]), int(coords[1]), text
else:
return self.__cell.row, self.__cell.column, self.__cell.text
@property
def id(self):
"""
The expression id used to uniquely identify the expression within the sheet. This will be `None` if the
expression isn't shared between cells.
"""
return self.__exprid
@property
def original_text(self) -> str:
"""
Returns the text of the expression, with cell references from the perspective of the cell where the expression
is stored (i.e. the original cell).
"""
return self.__get_raw_originating_cell()[2]
@property
def text(self):
"""
        Returns the text of the expression, with cell references updated to be from the perspective of the cell using the expression.
"""
# TODO: fix this to actually return what it's supposed to
raise NotImplementedError
@property
def reference_coordinate_offset(self) -> Tuple[int, int]:
"""
The (row, col) offset to translate the original coordinates into the coordinates based at the current cell.
"""
original_coordinates = self.get_originating_cell_coordinate()
current_coordinates = self.__cell.coordinate
return current_coordinates[0] - original_coordinates[0], current_coordinates[1] - original_coordinates[1]
@property
def value(self):
"""
Returns the result of the expression's evaluation.
"""
return expression_evaluation.evaluate(self.original_text, self.__cell)
@property
def worksheet(self):
return self.__worksheet
def get_originating_cell_coordinate(self, representation_format='index') -> Union[Tuple[int, int], str]:
"""
Returns the cell coordinate for the cell Gnumeric is using to store the expression.
:param representation_format: For spreadsheet notation, use `'spreadsheet'`, for 0-indexed (row, column)
notation, use 'index' (default).
:return: A `str` if `representation_format` is `'spreadsheet'` and a tuple of ints `(int, int)` if 'index'.
"""
row, col = self.__get_raw_originating_cell()[:2]
if representation_format == 'index':
return row, col
elif representation_format == 'spreadsheet':
return utils.coordinate_to_spreadsheet(row, col)
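    # A small usage sketch (assumed, not from the original file):
    #
    #     expr.get_originating_cell_coordinate()                  # e.g. (0, 1)
    #     expr.get_originating_cell_coordinate('spreadsheet')     # e.g. 'B1'
    #
    # Note that an unrecognized representation_format falls through and returns None.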
def get_originating_cell(self):
"""
Returns the cell Gnumeric is using to store the expression.
"""
return self.__worksheet.cell(*self.get_originating_cell_coordinate(), create=False)
def get_all_cells(self, sort=False):
"""
Returns a list of all cells using this expression.
Use `sort` to specify whether the cells should be sorted. If `False` (default), then no sorting will take
place. If `sort` is `"row"`, then sorting will occur by row first, then by column within each row. If `sort`
is `"column"`, then the opposite will happen: first sort by column, then by row within each column.
"""
if self.__exprid is None:
return [self.__cell]
else:
return self.__worksheet.get_all_cells_with_expression(self.__exprid, sort=sort)
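    # For example (hypothetical cells), a shared expression used in several cells could
    # be listed row by row with:
    #
    #     cells = expr.get_all_cells(sort='row')
    #
    # With the default sort=False no ordering is guaranteed; the cells are returned as
    # the worksheet provides them.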
def get_referenced_cells(self) -> Union[Set, EvaluationError]:
return expression_evaluation.get_referenced_cells(self.original_text, self.__cell)
def __str__(self):
return self.original_text
def __repr__(self):
return 'Expression(id=%s, text="%s", ws=%s, cell=(%d, %d))' % (
self.id, self.original_text, self.__worksheet, self.__cell.row, self.__cell.column)
| gpl-3.0 | 175,873,206,152,406,270 | 39.763359 | 133 | 0.650375 | false |
stitchfix/pybossa | test/test_web.py | 1 | 151390 | # -*- coding: utf8 -*-
# This file is part of PyBossa.
#
# Copyright (C) 2013 SF Isle of Man Limited
#
# PyBossa is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyBossa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PyBossa. If not, see <http://www.gnu.org/licenses/>.
import json
import StringIO
from default import db, Fixtures, with_context
from helper import web
from mock import patch, Mock
from flask import Response
from itsdangerous import BadSignature
from collections import namedtuple
from pybossa.core import signer, mail
from pybossa.util import unicode_csv_reader
from pybossa.util import get_user_signup_method
from pybossa.ckan import Ckan
from bs4 import BeautifulSoup
from requests.exceptions import ConnectionError
from werkzeug.exceptions import NotFound
from pybossa.model.app import App
from pybossa.model.category import Category
from pybossa.model.task import Task
from pybossa.model.task_run import TaskRun
from pybossa.model.user import User
from factories import AppFactory, CategoryFactory, TaskFactory, TaskRunFactory
FakeRequest = namedtuple('FakeRequest', ['text', 'status_code', 'headers'])
class TestWeb(web.Helper):
pkg_json_not_found = {
"help": "Return ...",
"success": False,
"error": {
"message": "Not found",
"__type": "Not Found Error"}}
@with_context
def test_01_index(self):
"""Test WEB home page works"""
res = self.app.get("/", follow_redirects=True)
assert self.html_title() in res.data, res
assert "Create a Project" in res.data, res
@with_context
def test_01_search(self):
"""Test WEB search page works."""
res = self.app.get('/search')
err_msg = "Search page should be accessible"
assert "Search" in res.data, err_msg
@with_context
@patch('pybossa.stats.pygeoip', autospec=True)
@patch('pybossa.view.applications.uploader.upload_file', return_value=True)
def test_02_stats(self, mock1, mock2):
"""Test WEB leaderboard or stats page works"""
with self.flask_app.app_context():
res = self.register()
res = self.signin()
res = self.new_application(short_name="igil")
returns = [Mock()]
returns[0].GeoIP.return_value = 'gic'
returns[0].GeoIP.record_by_addr.return_value = {}
mock1.side_effects = returns
app = db.session.query(App).first()
# Without stats
url = '/app/%s/stats' % app.short_name
res = self.app.get(url)
assert "Sorry" in res.data, res.data
# We use a string here to check that it works too
task = Task(app_id=app.id, n_answers=10)
db.session.add(task)
db.session.commit()
for i in range(10):
task_run = TaskRun(app_id=app.id, task_id=1,
user_id=1,
info={'answer': 1})
db.session.add(task_run)
db.session.commit()
self.app.get('api/app/%s/newtask' % app.id)
# With stats
url = '/app/%s/stats' % app.short_name
res = self.app.get(url)
assert res.status_code == 200, res.status_code
assert "Distribution" in res.data, res.data
with patch.dict(self.flask_app.config, {'GEO': True}):
url = '/app/%s/stats' % app.short_name
res = self.app.get(url)
assert "GeoLite" in res.data, res.data
res = self.app.get('/leaderboard', follow_redirects=True)
assert self.html_title("Community Leaderboard") in res.data, res
assert self.user.fullname in res.data, res.data
# With hidden project
app.hidden = 1
db.session.add(app)
db.session.commit()
url = '/app/%s/stats' % app.short_name
res = self.app.get(url)
assert res.status_code == 200, res.status_code
assert "Distribution" in res.data, res.data
self.signout()
self.create()
# As anonymous
url = '/app/%s/stats' % app.short_name
res = self.app.get(url)
assert res.status_code == 401, res.status_code
# As another user, but not owner
self.signin(email=Fixtures.email_addr2, password=Fixtures.password)
url = '/app/%s/stats' % app.short_name
res = self.app.get(url)
assert res.status_code == 403, res.status_code
@with_context
def test_03_account_index(self):
"""Test WEB account index works."""
# Without users
with self.flask_app.app_context():
res = self.app.get('/account/page/15', follow_redirects=True)
assert res.status_code == 404, res.status_code
self.create()
res = self.app.get('/account', follow_redirects=True)
assert res.status_code == 200, res.status_code
err_msg = "There should be a Community page"
assert "Community" in res.data, err_msg
@with_context
def test_register_get(self):
"""Test WEB register user works"""
res = self.app.get('/account/register')
# The output should have a mime-type: text/html
assert res.mimetype == 'text/html', res
assert self.html_title("Register") in res.data, res
@with_context
@patch('pybossa.view.account.mail')
@patch('pybossa.view.account.render_template')
@patch('pybossa.view.account.signer')
def test_register_post_creates_email_with_link(self, signer, render, mail):
"""Test WEB register post creates and sends the confirmation email if
account validation is enabled"""
from flask import current_app
current_app.config['ACCOUNT_CONFIRMATION_DISABLED'] = False
data = dict(fullname="John Doe", name="johndoe",
password="p4ssw0rd", confirm="p4ssw0rd",
email_addr="[email protected]")
signer.dumps.return_value = ''
render.return_value = ''
res = self.app.post('/account/register', data=data)
del data['confirm']
current_app.config['ACCOUNT_CONFIRMATION_DISABLED'] = True
signer.dumps.assert_called_with(data, salt='account-validation')
render.assert_any_call('/account/email/validate_account.md',
user=data,
confirm_url='http://localhost/account/register/confirmation?key=')
assert mail.send.called, "Mail was not sent"
@with_context
def test_register_post_valid_data_validation_enabled(self):
"""Test WEB register post with valid form data and account validation
enabled"""
from flask import current_app
current_app.config['ACCOUNT_CONFIRMATION_DISABLED'] = False
data = dict(fullname="John Doe", name="johndoe",
password="p4ssw0rd", confirm="p4ssw0rd",
email_addr="[email protected]")
res = self.app.post('/account/register', data=data)
current_app.config['ACCOUNT_CONFIRMATION_DISABLED'] = True
assert self.html_title() in res.data, res
assert "Just one more step, please" in res.data, res.data
from pybossa.view.applications import redirect
@with_context
@patch('pybossa.view.account.redirect', wraps=redirect)
@patch('pybossa.view.account.signer')
def test_register_post_valid_data_validation_disabled(self, signer, redirect):
"""Test WEB register post with valid form data and account validation
disabled redirects to the confirmation URL with valid arguments"""
data = dict(fullname="John Doe", name="johndoe",
password="p4ssw0rd", confirm="p4ssw0rd",
email_addr="[email protected]")
signer.dumps.return_value = 'key'
res = self.app.post('/account/register', data=data)
print dir(redirect)
redirect.assert_called_with('http://localhost/account/register/confirmation?key=key')
def test_register_confirmation_fails_without_key(self):
"""Test WEB register confirmation returns 403 if no 'key' param is present"""
res = self.app.get('/account/register/confirmation')
assert res.status_code == 403, res.status
def test_register_confirmation_fails_with_invalid_key(self):
"""Test WEB register confirmation returns 403 if an invalid key is given"""
res = self.app.get('/account/register/confirmation?key=invalid')
assert res.status_code == 403, res.status
@patch('pybossa.view.account.signer')
def test_register_confirmation_gets_account_data_from_key(self, fake_signer):
"""Test WEB register confirmation gets the account data from the key"""
fake_signer.loads.return_value = dict(fullname='FN', name='name',
email_addr='email', password='password')
res = self.app.get('/account/register/confirmation?key=valid-key')
fake_signer.loads.assert_called_with('valid-key', max_age=3600, salt='account-validation')
@patch('pybossa.view.account.signer')
def test_register_confirmation_creates_new_account(self, fake_signer):
"""Test WEB register confirmation creates the new account"""
fake_signer.loads.return_value = dict(fullname='FN', name='name',
email_addr='email', password='password')
res = self.app.get('/account/register/confirmation?key=valid-key')
user = db.session.query(User).filter_by(name='name').first()
assert user is not None
assert user.check_password('password')
@with_context
def test_04_signin_signout(self):
"""Test WEB sign in and sign out works"""
res = self.register()
# Log out as the registration already logs in the user
res = self.signout()
res = self.signin(method="GET")
assert self.html_title("Sign in") in res.data, res.data
assert "Sign in" in res.data, res.data
res = self.signin(email='')
assert "Please correct the errors" in res.data, res
assert "The e-mail is required" in res.data, res
res = self.signin(password='')
assert "Please correct the errors" in res.data, res
assert "You must provide a password" in res.data, res
res = self.signin(email='', password='')
assert "Please correct the errors" in res.data, res
assert "The e-mail is required" in res.data, res
assert "You must provide a password" in res.data, res
        # Non-existent user
msg = "Ooops, we didn't find you in the system"
res = self.signin(email='wrongemail')
assert msg in res.data, res.data
res = self.signin(email='wrongemail', password='wrongpassword')
assert msg in res.data, res
# Real user but wrong password or username
msg = "Ooops, Incorrect email/password"
res = self.signin(password='wrongpassword')
assert msg in res.data, res
res = self.signin()
assert self.html_title() in res.data, res
assert "Welcome back %s" % self.user.fullname in res.data, res
# Check profile page with several information chunks
res = self.profile()
assert self.html_title("Profile") in res.data, res
assert self.user.fullname in res.data, res
assert self.user.email_addr in res.data, res
# Log out
res = self.signout()
assert self.html_title() in res.data, res
assert "You are now signed out" in res.data, res
# Request profile as an anonymous user
# Check profile page with several information chunks
res = self.profile()
assert self.user.fullname in res.data, res
assert self.user.email_addr not in res.data, res
# Try to access protected areas like update
res = self.app.get('/account/johndoe/update', follow_redirects=True)
# As a user must be signed in to access, the page the title will be the
# redirection to log in
assert self.html_title("Sign in") in res.data, res.data
assert "Please sign in to access this page." in res.data, res.data
res = self.signin(next='%2Faccount%2Fprofile')
assert self.html_title("Profile") in res.data, res
assert "Welcome back %s" % self.user.fullname in res.data, res
@with_context
@patch('pybossa.view.applications.uploader.upload_file', return_value=True)
def test_profile_applications(self, mock):
"""Test WEB user profile project page works."""
with self.flask_app.app_context():
self.create()
self.signin(email=Fixtures.email_addr, password=Fixtures.password)
self.new_application()
url = '/account/%s/applications' % Fixtures.name
res = self.app.get(url)
assert "Projects" in res.data, res.data
assert "Published" in res.data, res.data
assert "Draft" in res.data, res.data
assert Fixtures.app_name in res.data, res.data
url = '/account/fakename/applications'
res = self.app.get(url)
assert res.status_code == 404, res.status_code
url = '/account/%s/applications' % Fixtures.name2
res = self.app.get(url)
assert res.status_code == 403, res.status_code
@with_context
def test_05_update_user_profile(self):
"""Test WEB update user profile"""
# Create an account and log in
self.register()
url = "/account/fake/update"
res = self.app.get(url, follow_redirects=True)
assert res.status_code == 404, res.status_code
# Update profile with new data
res = self.update_profile(method="GET")
msg = "Update your profile: %s" % self.user.fullname
assert self.html_title(msg) in res.data, res.data
msg = 'input id="id" name="id" type="hidden" value="1"'
assert msg in res.data, res
assert self.user.fullname in res.data, res
assert "Save the changes" in res.data, res
msg = '<a href="/account/johndoe/update" class="btn">Cancel</a>'
assert msg in res.data, res.data
res = self.update_profile(fullname="John Doe 2",
email_addr="johndoe2@example",
locale="en")
assert "Please correct the errors" in res.data, res.data
res = self.update_profile(fullname="John Doe 2",
email_addr="[email protected]",
locale="en")
title = "Update your profile: John Doe 2"
assert self.html_title(title) in res.data, res.data
assert "Your profile has been updated!" in res.data, res.data
assert "John Doe 2" in res.data, res
assert "johndoe" in res.data, res
assert "[email protected]" in res.data, res
# Updating the username field forces the user to re-log in
res = self.update_profile(fullname="John Doe 2",
email_addr="[email protected]",
locale="en",
new_name="johndoe2")
assert "Your profile has been updated!" in res.data, res
assert "Please sign in" in res.data, res.data
res = self.signin(method="POST", email="[email protected]",
password="p4ssw0rd",
next="%2Faccount%2Fprofile")
assert "Welcome back John Doe 2" in res.data, res.data
assert "John Doe 2" in res.data, res
assert "johndoe2" in res.data, res
assert "[email protected]" in res.data, res
res = self.signout()
assert self.html_title() in res.data, res
assert "You are now signed out" in res.data, res
# A user must be signed in to access the update page, the page
# the title will be the redirection to log in
res = self.update_profile(method="GET")
assert self.html_title("Sign in") in res.data, res
assert "Please sign in to access this page." in res.data, res
# A user must be signed in to access the update page, the page
# the title will be the redirection to log in
res = self.update_profile()
assert self.html_title("Sign in") in res.data, res
assert "Please sign in to access this page." in res.data, res
self.register(fullname="new", name="new")
url = "/account/johndoe2/update"
res = self.app.get(url)
assert res.status_code == 403
@with_context
def test_05a_get_nonexistant_app(self):
"""Test WEB get not existant project should return 404"""
res = self.app.get('/app/nonapp', follow_redirects=True)
assert res.status == '404 NOT FOUND', res.status
@with_context
def test_05b_get_nonexistant_app_newtask(self):
"""Test WEB get non existant project newtask should return 404"""
res = self.app.get('/app/noapp/presenter', follow_redirects=True)
assert res.status == '404 NOT FOUND', res.status
res = self.app.get('/app/noapp/newtask', follow_redirects=True)
assert res.status == '404 NOT FOUND', res.status
@with_context
def test_05c_get_nonexistant_app_tutorial(self):
"""Test WEB get non existant project tutorial should return 404"""
res = self.app.get('/app/noapp/tutorial', follow_redirects=True)
assert res.status == '404 NOT FOUND', res.status
@with_context
def test_05d_get_nonexistant_app_delete(self):
"""Test WEB get non existant project delete should return 404"""
self.register()
# GET
res = self.app.get('/app/noapp/delete', follow_redirects=True)
assert res.status == '404 NOT FOUND', res.data
# POST
res = self.delete_application(short_name="noapp")
assert res.status == '404 NOT FOUND', res.status
@with_context
def test_05d_get_nonexistant_app_update(self):
"""Test WEB get non existant project update should return 404"""
self.register()
# GET
res = self.app.get('/app/noapp/update', follow_redirects=True)
assert res.status == '404 NOT FOUND', res.status
# POST
res = self.update_application(short_name="noapp")
assert res.status == '404 NOT FOUND', res.status
@with_context
def test_05d_get_nonexistant_app_import(self):
"""Test WEB get non existant project import should return 404"""
self.register()
# GET
res = self.app.get('/app/noapp/import', follow_redirects=True)
assert res.status == '404 NOT FOUND', res.status
# POST
res = self.app.post('/app/noapp/import', follow_redirects=True)
assert res.status == '404 NOT FOUND', res.status
@with_context
def test_05d_get_nonexistant_app_task(self):
"""Test WEB get non existant project task should return 404"""
res = self.app.get('/app/noapp/task', follow_redirects=True)
assert res.status == '404 NOT FOUND', res.status
# Pagination
res = self.app.get('/app/noapp/task/25', follow_redirects=True)
assert res.status == '404 NOT FOUND', res.status
@with_context
def test_05d_get_nonexistant_app_results_json(self):
"""Test WEB get non existant project results json should return 404"""
res = self.app.get('/app/noapp/24/results.json', follow_redirects=True)
assert res.status == '404 NOT FOUND', res.status
@with_context
def test_06_applications_without_apps(self):
"""Test WEB projects index without projects works"""
# Check first without apps
with self.flask_app.app_context():
self.create_categories()
res = self.app.get('/app', follow_redirects=True)
assert "Projects" in res.data, res.data
assert Fixtures.cat_1 in res.data, res.data
@with_context
def test_06_applications_2(self):
"""Test WEB projects index with projects"""
with self.flask_app.app_context():
self.create()
res = self.app.get('/app', follow_redirects=True)
assert self.html_title("Projects") in res.data, res.data
assert "Projects" in res.data, res.data
assert Fixtures.app_short_name in res.data, res.data
@with_context
def test_06_featured_apps(self):
"""Test WEB projects index shows featured projects in all the pages works"""
with self.flask_app.app_context():
self.create()
app = db.session.query(App).get(1)
app.featured = True
db.session.add(app)
db.session.commit()
res = self.app.get('/app', follow_redirects=True)
assert self.html_title("Projects") in res.data, res.data
assert "Projects" in res.data, res.data
assert '/app/test-app' in res.data, res.data
assert '<h2><a href="/app/test-app/">My New Project</a></h2>' in res.data, res.data
# Update one task to have more answers than expected
task = db.session.query(Task).get(1)
task.n_answers=1
db.session.add(task)
db.session.commit()
task = db.session.query(Task).get(1)
cat = db.session.query(Category).get(1)
url = '/app/category/featured/'
res = self.app.get(url, follow_redirects=True)
assert '1 Featured Projects' in res.data, res.data
@with_context
@patch('pybossa.ckan.requests.get')
@patch('pybossa.view.applications.uploader.upload_file', return_value=True)
def test_10_get_application(self, Mock, mock2):
"""Test WEB project URL/<short_name> works"""
# Sign in and create a project
with self.flask_app.app_context():
html_request = FakeRequest(json.dumps(self.pkg_json_not_found), 200,
{'content-type': 'application/json'})
Mock.return_value = html_request
self.register()
res = self.new_application()
res = self.app.get('/app/sampleapp', follow_redirects=True)
msg = "Project: Sample Project"
assert self.html_title(msg) in res.data, res
err_msg = "There should be a contribute button"
assert "Start Contributing Now" in res.data, err_msg
res = self.app.get('/app/sampleapp/settings', follow_redirects=True)
assert res.status == '200 OK', res.status
self.signout()
# Now as an anonymous user
res = self.app.get('/app/sampleapp', follow_redirects=True)
assert self.html_title("Project: Sample Project") in res.data, res
assert "Start Contributing Now" in res.data, err_msg
res = self.app.get('/app/sampleapp/settings', follow_redirects=True)
assert res.status == '200 OK', res.status
err_msg = "Anonymous user should be redirected to sign in page"
assert "Please sign in to access this page" in res.data, err_msg
# Now with a different user
self.register(fullname="Perico Palotes", name="perico")
res = self.app.get('/app/sampleapp', follow_redirects=True)
assert self.html_title("Project: Sample Project") in res.data, res
assert "Start Contributing Now" in res.data, err_msg
res = self.app.get('/app/sampleapp/settings')
assert res.status == '403 FORBIDDEN', res.status
@with_context
@patch('pybossa.view.applications.uploader.upload_file', return_value=True)
def test_10b_application_long_description_allows_markdown(self, mock):
"""Test WEB long description markdown is supported"""
with self.flask_app.app_context():
markdown_description = u'Markdown\n======='
self.register()
self.new_application(long_description=markdown_description)
res = self.app.get('/app/sampleapp', follow_redirects=True)
data = res.data
assert '<h1>Markdown</h1>' in data, 'Markdown text not being rendered!'
@with_context
@patch('pybossa.view.applications.uploader.upload_file', return_value=True)
def test_11_create_application(self, mock):
"""Test WEB create a project works"""
# Create a project as an anonymous user
with self.flask_app.app_context():
res = self.new_application(method="GET")
assert self.html_title("Sign in") in res.data, res
assert "Please sign in to access this page" in res.data, res
res = self.new_application()
assert self.html_title("Sign in") in res.data, res.data
assert "Please sign in to access this page." in res.data, res.data
# Sign in and create a project
res = self.register()
res = self.new_application(method="GET")
assert self.html_title("Create a Project") in res.data, res
assert "Create the project" in res.data, res
res = self.new_application(long_description='My Description')
assert "<strong>Sample Project</strong>: Update the project" in res.data
assert "Project created!" in res.data, res
app = db.session.query(App).first()
assert app.name == 'Sample Project', 'Different names %s' % app.name
assert app.short_name == 'sampleapp', \
'Different names %s' % app.short_name
assert app.long_description == 'My Description', \
"Long desc should be the same: %s" % app.long_description
assert app.category is not None, \
"A project should have a category after being created"
# After refactoring applications view, these 3 tests should be more isolated and moved to another place
@with_context
def test_description_is_generated_from_long_desc(self):
"""Test WEB when creating a project, the description field is
automatically filled in by truncating the long_description"""
self.register()
res = self.new_application(long_description="Hello")
app = db.session.query(App).first()
assert app.description == "Hello", app.description
@with_context
def test_description_is_generated_from_long_desc_formats(self):
"""Test WEB when when creating a project, the description generated
from the long_description is only text (no html, no markdown)"""
self.register()
res = self.new_application(long_description="## Hello")
app = db.session.query(App).first()
assert '##' not in app.description, app.description
assert '<h2>' not in app.description, app.description
@with_context
def test_description_is_generated_from_long_desc_truncates(self):
"""Test WEB when when creating a project, the description generated
from the long_description is only text (no html, no markdown)"""
self.register()
res = self.new_application(long_description="a"*300)
app = db.session.query(App).first()
assert len(app.description) == 255, len(app.description)
assert app.description[-3:] == '...'
@with_context
@patch('pybossa.view.applications.uploader.upload_file', return_value=True)
def test_11_a_create_application_errors(self, mock):
"""Test WEB create a project issues the errors"""
with self.flask_app.app_context():
self.register()
# Required fields checks
# Issue the error for the app.name
res = self.new_application(name="")
err_msg = "A project must have a name"
assert "This field is required" in res.data, err_msg
# Issue the error for the app.short_name
res = self.new_application(short_name="")
err_msg = "A project must have a short_name"
assert "This field is required" in res.data, err_msg
# Issue the error for the app.description
res = self.new_application(long_description="")
err_msg = "A project must have a description"
assert "This field is required" in res.data, err_msg
# Issue the error for the app.short_name
res = self.new_application(short_name='$#/|')
err_msg = "A project must have a short_name without |/$# chars"
assert '$#&\/| and space symbols are forbidden' in res.data, err_msg
# Now Unique checks
self.new_application()
res = self.new_application()
err_msg = "There should be a Unique field"
assert "Name is already taken" in res.data, err_msg
assert "Short Name is already taken" in res.data, err_msg
@with_context
@patch('pybossa.ckan.requests.get')
@patch('pybossa.view.applications.uploader.upload_file', return_value=True)
def test_12_update_application(self, Mock, mock):
"""Test WEB update project works"""
with self.flask_app.app_context():
html_request = FakeRequest(json.dumps(self.pkg_json_not_found), 200,
{'content-type': 'application/json'})
Mock.return_value = html_request
self.register()
self.new_application()
# Get the Update Project web page
res = self.update_application(method="GET")
msg = "Project: Sample Project · Update"
assert self.html_title(msg) in res.data, res
msg = 'input id="id" name="id" type="hidden" value="1"'
assert msg in res.data, res
assert "Save the changes" in res.data, res
# Check form validation
res = self.update_application(new_name="",
new_short_name="",
new_description="New description",
new_long_description='New long desc',
new_hidden=True)
assert "Please correct the errors" in res.data, res.data
# Update the project
res = self.update_application(new_name="New Sample Project",
new_short_name="newshortname",
new_description="New description",
new_long_description='New long desc',
new_hidden=True)
app = db.session.query(App).first()
assert "Project updated!" in res.data, res
err_msg = "Project name not updated %s" % app.name
assert app.name == "New Sample Project", err_msg
err_msg = "Project short name not updated %s" % app.short_name
assert app.short_name == "newshortname", err_msg
err_msg = "Project description not updated %s" % app.description
assert app.description == "New description", err_msg
err_msg = "Project long description not updated %s" % app.long_description
assert app.long_description == "New long desc", err_msg
err_msg = "Project hidden not updated %s" % app.hidden
assert app.hidden == 1, err_msg
# Check that the owner can access it even though is hidden
user = db.session.query(User).filter_by(name='johndoe').first()
user.admin = False
db.session.add(user)
db.session.commit()
res = self.app.get('/app/newshortname/')
err_msg = "Owner should be able to see his hidden app"
assert app.name in res.data, err_msg
self.signout()
res = self.register(fullname='Paco', name='paco')
url = '/app/newshortname/'
res = self.app.get(url, follow_redirects=True)
assert "Forbidden" in res.data, res.data
assert res.status_code == 403
tmp = db.session.query(App).first()
tmp.hidden = 0
db.session.add(tmp)
db.session.commit()
url = '/app/newshortname/update'
res = self.app.get(url, follow_redirects=True)
assert res.status_code == 403, res.status_code
tmp.hidden = 1
db.session.add(tmp)
db.session.commit()
user = db.session.query(User).filter_by(name='paco').first()
user.admin = True
db.session.add(user)
db.session.commit()
res = self.app.get('/app/newshortname/')
err_msg = "Root user should be able to see his hidden app"
assert app.name in res.data, err_msg
@with_context
def test_add_password_to_project(self):
"""Test WEB update sets a password for the project"""
self.register()
owner = db.session.query(User).first()
app = AppFactory.create(owner=owner)
self.update_application(id=app.id, short_name=app.short_name,
new_password='mysecret')
        assert app.needs_password(), 'Password not set'
@with_context
def test_remove_password_from_project(self):
"""Test WEB update removes the password of the project"""
self.register()
owner = db.session.query(User).first()
app = AppFactory.create(info={'passwd_hash': 'mysecret'}, owner=owner)
self.update_application(id=app.id, short_name=app.short_name,
new_password='')
assert not app.needs_password(), 'Password not deleted'
@with_context
def test_update_application_errors(self):
"""Test WEB update form validation issues the errors"""
with self.flask_app.app_context():
self.register()
self.new_application()
res = self.update_application(new_name="")
assert "This field is required" in res.data
res = self.update_application(new_short_name="")
assert "This field is required" in res.data
res = self.update_application(new_description="")
assert "You must provide a description." in res.data
res = self.update_application(new_description="a"*256)
assert "Field cannot be longer than 255 characters." in res.data
res = self.update_application(new_long_description="")
assert "This field is required" not in res.data
@with_context
@patch('pybossa.ckan.requests.get')
@patch('pybossa.view.applications.uploader.upload_file', return_value=True)
def test_13_hidden_applications(self, Mock, mock):
"""Test WEB hidden project works"""
with self.flask_app.app_context():
html_request = FakeRequest(json.dumps(self.pkg_json_not_found), 200,
{'content-type': 'application/json'})
Mock.return_value = html_request
self.register()
self.new_application()
self.update_application(new_hidden=True)
self.signout()
res = self.app.get('/app/', follow_redirects=True)
assert "Sample Project" not in res.data, res
res = self.app.get('/app/sampleapp', follow_redirects=True)
err_msg = "Hidden apps should return a 403"
            assert res.status_code == 403, err_msg
@with_context
@patch('pybossa.ckan.requests.get')
@patch('pybossa.view.applications.uploader.upload_file', return_value=True)
def test_13a_hidden_applications_owner(self, Mock, mock):
"""Test WEB hidden projects are shown to their owners"""
with self.flask_app.app_context():
html_request = FakeRequest(json.dumps(self.pkg_json_not_found), 200,
{'content-type': 'application/json'})
Mock.return_value = html_request
self.register()
self.new_application()
self.update_application(new_hidden=True)
res = self.app.get('/app/', follow_redirects=True)
assert "Sample Project" not in res.data, ("Projects should be hidden"
"in the index")
res = self.app.get('/app/sampleapp', follow_redirects=True)
assert "Sample Project" in res.data, ("Project should be shown to"
"the owner")
@with_context
@patch('pybossa.view.applications.uploader.upload_file', return_value=True)
def test_14_delete_application(self, mock):
"""Test WEB delete project works"""
with self.flask_app.app_context():
self.create()
self.register()
self.new_application()
res = self.delete_application(method="GET")
msg = "Project: Sample Project · Delete"
assert self.html_title(msg) in res.data, res
assert "No, do not delete it" in res.data, res
app = db.session.query(App).filter_by(short_name='sampleapp').first()
app.hidden = 1
db.session.add(app)
db.session.commit()
res = self.delete_application(method="GET")
msg = "Project: Sample Project · Delete"
assert self.html_title(msg) in res.data, res
assert "No, do not delete it" in res.data, res
res = self.delete_application()
assert "Project deleted!" in res.data, res
self.signin(email=Fixtures.email_addr2, password=Fixtures.password)
res = self.delete_application(short_name=Fixtures.app_short_name)
assert res.status_code == 403, res.status_code
@with_context
def test_15_twitter_email_warning(self):
"""Test WEB Twitter email warning works"""
# This test assumes that the user allows Twitter to authenticate,
# returning a valid resp. The only difference is a user object
# without a password
# Register a user and sign out
with self.flask_app.app_context():
user = User(name="tester", passwd_hash="tester",
fullname="tester",
email_addr="tester")
user.set_password('tester')
db.session.add(user)
db.session.commit()
db.session.query(User).all()
# Sign in again and check the warning message
self.signin(email="tester", password="tester")
res = self.app.get('/', follow_redirects=True)
msg = "Please update your e-mail address in your profile page, " \
"right now it is empty!"
user = db.session.query(User).get(1)
assert msg in res.data, res.data
@with_context
@patch('pybossa.view.applications.uploader.upload_file', return_value=True)
def test_16_task_status_completed(self, mock):
"""Test WEB Task Status Completed works"""
with self.flask_app.app_context():
self.register()
self.new_application()
app = db.session.query(App).first()
# We use a string here to check that it works too
task = Task(app_id=app.id, n_answers = 10)
db.session.add(task)
db.session.commit()
res = self.app.get('app/%s/tasks/browse' % (app.short_name),
follow_redirects=True)
dom = BeautifulSoup(res.data)
assert "Sample Project" in res.data, res.data
assert '0 of 10' in res.data, res.data
err_msg = "Download button should be disabled"
assert dom.find(id='nothingtodownload') is not None, err_msg
for i in range(5):
task_run = TaskRun(app_id=app.id, task_id=1,
info={'answer': 1})
db.session.add(task_run)
db.session.commit()
self.app.get('api/app/%s/newtask' % app.id)
res = self.app.get('app/%s/tasks/browse' % (app.short_name),
follow_redirects=True)
dom = BeautifulSoup(res.data)
assert "Sample Project" in res.data, res.data
assert '5 of 10' in res.data, res.data
err_msg = "Download Partial results button should be shown"
assert dom.find(id='partialdownload') is not None, err_msg
for i in range(5):
task_run = TaskRun(app_id=app.id, task_id=1,
info={'answer': 1})
db.session.add(task_run)
db.session.commit()
self.app.get('api/app/%s/newtask' % app.id)
self.signout()
app = db.session.query(App).first()
res = self.app.get('app/%s/tasks/browse' % (app.short_name),
follow_redirects=True)
assert "Sample Project" in res.data, res.data
msg = 'Task <span class="label label-success">#1</span>'
assert msg in res.data, res.data
assert '10 of 10' in res.data, res.data
dom = BeautifulSoup(res.data)
err_msg = "Download Full results button should be shown"
assert dom.find(id='fulldownload') is not None, err_msg
app.hidden = 1
db.session.add(app)
db.session.commit()
res = self.app.get('app/%s/tasks/browse' % (app.short_name),
follow_redirects=True)
assert res.status_code == 401, res.status_code
self.create()
self.signin(email=Fixtures.email_addr2, password=Fixtures.password)
res = self.app.get('app/%s/tasks/browse' % (app.short_name),
follow_redirects=True)
assert res.status_code == 403, res.status_code
@with_context
@patch('pybossa.view.applications.uploader.upload_file', return_value=True)
def test_17_export_task_runs(self, mock):
"""Test WEB TaskRun export works"""
with self.flask_app.app_context():
self.register()
self.new_application()
app = db.session.query(App).first()
task = Task(app_id=app.id, n_answers = 10)
db.session.add(task)
db.session.commit()
for i in range(10):
task_run = TaskRun(app_id=app.id, task_id=1, info={'answer': 1})
db.session.add(task_run)
db.session.commit()
app = db.session.query(App).first()
res = self.app.get('app/%s/%s/results.json' % (app.short_name, 1),
follow_redirects=True)
data = json.loads(res.data)
assert len(data) == 10, data
for tr in data:
assert tr['info']['answer'] == 1, tr
# Check with correct app but wrong task id
res = self.app.get('app/%s/%s/results.json' % (app.short_name, 5000),
follow_redirects=True)
assert res.status_code == 404, res.status_code
# Check with hidden app: owner should have access to it
app.hidden = 1
db.session.add(app)
db.session.commit()
res = self.app.get('app/%s/%s/results.json' % (app.short_name, 1),
follow_redirects=True)
data = json.loads(res.data)
assert len(data) == 10, data
for tr in data:
assert tr['info']['answer'] == 1, tr
self.signout()
# Check with hidden app: non-owner should not have access to it
self.register(fullname="Non Owner", name="nonowner")
res = self.app.get('app/%s/%s/results.json' % (app.short_name, 1),
follow_redirects=True)
assert res.status_code == 403, res.data
assert "Forbidden" in res.data, res.data
# Check with hidden app: anonymous should not have access to it
self.signout()
res = self.app.get('app/%s/%s/results.json' % (app.short_name, 1),
follow_redirects=True)
assert res.status_code == 401, res.data
assert "Unauthorized" in res.data, res.data
@with_context
@patch('pybossa.view.applications.uploader.upload_file', return_value=True)
def test_18_task_status_wip(self, mock):
"""Test WEB Task Status on going works"""
with self.flask_app.app_context():
self.register()
self.new_application()
app = db.session.query(App).first()
task = Task(app_id=app.id, n_answers = 10)
db.session.add(task)
db.session.commit()
self.signout()
app = db.session.query(App).first()
res = self.app.get('app/%s/tasks/browse' % (app.short_name),
follow_redirects=True)
assert "Sample Project" in res.data, res.data
msg = 'Task <span class="label label-info">#1</span>'
assert msg in res.data, res.data
assert '0 of 10' in res.data, res.data
# For a non existing page
res = self.app.get('app/%s/tasks/browse/5000' % (app.short_name),
follow_redirects=True)
assert res.status_code == 404, res.status_code
@with_context
def test_19_app_index_categories(self):
"""Test WEB Project Index categories works"""
with self.flask_app.app_context():
self.register()
self.create()
self.signout()
res = self.app.get('app', follow_redirects=True)
assert "Projects" in res.data, res.data
assert Fixtures.cat_1 in res.data, res.data
task = db.session.query(Task).get(1)
# Update one task to have more answers than expected
            task.n_answers = 1
db.session.add(task)
db.session.commit()
task = db.session.query(Task).get(1)
cat = db.session.query(Category).get(1)
url = '/app/category/%s/' % Fixtures.cat_1
res = self.app.get(url, follow_redirects=True)
tmp = '1 %s Projects' % Fixtures.cat_1
assert tmp in res.data, res
@with_context
def test_app_index_categories_pagination(self):
"""Test WEB Project Index categories pagination works"""
from flask import current_app
n_apps = current_app.config.get('APPS_PER_PAGE')
current_app.config['APPS_PER_PAGE'] = 1
category = CategoryFactory.create(name='category', short_name='cat')
for project in AppFactory.create_batch(2, category=category):
TaskFactory.create(app=project)
page1 = self.app.get('/app/category/%s/' % category.short_name)
page2 = self.app.get('/app/category/%s/page/2/' % category.short_name)
current_app.config['APPS_PER_PAGE'] = n_apps
assert '<a href="/app/category/cat/page/2/">Next »</a>' in page1.data
assert page2.status_code == 200, page2.status_code
assert '<a href="/app/category/cat/">« Prev </a>' in page2.data
@with_context
@patch('pybossa.view.applications.uploader.upload_file', return_value=True)
def test_20_app_index_published(self, mock):
"""Test WEB Project Index published works"""
with self.flask_app.app_context():
self.register()
self.new_application()
self.update_application(new_category_id="1")
app = db.session.query(App).first()
info = dict(task_presenter="some html")
app.info = info
db.session.commit()
task = Task(app_id=app.id, n_answers = 10)
db.session.add(task)
db.session.commit()
self.signout()
res = self.app.get('app', follow_redirects=True)
assert "%s Projects" % Fixtures.cat_1 in res.data, res.data
assert "draft" not in res.data, res.data
assert "Sample Project" in res.data, res.data
@with_context
@patch('pybossa.view.applications.uploader.upload_file', return_value=True)
def test_20_app_index_draft(self, mock):
"""Test WEB Project Index draft works"""
# Create root
with self.flask_app.app_context():
self.register()
self.new_application()
self.signout()
# Create a user
self.register(fullname="jane", name="jane", email="[email protected]")
self.signout()
# As Anonymous
res = self.app.get('/app/category/draft', follow_redirects=True)
dom = BeautifulSoup(res.data)
err_msg = "Anonymous should not see draft apps"
assert dom.find(id='signin') is not None, err_msg
# As authenticated but not admin
self.signin(email="[email protected]", password="p4ssw0rd")
res = self.app.get('/app/category/draft', follow_redirects=True)
assert res.status_code == 403, "Non-admin should not see draft apps"
self.signout()
# As Admin
self.signin()
res = self.app.get('/app/category/draft', follow_redirects=True)
assert "project-published" not in res.data, res.data
assert "draft" in res.data, res.data
assert "Sample Project" in res.data, res.data
assert '1 Draft Projects' in res.data, res.data
@with_context
def test_21_get_specific_ongoing_task_anonymous(self):
"""Test WEB get specific ongoing task_id for
a project works as anonymous"""
with self.flask_app.app_context():
self.create()
self.delete_task_runs()
app = db.session.query(App).first()
task = db.session.query(Task)\
.filter(App.id == app.id)\
.first()
res = self.app.get('app/%s/task/%s' % (app.short_name, task.id),
follow_redirects=True)
assert 'TaskPresenter' in res.data, res.data
msg = "?next=%2Fapp%2F" + app.short_name + "%2Ftask%2F" + str(task.id)
assert msg in res.data, res.data
# Try with a hidden app
app.hidden = 1
db.session.add(app)
db.session.commit()
res = self.app.get('app/%s/task/%s' % (app.short_name, task.id),
follow_redirects=True)
assert 'Unauthorized' in res.data, res.data
assert res.status_code == 401, res.status_code
# Try with only registered users
app.allow_anonymous_contributors = False
app.hidden = 0
db.session.add(app)
db.session.commit()
res = self.app.get('app/%s/task/%s' % (app.short_name, task.id),
follow_redirects=True)
assert "sign in to participate" in res.data
@with_context
def test_23_get_specific_ongoing_task_user(self):
"""Test WEB get specific ongoing task_id for a project works as an user"""
with self.flask_app.app_context():
self.create()
self.delete_task_runs()
self.register()
self.signin()
app = db.session.query(App).first()
task = db.session.query(Task)\
.filter(App.id == app.id)\
.first()
res = self.app.get('app/%s/task/%s' % (app.short_name, task.id),
follow_redirects=True)
assert 'TaskPresenter' in res.data, res.data
self.signout()
@with_context
@patch('pybossa.view.applications.uploader.upload_file', return_value=True)
def test_25_get_wrong_task_app(self, mock):
"""Test WEB get wrong task.id for a project works"""
with self.flask_app.app_context():
self.create()
app1 = db.session.query(App).get(1)
app1_short_name = app1.short_name
db.session.query(Task)\
.filter(Task.app_id == 1)\
.first()
self.register()
self.new_application()
app2 = db.session.query(App).get(2)
self.new_task(app2.id)
task2 = db.session.query(Task)\
.filter(Task.app_id == 2)\
.first()
task2_id = task2.id
self.signout()
res = self.app.get('/app/%s/task/%s' % (app1_short_name, task2_id))
assert "Error" in res.data, res.data
msg = "This task does not belong to %s" % app1_short_name
assert msg in res.data, res.data
@with_context
def test_26_tutorial_signed_user(self):
"""Test WEB tutorials work as signed in user"""
with self.flask_app.app_context():
self.create()
app1 = db.session.query(App).get(1)
app1.info = dict(tutorial="some help")
db.session.commit()
self.register()
# First time accessing the app should redirect me to the tutorial
res = self.app.get('/app/test-app/newtask', follow_redirects=True)
err_msg = "There should be some tutorial for the project"
assert "some help" in res.data, err_msg
# Second time should give me a task, and not the tutorial
res = self.app.get('/app/test-app/newtask', follow_redirects=True)
assert "some help" not in res.data
# Check if the tutorial can be accessed directly
res = self.app.get('/app/test-app/tutorial', follow_redirects=True)
err_msg = "There should be some tutorial for the project"
assert "some help" in res.data, err_msg
# Hidden app
app1.hidden = 1
db.session.add(app1)
db.session.commit()
url = '/app/%s/tutorial' % app1.short_name
res = self.app.get(url, follow_redirects=True)
assert res.status_code == 403, res.status_code
@with_context
def test_27_tutorial_anonymous_user(self):
"""Test WEB tutorials work as an anonymous user"""
with self.flask_app.app_context():
self.create()
app1 = db.session.query(App).get(1)
app1.info = dict(tutorial="some help")
db.session.commit()
# First time accessing the app should redirect me to the tutorial
res = self.app.get('/app/test-app/newtask', follow_redirects=True)
err_msg = "There should be some tutorial for the project"
assert "some help" in res.data, err_msg
# Second time should give me a task, and not the tutorial
res = self.app.get('/app/test-app/newtask', follow_redirects=True)
assert "some help" not in res.data
# Check if the tutorial can be accessed directly
res = self.app.get('/app/test-app/tutorial', follow_redirects=True)
err_msg = "There should be some tutorial for the project"
assert "some help" in res.data, err_msg
# Hidden app
app1.hidden = 1
db.session.add(app1)
db.session.commit()
res = self.app.get('/app/test-app/tutorial', follow_redirects=True)
assert res.status_code == 401, res.status_code
@with_context
def test_28_non_tutorial_signed_user(self):
"""Test WEB project without tutorial work as signed in user"""
with self.flask_app.app_context():
self.create()
db.session.commit()
self.register()
# First time accessing the app should redirect me to the tutorial
res = self.app.get('/app/test-app/newtask', follow_redirects=True)
err_msg = "There should not be a tutorial for the project"
assert "some help" not in res.data, err_msg
# Second time should give me a task, and not the tutorial
res = self.app.get('/app/test-app/newtask', follow_redirects=True)
assert "some help" not in res.data
@with_context
def test_29_tutorial_anonymous_user(self):
"""Test WEB project without tutorials work as an anonymous user"""
with self.flask_app.app_context():
self.create()
db.session.commit()
self.register()
# First time accessing the app should redirect me to the tutorial
res = self.app.get('/app/test-app/newtask', follow_redirects=True)
err_msg = "There should not be a tutorial for the project"
assert "some help" not in res.data, err_msg
# Second time should give me a task, and not the tutorial
res = self.app.get('/app/test-app/newtask', follow_redirects=True)
assert "some help" not in res.data
@with_context
@patch('pybossa.view.applications.uploader.upload_file', return_value=True)
def test_30_app_id_owner(self, mock):
"""Test WEB project settings page shows the ID to the owner"""
self.register()
self.new_application()
res = self.app.get('/app/sampleapp/settings', follow_redirects=True)
assert "Sample Project" in res.data, ("Project should be shown to "
"the owner")
msg = '<strong><i class="icon-cog"></i> ID</strong>: 1'
err_msg = "Project ID should be shown to the owner"
assert msg in res.data, err_msg
self.signout()
with self.flask_app.app_context():
self.create()
self.signin(email=Fixtures.email_addr2, password=Fixtures.password)
res = self.app.get('/app/sampleapp/settings', follow_redirects=True)
assert res.status_code == 403, res.status_code
@with_context
@patch('pybossa.view.applications.uploader.upload_file', return_value=True)
@patch('pybossa.ckan.requests.get')
def test_30_app_id_anonymous_user(self, Mock, mock):
"""Test WEB project page does not show the ID to anonymous users"""
html_request = FakeRequest(json.dumps(self.pkg_json_not_found), 200,
{'content-type': 'application/json'})
Mock.return_value = html_request
self.register()
self.new_application()
self.signout()
res = self.app.get('/app/sampleapp', follow_redirects=True)
assert "Sample Project" in res.data, ("Project name should be shown"
" to users")
        assert '<strong><i class="icon-cog"></i> ID</strong>: 1' not in \
            res.data, "Project ID should not be shown to anonymous users"
@with_context
@patch('pybossa.view.applications.uploader.upload_file', return_value=True)
def test_31_user_profile_progress(self, mock):
"""Test WEB user progress profile page works"""
self.register()
self.new_application()
app = db.session.query(App).first()
task = Task(app_id=app.id, n_answers = 10)
db.session.add(task)
db.session.commit()
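        # Create 10 task runs for this user so progress shows up on the profile page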
for i in range(10):
task_run = TaskRun(app_id=app.id, task_id=1, user_id=1,
info={'answer': 1})
db.session.add(task_run)
db.session.commit()
self.app.get('api/app/%s/newtask' % app.id)
res = self.app.get('account/johndoe', follow_redirects=True)
assert "Sample Project" in res.data, res.data
assert "Contribute!" in res.data, "There should be a Contribute button"
@with_context
def test_32_oauth_password(self):
"""Test WEB user sign in without password works"""
user = User(email_addr="[email protected]",
name=self.user.username,
passwd_hash=None,
fullname=self.user.fullname,
api_key="api-key")
db.session.add(user)
db.session.commit()
res = self.signin()
assert "Ooops, we didn't find you in the system" in res.data, res.data
@with_context
@patch('pybossa.view.applications.uploader.upload_file', return_value=True)
@patch('pybossa.importers.requests.get')
def test_33_bulk_csv_import_unauthorized(self, Mock, mock):
"""Test WEB bulk import unauthorized works"""
unauthorized_request = FakeRequest('Unauthorized', 403,
{'content-type': 'text/csv'})
Mock.return_value = unauthorized_request
self.register()
self.new_application()
app = db.session.query(App).first()
url = '/app/%s/tasks/import?template=csv' % (app.short_name)
res = self.app.post(url, data={'csv_url': 'http://myfakecsvurl.com',
'formtype': 'csv', 'form_name': 'csv'},
follow_redirects=True)
msg = "Oops! It looks like you don't have permission to access that file"
assert msg in res.data, res.data
@with_context
@patch('pybossa.importers.requests.get')
@patch('pybossa.view.applications.uploader.upload_file', return_value=True)
def test_34_bulk_csv_import_non_html(self, Mock, mock):
"""Test WEB bulk import non html works"""
html_request = FakeRequest('Not a CSV', 200,
{'content-type': 'text/html'})
Mock.return_value = html_request
self.register()
self.new_application()
app = db.session.query(App).first()
url = '/app/%s/tasks/import?template=csv' % (app.short_name)
res = self.app.post(url, data={'csv_url': 'http://myfakecsvurl.com',
'form_name': 'csv'},
follow_redirects=True)
assert "Oops! That file doesn't look like the right file." in res.data
@with_context
@patch('pybossa.view.applications.uploader.upload_file', return_value=True)
@patch('pybossa.importers.requests.get')
    def test_35_bulk_csv_import_empty_file(self, Mock, mock):
        """Test WEB bulk import empty CSV file works"""
empty_file = FakeRequest('CSV,with,no,content\n', 200,
{'content-type': 'text/plain'})
Mock.return_value = empty_file
self.register()
self.new_application()
app = db.session.query(App).first()
url = '/app/%s/tasks/import?template=csv' % (app.short_name)
res = self.app.post(url, data={'csv_url': 'http://myfakecsvurl.com',
'formtype': 'csv', 'form_name': 'csv'},
follow_redirects=True)
assert "Oops! It looks like the file is empty." in res.data
@with_context
@patch('pybossa.view.applications.uploader.upload_file', return_value=True)
@patch('pybossa.importers.requests.get')
def test_36_bulk_csv_import_dup_header(self, Mock, mock):
"""Test WEB bulk import duplicate header works"""
empty_file = FakeRequest('Foo,Bar,Foo\n1,2,3', 200,
{'content-type': 'text/plain'})
Mock.return_value = empty_file
self.register()
self.new_application()
app = db.session.query(App).first()
url = '/app/%s/tasks/import?template=csv' % (app.short_name)
res = self.app.post(url, data={'csv_url': 'http://myfakecsvurl.com',
'formtype': 'csv', 'form_name': 'csv'},
follow_redirects=True)
msg = "The file you uploaded has two headers with the same name"
assert msg in res.data
@with_context
@patch('pybossa.view.applications.uploader.upload_file', return_value=True)
@patch('pybossa.importers.requests.get')
def test_37_bulk_csv_import_no_column_names(self, Mock, mock):
"""Test WEB bulk import no column names works"""
empty_file = FakeRequest('Foo,Bar,Baz\n1,2,3', 200,
{'content-type': 'text/plain'})
Mock.return_value = empty_file
self.register()
self.new_application()
app = db.session.query(App).first()
url = '/app/%s/tasks/import?template=csv' % (app.short_name)
res = self.app.post(url, data={'csv_url': 'http://myfakecsvurl.com',
'formtype': 'csv', 'form_name': 'csv'},
follow_redirects=True)
task = db.session.query(Task).first()
assert {u'Bar': u'2', u'Foo': u'1', u'Baz': u'3'} == task.info
assert "1 Task imported successfully!" in res.data
@with_context
@patch('pybossa.view.applications.uploader.upload_file', return_value=True)
@patch('pybossa.importers.requests.get')
def test_38_bulk_csv_import_with_column_name(self, Mock, mock):
"""Test WEB bulk import with column name works"""
empty_file = FakeRequest('Foo,Bar,priority_0\n1,2,3', 200,
{'content-type': 'text/plain'})
Mock.return_value = empty_file
self.register()
self.new_application()
app = db.session.query(App).first()
url = '/app/%s/tasks/import?template=csv' % (app.short_name)
res = self.app.post(url, data={'csv_url': 'http://myfakecsvurl.com',
'formtype': 'csv', 'form_name': 'csv'},
follow_redirects=True)
task = db.session.query(Task).first()
assert {u'Bar': u'2', u'Foo': u'1'} == task.info
assert task.priority_0 == 3
assert "1 Task imported successfully!" in res.data
# Check that only new items are imported
empty_file = FakeRequest('Foo,Bar,priority_0\n1,2,3\n4,5,6', 200,
{'content-type': 'text/plain'})
Mock.return_value = empty_file
app = db.session.query(App).first()
url = '/app/%s/tasks/import?template=csv' % (app.short_name)
res = self.app.post(url, data={'csv_url': 'http://myfakecsvurl.com',
'formtype': 'csv', 'form_name': 'csv'},
follow_redirects=True)
app = db.session.query(App).first()
assert len(app.tasks) == 2, "There should be only 2 tasks"
n = 0
csv_tasks = [{u'Foo': u'1', u'Bar': u'2'}, {u'Foo': u'4', u'Bar': u'5'}]
for t in app.tasks:
assert t.info == csv_tasks[n], "The task info should be the same"
n += 1
@with_context
@patch('pybossa.view.applications.uploader.upload_file', return_value=True)
@patch('pybossa.importers.requests.get')
def test_38_bulk_gdocs_import(self, Mock, mock):
"""Test WEB bulk GDocs import works."""
empty_file = FakeRequest('Foo,Bar,priority_0\n1,2,3', 200,
{'content-type': 'text/plain'})
Mock.return_value = empty_file
self.register()
self.new_application()
app = db.session.query(App).first()
url = '/app/%s/tasks/import?template=csv' % (app.short_name)
res = self.app.post(url, data={'googledocs_url': 'http://drive.google.com',
'formtype': 'gdocs', 'form_name': 'gdocs'},
follow_redirects=True)
task = db.session.query(Task).first()
assert {u'Bar': u'2', u'Foo': u'1'} == task.info
assert task.priority_0 == 3
assert "1 Task imported successfully!" in res.data
# Check that only new items are imported
empty_file = FakeRequest('Foo,Bar,priority_0\n1,2,3\n4,5,6', 200,
{'content-type': 'text/plain'})
Mock.return_value = empty_file
app = db.session.query(App).first()
url = '/app/%s/tasks/import?template=csv' % (app.short_name)
res = self.app.post(url, data={'googledocs_url': 'http://drive.google.com',
'formtype': 'gdocs', 'form_name': 'gdocs'},
follow_redirects=True)
app = db.session.query(App).first()
assert len(app.tasks) == 2, "There should be only 2 tasks"
n = 0
csv_tasks = [{u'Foo': u'1', u'Bar': u'2'}, {u'Foo': u'4', u'Bar': u'5'}]
for t in app.tasks:
assert t.info == csv_tasks[n], "The task info should be the same"
n += 1
# Check that only new items are imported
empty_file = FakeRequest('Foo,Bar,priority_0\n1,2,3\n4,5,6', 200,
{'content-type': 'text/plain'})
Mock.return_value = empty_file
app = db.session.query(App).first()
url = '/app/%s/tasks/import?template=csv' % (app.short_name)
res = self.app.post(url, data={'googledocs_url': 'http://drive.google.com',
'formtype': 'gdocs', 'form_name': 'gdocs'},
follow_redirects=True)
app = db.session.query(App).first()
assert len(app.tasks) == 2, "There should be only 2 tasks"
n = 0
csv_tasks = [{u'Foo': u'1', u'Bar': u'2'}, {u'Foo': u'4', u'Bar': u'5'}]
for t in app.tasks:
assert t.info == csv_tasks[n], "The task info should be the same"
n += 1
assert "no new records" in res.data, res.data
@with_context
def test_39_google_oauth_creation(self):
"""Test WEB Google OAuth creation of user works"""
fake_response = {
u'access_token': u'access_token',
u'token_type': u'Bearer',
u'expires_in': 3600,
u'id_token': u'token'}
fake_user = {
u'family_name': u'Doe', u'name': u'John Doe',
u'picture': u'https://goo.gl/img.jpg',
u'locale': u'en',
u'gender': u'male',
u'email': u'[email protected]',
u'birthday': u'0000-01-15',
u'link': u'https://plus.google.com/id',
u'given_name': u'John',
u'id': u'111111111111111111111',
u'verified_email': True}
from pybossa.view import google
response_user = google.manage_user(fake_response['access_token'],
fake_user, None)
user = db.session.query(User).get(1)
assert user.email_addr == response_user.email_addr, response_user
@with_context
def test_40_google_oauth_creation(self):
"""Test WEB Google OAuth detects same user name/email works"""
fake_response = {
u'access_token': u'access_token',
u'token_type': u'Bearer',
u'expires_in': 3600,
u'id_token': u'token'}
fake_user = {
u'family_name': u'Doe', u'name': u'John Doe',
u'picture': u'https://goo.gl/img.jpg',
u'locale': u'en',
u'gender': u'male',
u'email': u'[email protected]',
u'birthday': u'0000-01-15',
u'link': u'https://plus.google.com/id',
u'given_name': u'John',
u'id': u'111111111111111111111',
u'verified_email': True}
self.register()
self.signout()
from pybossa.view import google
response_user = google.manage_user(fake_response['access_token'],
fake_user, None)
assert response_user is None, response_user
@with_context
def test_39_facebook_oauth_creation(self):
"""Test WEB Facebook OAuth creation of user works"""
fake_response = {
u'access_token': u'access_token',
u'token_type': u'Bearer',
u'expires_in': 3600,
u'id_token': u'token'}
fake_user = {
u'username': u'teleyinex',
u'first_name': u'John',
u'last_name': u'Doe',
u'verified': True,
u'name': u'John Doe',
u'locale': u'en_US',
u'gender': u'male',
u'email': u'[email protected]',
u'quotes': u'"quote',
u'link': u'http://www.facebook.com/johndoe',
u'timezone': 1,
u'updated_time': u'2011-11-11T12:33:52+0000',
u'id': u'11111'}
from pybossa.view import facebook
response_user = facebook.manage_user(fake_response['access_token'],
fake_user, None)
user = db.session.query(User).get(1)
assert user.email_addr == response_user.email_addr, response_user
@with_context
def test_40_facebook_oauth_creation(self):
"""Test WEB Facebook OAuth detects same user name/email works"""
fake_response = {
u'access_token': u'access_token',
u'token_type': u'Bearer',
u'expires_in': 3600,
u'id_token': u'token'}
fake_user = {
u'username': u'teleyinex',
u'first_name': u'John',
u'last_name': u'Doe',
u'verified': True,
u'name': u'John Doe',
u'locale': u'en_US',
u'gender': u'male',
u'email': u'[email protected]',
u'quotes': u'"quote',
u'link': u'http://www.facebook.com/johndoe',
u'timezone': 1,
u'updated_time': u'2011-11-11T12:33:52+0000',
u'id': u'11111'}
self.register()
self.signout()
from pybossa.view import facebook
response_user = facebook.manage_user(fake_response['access_token'],
fake_user, None)
assert response_user is None, response_user
@with_context
def test_39_twitter_oauth_creation(self):
"""Test WEB Twitter OAuth creation of user works"""
fake_response = {
u'access_token': {u'oauth_token': u'oauth_token',
u'oauth_token_secret': u'oauth_token_secret'},
u'token_type': u'Bearer',
u'expires_in': 3600,
u'id_token': u'token'}
fake_user = {u'screen_name': u'johndoe',
u'user_id': u'11111'}
from pybossa.view import twitter
response_user = twitter.manage_user(fake_response['access_token'],
fake_user, None)
user = db.session.query(User).get(1)
assert user.email_addr == response_user.email_addr, response_user
res = self.signin(email=user.email_addr, password='wrong')
msg = "It seems like you signed up with your Twitter account"
assert msg in res.data, msg
@with_context
def test_40_twitter_oauth_creation(self):
"""Test WEB Twitter OAuth detects same user name/email works"""
fake_response = {
u'access_token': {u'oauth_token': u'oauth_token',
u'oauth_token_secret': u'oauth_token_secret'},
u'token_type': u'Bearer',
u'expires_in': 3600,
u'id_token': u'token'}
fake_user = {u'screen_name': u'johndoe',
u'user_id': u'11111'}
self.register()
self.signout()
from pybossa.view import twitter
response_user = twitter.manage_user(fake_response['access_token'],
fake_user, None)
assert response_user is None, response_user
@with_context
def test_41_password_change(self):
"""Test WEB password changing"""
password = "mehpassword"
self.register(password=password)
res = self.app.post('/account/johndoe/update',
data={'current_password': password,
'new_password': "p4ssw0rd",
'confirm': "p4ssw0rd",
'btn': 'Password'},
follow_redirects=True)
assert "Yay, you changed your password succesfully!" in res.data, res.data
password = "p4ssw0rd"
self.signin(password=password)
res = self.app.post('/account/johndoe/update',
data={'current_password': "wrongpassword",
'new_password': "p4ssw0rd",
'confirm': "p4ssw0rd",
'btn': 'Password'},
follow_redirects=True)
msg = "Your current password doesn't match the one in our records"
assert msg in res.data
res = self.app.post('/account/johndoe/update',
data={'current_password': '',
'new_password':'',
'confirm': '',
'btn': 'Password'},
follow_redirects=True)
msg = "Please correct the errors"
assert msg in res.data
@with_context
def test_42_password_link(self):
"""Test WEB visibility of password change link"""
self.register()
res = self.app.get('/account/johndoe/update')
assert "Change your Password" in res.data
user = User.query.get(1)
user.twitter_user_id = 1234
db.session.add(user)
db.session.commit()
res = self.app.get('/account/johndoe/update')
assert "Change your Password" not in res.data, res.data
@with_context
def test_43_terms_of_use_and_data(self):
"""Test WEB terms of use is working"""
res = self.app.get('account/signin', follow_redirects=True)
assert "/help/terms-of-use" in res.data, res.data
assert "http://opendatacommons.org/licenses/by/" in res.data, res.data
res = self.app.get('account/register', follow_redirects=True)
assert "http://okfn.org/terms-of-use/" in res.data, res.data
assert "http://opendatacommons.org/licenses/by/" in res.data, res.data
@with_context
@patch('pybossa.view.account.signer.loads')
def test_44_password_reset_key_errors(self, Mock):
"""Test WEB password reset key errors are caught"""
self.register()
user = User.query.get(1)
userdict = {'user': user.name, 'password': user.passwd_hash}
fakeuserdict = {'user': user.name, 'password': 'wronghash'}
fakeuserdict_err = {'user': user.name, 'passwd': 'some'}
fakeuserdict_form = {'user': user.name, 'passwd': 'p4ssw0rD'}
key = signer.dumps(userdict, salt='password-reset')
returns = [BadSignature('Fake Error'), BadSignature('Fake Error'), userdict,
fakeuserdict, userdict, userdict, fakeuserdict_err]
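        # The mocked signer.loads pops one value per request below; BadSignature entries are raised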
def side_effects(*args, **kwargs):
result = returns.pop(0)
if isinstance(result, BadSignature):
raise result
return result
Mock.side_effect = side_effects
# Request with no key
res = self.app.get('/account/reset-password', follow_redirects=True)
assert 403 == res.status_code
# Request with invalid key
res = self.app.get('/account/reset-password?key=foo', follow_redirects=True)
assert 403 == res.status_code
# Request with key exception
res = self.app.get('/account/reset-password?key=%s' % (key), follow_redirects=True)
assert 403 == res.status_code
res = self.app.get('/account/reset-password?key=%s' % (key), follow_redirects=True)
assert 200 == res.status_code
res = self.app.get('/account/reset-password?key=%s' % (key), follow_redirects=True)
assert 403 == res.status_code
# Check validation
res = self.app.post('/account/reset-password?key=%s' % (key),
data={'new_password': '',
'confirm': '#4a4'},
follow_redirects=True)
assert "Please correct the errors" in res.data, res.data
res = self.app.post('/account/reset-password?key=%s' % (key),
data={'new_password': 'p4ssw0rD',
'confirm': 'p4ssw0rD'},
follow_redirects=True)
assert "You reset your password successfully!" in res.data
# Request without password
res = self.app.get('/account/reset-password?key=%s' % (key), follow_redirects=True)
assert 403 == res.status_code
@with_context
def test_45_password_reset_link(self):
"""Test WEB password reset email form"""
res = self.app.post('/account/forgot-password',
data={'email_addr': self.user.email_addr},
follow_redirects=True)
assert ("We don't have this email in our records. You may have"
" signed up with a different email or used Twitter, "
"Facebook, or Google to sign-in") in res.data
self.register()
self.register(name='janedoe')
self.register(name='google')
self.register(name='facebook')
jane = User.query.get(2)
jane.twitter_user_id = 10
google = User.query.get(3)
google.google_user_id = 103
facebook = User.query.get(4)
facebook.facebook_user_id = 104
db.session.add_all([jane, google, facebook])
db.session.commit()
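        # Record outgoing e-mails to check the recovery hint sent for each account type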
with mail.record_messages() as outbox:
self.app.post('/account/forgot-password',
data={'email_addr': self.user.email_addr},
follow_redirects=True)
self.app.post('/account/forgot-password',
data={'email_addr': '[email protected]'},
follow_redirects=True)
self.app.post('/account/forgot-password',
data={'email_addr': '[email protected]'},
follow_redirects=True)
self.app.post('/account/forgot-password',
data={'email_addr': '[email protected]'},
follow_redirects=True)
assert 'Click here to recover your account' in outbox[0].body
assert 'your Twitter account to ' in outbox[1].body
assert 'your Google account to ' in outbox[2].body
assert 'your Facebook account to ' in outbox[3].body
# Test with not valid form
res = self.app.post('/account/forgot-password',
data={'email_addr': ''},
follow_redirects=True)
msg = "Something went wrong, please correct the errors"
assert msg in res.data, res.data
@with_context
@patch('pybossa.view.applications.uploader.upload_file', return_value=True)
def test_46_tasks_exists(self, mock):
"""Test WEB tasks page works."""
self.register()
self.new_application()
res = self.app.get('/app/sampleapp/tasks/', follow_redirects=True)
assert "Edit the task presenter" in res.data, \
"Task Presenter Editor should be an option"
app = db.session.query(App).first()
app.hidden = 1
db.session.add(app)
db.session.commit()
# As owner
res = self.app.get('/app/sampleapp/tasks/', follow_redirects=True)
assert res.status_code == 200, res.status_code
assert "Edit the task presenter" in res.data, \
"Task Presenter Editor should be an option"
self.signout()
# As anonymous
res = self.app.get('/app/sampleapp/tasks/', follow_redirects=True)
assert res.status_code == 401, res.status_code
with self.flask_app.app_context():
self.create()
# As another user, but not owner
self.signin(email=Fixtures.email_addr2, password=Fixtures.password)
res = self.app.get('/app/sampleapp/tasks/', follow_redirects=True)
assert res.status_code == 403, res.status_code
self.signout()
@with_context
@patch('pybossa.view.applications.uploader.upload_file', return_value=True)
def test_47_task_presenter_editor_loads(self, mock):
"""Test WEB task presenter editor loads"""
self.register()
self.new_application()
res = self.app.get('/app/sampleapp/tasks/taskpresentereditor',
follow_redirects=True)
err_msg = "Task Presenter options not found"
assert "Task Presenter Editor" in res.data, err_msg
err_msg = "Basic template not found"
assert "The most basic template" in res.data, err_msg
err_msg = "Image Pattern Recognition not found"
assert "Flickr Person Finder template" in res.data, err_msg
        err_msg = "Geo-coding template not found"
        assert "Urban Park template" in res.data, err_msg
        err_msg = "Transcribing documents template not found"
        assert "PDF transcription template" in res.data, err_msg
@with_context
@patch('pybossa.view.applications.uploader.upload_file', return_value=True)
def test_48_task_presenter_editor_works(self, mock):
"""Test WEB task presenter editor works"""
self.register()
self.new_application()
app = db.session.query(App).first()
err_msg = "Task Presenter should be empty"
assert not app.info.get('task_presenter'), err_msg
res = self.app.get('/app/sampleapp/tasks/taskpresentereditor?template=basic',
follow_redirects=True)
assert "var editor" in res.data, "CodeMirror Editor not found"
assert "Task Presenter" in res.data, "CodeMirror Editor not found"
assert "Task Presenter Preview" in res.data, "CodeMirror View not found"
res = self.app.post('/app/sampleapp/tasks/taskpresentereditor',
data={'editor': 'Some HTML code!'},
follow_redirects=True)
assert "Sample Project" in res.data, "Does not return to app details"
app = db.session.query(App).first()
err_msg = "Task Presenter failed to update"
assert app.info['task_presenter'] == 'Some HTML code!', err_msg
# Check it loads the previous posted code:
res = self.app.get('/app/sampleapp/tasks/taskpresentereditor',
follow_redirects=True)
assert "Some HTML code" in res.data, res.data
# Now with hidden apps
app.hidden = 1
db.session.add(app)
db.session.commit()
res = self.app.get('/app/sampleapp/tasks/taskpresentereditor?template=basic',
follow_redirects=True)
assert "var editor" in res.data, "CodeMirror Editor not found"
assert "Task Presenter" in res.data, "CodeMirror Editor not found"
assert "Task Presenter Preview" in res.data, "CodeMirror View not found"
res = self.app.post('/app/sampleapp/tasks/taskpresentereditor',
data={'editor': 'Some HTML code!'},
follow_redirects=True)
assert "Sample Project" in res.data, "Does not return to app details"
app = db.session.query(App).first()
err_msg = "Task Presenter failed to update"
assert app.info['task_presenter'] == 'Some HTML code!', err_msg
# Check it loads the previous posted code:
res = self.app.get('/app/sampleapp/tasks/taskpresentereditor',
follow_redirects=True)
assert "Some HTML code" in res.data, res.data
self.signout()
with self.flask_app.app_context():
self.create()
self.signin(email=Fixtures.email_addr2, password=Fixtures.password)
res = self.app.get('/app/sampleapp/tasks/taskpresentereditor?template=basic',
follow_redirects=True)
assert res.status_code == 403
@with_context
@patch('pybossa.ckan.requests.get')
@patch('pybossa.view.applications.uploader.upload_file', return_value=True)
def test_48_update_app_info(self, Mock, mock):
"""Test WEB project update/edit works keeping previous info values"""
html_request = FakeRequest(json.dumps(self.pkg_json_not_found), 200,
{'content-type': 'application/json'})
Mock.return_value = html_request
self.register()
self.new_application()
app = db.session.query(App).first()
err_msg = "Task Presenter should be empty"
assert not app.info.get('task_presenter'), err_msg
res = self.app.post('/app/sampleapp/tasks/taskpresentereditor',
data={'editor': 'Some HTML code!'},
follow_redirects=True)
assert "Sample Project" in res.data, "Does not return to app details"
app = db.session.query(App).first()
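        # Store extra keys in app.info to verify they survive the update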
for i in range(10):
key = "key_%s" % i
app.info[key] = i
db.session.add(app)
db.session.commit()
_info = app.info
self.update_application()
app = db.session.query(App).first()
for key in _info:
assert key in app.info.keys(), \
"The key %s is lost and it should be here" % key
assert app.name == "Sample Project", "The project has not been updated"
error_msg = "The project description has not been updated"
assert app.description == "Description", error_msg
error_msg = "The project long description has not been updated"
assert app.long_description == "Long desc", error_msg
@with_context
@patch('pybossa.view.applications.uploader.upload_file', return_value=True)
def test_49_announcement_messages(self, mock):
"""Test WEB announcement messages works"""
self.register()
res = self.app.get("/", follow_redirects=True)
error_msg = "There should be a message for the root user"
print res.data
assert "Root Message" in res.data, error_msg
error_msg = "There should be a message for the user"
assert "User Message" in res.data, error_msg
error_msg = "There should not be an owner message"
assert "Owner Message" not in res.data, error_msg
# Now make the user a project owner
self.new_application()
res = self.app.get("/", follow_redirects=True)
error_msg = "There should be a message for the root user"
assert "Root Message" in res.data, error_msg
error_msg = "There should be a message for the user"
assert "User Message" in res.data, error_msg
error_msg = "There should be an owner message"
assert "Owner Message" in res.data, error_msg
self.signout()
# Register another user
self.register(fullname="Jane Doe", name="janedoe",
password="janedoe", email="[email protected]")
res = self.app.get("/", follow_redirects=True)
error_msg = "There should not be a message for the root user"
assert "Root Message" not in res.data, error_msg
error_msg = "There should be a message for the user"
assert "User Message" in res.data, error_msg
error_msg = "There should not be an owner message"
assert "Owner Message" not in res.data, error_msg
self.signout()
# Now as an anonymous user
res = self.app.get("/", follow_redirects=True)
error_msg = "There should not be a message for the root user"
assert "Root Message" not in res.data, error_msg
error_msg = "There should not be a message for the user"
assert "User Message" not in res.data, error_msg
error_msg = "There should not be an owner message"
assert "Owner Message" not in res.data, error_msg
@with_context
def test_50_export_task_json(self):
"""Test WEB export Tasks to JSON works"""
Fixtures.create()
        # First test for a non-existent app
uri = '/app/somethingnotexists/tasks/export'
res = self.app.get(uri, follow_redirects=True)
assert res.status == '404 NOT FOUND', res.status
# Now get the tasks in JSON format
uri = "/app/somethingnotexists/tasks/export?type=task&format=json"
res = self.app.get(uri, follow_redirects=True)
assert res.status == '404 NOT FOUND', res.status
# Now with a real app
uri = '/app/%s/tasks/export' % Fixtures.app_short_name
res = self.app.get(uri, follow_redirects=True)
heading = "<strong>%s</strong>: Export All Tasks and Task Runs" % Fixtures.app_name
assert heading in res.data, "Export page should be available\n %s" % res.data
# Now test that a 404 is raised when an arg is invalid
uri = "/app/%s/tasks/export?type=ask&format=json" % Fixtures.app_short_name
res = self.app.get(uri, follow_redirects=True)
assert res.status == '404 NOT FOUND', res.status
uri = "/app/%s/tasks/export?format=json" % Fixtures.app_short_name
res = self.app.get(uri, follow_redirects=True)
assert res.status == '404 NOT FOUND', res.status
uri = "/app/%s/tasks/export?type=task" % Fixtures.app_short_name
res = self.app.get(uri, follow_redirects=True)
assert res.status == '404 NOT FOUND', res.status
# And a 415 is raised if the requested format is not supported or invalid
uri = "/app/%s/tasks/export?type=task&format=gson" % Fixtures.app_short_name
res = self.app.get(uri, follow_redirects=True)
assert res.status == '415 UNSUPPORTED MEDIA TYPE', res.status
# Now get the tasks in JSON format
uri = "/app/%s/tasks/export?type=task&format=json" % Fixtures.app_short_name
res = self.app.get(uri, follow_redirects=True)
exported_tasks = json.loads(res.data)
app = db.session.query(App)\
.filter_by(short_name=Fixtures.app_short_name)\
.first()
err_msg = "The number of exported tasks is different from App Tasks"
assert len(exported_tasks) == len(app.tasks), err_msg
# Tasks are exported as an attached file
content_disposition = 'attachment; filename=test-app_task.json'
assert res.headers.get('Content-Disposition') == content_disposition, res.headers
app.hidden = 1
db.session.add(app)
db.session.commit()
res = self.app.get('app/%s/tasks/export' % (app.short_name),
follow_redirects=True)
assert res.status_code == 401, res.status_code
self.signin(email=Fixtures.email_addr2, password=Fixtures.password)
res = self.app.get('app/%s/tasks/export' % (app.short_name),
follow_redirects=True)
assert res.status_code == 403, res.status_code
# Owner
self.signin(email=Fixtures.email_addr, password=Fixtures.password)
res = self.app.get('app/%s/tasks/export' % (app.short_name),
follow_redirects=True)
assert res.status_code == 200, res.status_code
def test_export_task_json_support_non_latin1_project_names(self):
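        """Test WEB export Tasks to JSON works with non-Latin-1 project names"""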
app = AppFactory.create(name='Измени Киев!', short_name='Измени Киев!')
res = self.app.get('app/%s/tasks/export?type=task&format=json' % app.short_name,
follow_redirects=True)
assert 'Измени Киев!' in res.headers.get('Content-Disposition'), res
def test_export_taskrun_json_support_non_latin1_project_names(self):
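        """Test WEB export Task Runs to JSON works with non-Latin-1 project names"""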
app = AppFactory.create(name='Измени Киев!', short_name='Измени Киев!')
res = self.app.get('app/%s/tasks/export?type=task_run&format=json' % app.short_name,
follow_redirects=True)
print res
assert 'Измени Киев!' in res.headers.get('Content-Disposition'), res
def test_export_task_csv_support_non_latin1_project_names(self):
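        """Test WEB export Tasks to CSV works with non-Latin-1 project names"""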
app = AppFactory.create(name='Измени Киев!', short_name='Измени Киев!')
TaskFactory.create(app=app)
res = self.app.get('/app/%s/tasks/export?type=task&format=csv' % app.short_name,
follow_redirects=True)
assert 'Измени Киев!' in res.headers.get('Content-Disposition'), res
def test_export_taskrun_csv_support_non_latin1_project_names(self):
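        """Test WEB export Task Runs to CSV works with non-Latin-1 project names"""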
app = AppFactory.create(name='Измени Киев!', short_name='Измени Киев!')
task = TaskFactory.create(app=app)
TaskRunFactory.create(task=task)
res = self.app.get('/app/%s/tasks/export?type=task_run&format=csv' % app.short_name,
follow_redirects=True)
print res
assert 'Измени Киев!' in res.headers.get('Content-Disposition'), res
@with_context
def test_51_export_taskruns_json(self):
"""Test WEB export Task Runs to JSON works"""
Fixtures.create()
        # First test for a non-existent app
uri = '/app/somethingnotexists/tasks/export'
res = self.app.get(uri, follow_redirects=True)
assert res.status == '404 NOT FOUND', res.status
# Now get the tasks in JSON format
uri = "/app/somethingnotexists/tasks/export?type=task&format=json"
res = self.app.get(uri, follow_redirects=True)
assert res.status == '404 NOT FOUND', res.status
# Now with a real app
uri = '/app/%s/tasks/export' % Fixtures.app_short_name
res = self.app.get(uri, follow_redirects=True)
heading = "<strong>%s</strong>: Export All Tasks and Task Runs" % Fixtures.app_name
assert heading in res.data, "Export page should be available\n %s" % res.data
# Now get the tasks in JSON format
uri = "/app/%s/tasks/export?type=task_run&format=json" % Fixtures.app_short_name
res = self.app.get(uri, follow_redirects=True)
exported_task_runs = json.loads(res.data)
app = db.session.query(App)\
.filter_by(short_name=Fixtures.app_short_name)\
.first()
err_msg = "The number of exported task runs is different from App Tasks"
assert len(exported_task_runs) == len(app.task_runs), err_msg
# Task runs are exported as an attached file
content_disposition = 'attachment; filename=test-app_task_run.json'
assert res.headers.get('Content-Disposition') == content_disposition, res.headers
@with_context
@patch('pybossa.view.applications.uploader.upload_file', return_value=True)
def test_52_export_task_csv(self, mock):
"""Test WEB export Tasks to CSV works"""
#Fixtures.create()
        # First test for a non-existent app
uri = '/app/somethingnotexists/tasks/export'
res = self.app.get(uri, follow_redirects=True)
assert res.status == '404 NOT FOUND', res.status
# Now get the tasks in CSV format
uri = "/app/somethingnotexists/tasks/export?type=task&format=csv"
res = self.app.get(uri, follow_redirects=True)
assert res.status == '404 NOT FOUND', res.status
# Now get the wrong table name in CSV format
uri = "/app/%s/tasks/export?type=wrong&format=csv" % Fixtures.app_short_name
res = self.app.get(uri, follow_redirects=True)
assert res.status == '404 NOT FOUND', res.status
# Now with a real app
app = AppFactory.create()
for i in range(0, 5):
task = TaskFactory.create(app=app, info={'question': i})
uri = '/app/%s/tasks/export' % app.short_name
res = self.app.get(uri, follow_redirects=True)
heading = "<strong>%s</strong>: Export All Tasks and Task Runs" % app.name
assert heading in res.data, "Export page should be available\n %s" % res.data
# Now get the tasks in CSV format
uri = "/app/%s/tasks/export?type=task&format=csv" % app.short_name
res = self.app.get(uri, follow_redirects=True)
csv_content = StringIO.StringIO(res.data)
csvreader = unicode_csv_reader(csv_content)
app = db.session.query(App)\
.filter_by(short_name=app.short_name)\
.first()
exported_tasks = []
n = 0
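        # The first CSV row holds the column headers; the remaining rows are the exported tasks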
for row in csvreader:
print row
if n != 0:
exported_tasks.append(row)
else:
keys = row
n = n + 1
err_msg = "The number of exported tasks is different from App Tasks"
assert len(exported_tasks) == len(app.tasks), err_msg
for t in app.tasks:
err_msg = "All the task column names should be included"
for tk in t.dictize().keys():
expected_key = "task__%s" % tk
assert expected_key in keys, err_msg
err_msg = "All the task.info column names should be included"
for tk in t.info.keys():
expected_key = "taskinfo__%s" % tk
assert expected_key in keys, err_msg
for et in exported_tasks:
task_id = et[keys.index('task__id')]
task = db.session.query(Task).get(task_id)
task_dict = task.dictize()
for k in task_dict:
slug = 'task__%s' % k
err_msg = "%s != %s" % (task_dict[k], et[keys.index(slug)])
if k != 'info':
assert unicode(task_dict[k]) == et[keys.index(slug)], err_msg
else:
assert json.dumps(task_dict[k]) == et[keys.index(slug)], err_msg
for k in task_dict['info'].keys():
slug = 'taskinfo__%s' % k
err_msg = "%s != %s" % (task_dict['info'][k], et[keys.index(slug)])
assert unicode(task_dict['info'][k]) == et[keys.index(slug)], err_msg
# Tasks are exported as an attached file
content_disposition = 'attachment; filename=app1_task.csv'
assert res.headers.get('Content-Disposition') == content_disposition, res.headers
# With an empty app
app = AppFactory.create()
# Now get the tasks in CSV format
uri = "/app/%s/tasks/export?type=task&format=csv" % app.short_name
res = self.app.get(uri, follow_redirects=True)
msg = "project does not have tasks"
assert msg in res.data, msg
@with_context
def test_53_export_task_runs_csv(self):
"""Test WEB export Task Runs to CSV works"""
        # First test for a non-existent app
uri = '/app/somethingnotexists/tasks/export'
res = self.app.get(uri, follow_redirects=True)
assert res.status == '404 NOT FOUND', res.status
# Now get the tasks in CSV format
        uri = "/app/somethingnotexists/tasks/export?type=task&format=csv"
res = self.app.get(uri, follow_redirects=True)
assert res.status == '404 NOT FOUND', res.status
# Now with a real app
app = AppFactory.create()
task = TaskFactory.create(app=app)
for i in range(2):
task_run = TaskRunFactory.create(app=app, task=task, info={'answer': i})
uri = '/app/%s/tasks/export' % app.short_name
res = self.app.get(uri, follow_redirects=True)
heading = "<strong>%s</strong>: Export All Tasks and Task Runs" % app.name
assert heading in res.data, "Export page should be available\n %s" % res.data
# Now get the tasks in CSV format
uri = "/app/%s/tasks/export?type=task_run&format=csv" % app.short_name
res = self.app.get(uri, follow_redirects=True)
csv_content = StringIO.StringIO(res.data)
csvreader = unicode_csv_reader(csv_content)
app = db.session.query(App)\
.filter_by(short_name=app.short_name)\
.first()
exported_task_runs = []
n = 0
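        # The first CSV row holds the column headers; the remaining rows are the exported task runs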
for row in csvreader:
if n != 0:
exported_task_runs.append(row)
else:
keys = row
n = n + 1
        err_msg = ("The number of exported task runs is different from "
                   "App Task Runs: %s != %s"
                   % (len(exported_task_runs), len(app.task_runs)))
assert len(exported_task_runs) == len(app.task_runs), err_msg
for t in app.tasks[0].task_runs:
for tk in t.dictize().keys():
expected_key = "task_run__%s" % tk
assert expected_key in keys, expected_key
for tk in t.info.keys():
expected_key = "task_runinfo__%s" % tk
assert expected_key in keys, expected_key
for et in exported_task_runs:
task_run_id = et[keys.index('task_run__id')]
task_run = db.session.query(TaskRun).get(task_run_id)
task_run_dict = task_run.dictize()
for k in task_run_dict:
slug = 'task_run__%s' % k
err_msg = "%s != %s" % (task_run_dict[k], et[keys.index(slug)])
if k != 'info':
assert unicode(task_run_dict[k]) == et[keys.index(slug)], err_msg
else:
assert json.dumps(task_run_dict[k]) == et[keys.index(slug)], err_msg
for k in task_run_dict['info'].keys():
slug = 'task_runinfo__%s' % k
err_msg = "%s != %s" % (task_run_dict['info'][k], et[keys.index(slug)])
assert unicode(task_run_dict['info'][k]) == et[keys.index(slug)], err_msg
# Task runs are exported as an attached file
content_disposition = 'attachment; filename=app1_task_run.csv'
assert res.headers.get('Content-Disposition') == content_disposition, res.headers
@with_context
@patch('pybossa.view.applications.Ckan', autospec=True)
    def test_export_tasks_ckan_exception(self, mock1):
        """Test WEB Export CKAN Tasks reports a CKAN exception."""
        mocks = [Mock()]
from test_ckan import TestCkanModule
fake_ckn = TestCkanModule()
package = fake_ckn.pkg_json_found
package['id'] = 3
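        # Simulate CKAN raising a server-side error when the package is checked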
mocks[0].package_exists.return_value = (False,
Exception("CKAN: error",
"error", 500))
# mocks[0].package_create.return_value = fake_ckn.pkg_json_found
# mocks[0].resource_create.return_value = dict(result=dict(id=3))
# mocks[0].datastore_create.return_value = 'datastore'
# mocks[0].datastore_upsert.return_value = 'datastore'
        mock1.side_effect = mocks
        Fixtures.create()
user = db.session.query(User).filter_by(name=Fixtures.name).first()
app = db.session.query(App).first()
user.ckan_api = 'ckan-api-key'
app.owner_id = user.id
db.session.add(user)
db.session.add(app)
db.session.commit()
self.signin(email=user.email_addr, password=Fixtures.password)
# Now with a real app
uri = '/app/%s/tasks/export' % Fixtures.app_short_name
res = self.app.get(uri, follow_redirects=True)
heading = "<strong>%s</strong>: Export All Tasks and Task Runs" % Fixtures.app_name
assert heading in res.data, "Export page should be available\n %s" % res.data
# Now get the tasks in CKAN format
uri = "/app/%s/tasks/export?type=task&format=ckan" % Fixtures.app_short_name
with patch.dict(self.flask_app.config, {'CKAN_URL': 'http://ckan.com'}):
# First time exporting the package
res = self.app.get(uri, follow_redirects=True)
msg = 'Error'
err_msg = "An exception should be raised"
assert msg in res.data, err_msg
@with_context
@patch('pybossa.view.applications.Ckan', autospec=True)
    def test_export_tasks_ckan_connection_error(self, mock1):
        """Test WEB Export CKAN Tasks reports a CKAN connection error."""
        mocks = [Mock()]
from test_ckan import TestCkanModule
fake_ckn = TestCkanModule()
package = fake_ckn.pkg_json_found
package['id'] = 3
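        # Simulate the CKAN server being unreachable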
mocks[0].package_exists.return_value = (False, ConnectionError)
# mocks[0].package_create.return_value = fake_ckn.pkg_json_found
# mocks[0].resource_create.return_value = dict(result=dict(id=3))
# mocks[0].datastore_create.return_value = 'datastore'
# mocks[0].datastore_upsert.return_value = 'datastore'
        mock1.side_effect = mocks
        Fixtures.create()
user = db.session.query(User).filter_by(name=Fixtures.name).first()
app = db.session.query(App).first()
user.ckan_api = 'ckan-api-key'
app.owner_id = user.id
db.session.add(user)
db.session.add(app)
db.session.commit()
self.signin(email=user.email_addr, password=Fixtures.password)
# Now with a real app
uri = '/app/%s/tasks/export' % Fixtures.app_short_name
res = self.app.get(uri, follow_redirects=True)
heading = "<strong>%s</strong>: Export All Tasks and Task Runs" % Fixtures.app_name
assert heading in res.data, "Export page should be available\n %s" % res.data
# Now get the tasks in CKAN format
uri = "/app/%s/tasks/export?type=task&format=ckan" % Fixtures.app_short_name
with patch.dict(self.flask_app.config, {'CKAN_URL': 'http://ckan.com'}):
# First time exporting the package
res = self.app.get(uri, follow_redirects=True)
msg = 'CKAN server seems to be down'
err_msg = "A connection exception should be raised"
assert msg in res.data, err_msg
@with_context
@patch('pybossa.view.applications.Ckan', autospec=True)
def test_task_export_tasks_ckan_first_time(self, mock1):
"""Test WEB Export CKAN Tasks works without an existing package."""
        # The package does not exist in CKAN yet, so this is the first export
mocks = [Mock()]
resource = dict(name='task', id=1)
package = dict(id=3, resources=[resource])
mocks[0].package_exists.return_value = (None, None)
mocks[0].package_create.return_value = package
#mocks[0].datastore_delete.return_value = None
mocks[0].datastore_create.return_value = None
mocks[0].datastore_upsert.return_value = None
mocks[0].resource_create.return_value = dict(result=dict(id=3))
mocks[0].datastore_create.return_value = 'datastore'
mocks[0].datastore_upsert.return_value = 'datastore'
mock1.side_effect = mocks
Fixtures.create()
user = db.session.query(User).filter_by(name=Fixtures.name).first()
app = db.session.query(App).first()
user.ckan_api = 'ckan-api-key'
app.owner_id = user.id
db.session.add(user)
db.session.add(app)
db.session.commit()
self.signin(email=user.email_addr, password=Fixtures.password)
        # First test for a non-existent app
uri = '/app/somethingnotexists/tasks/export'
res = self.app.get(uri, follow_redirects=True)
assert res.status == '404 NOT FOUND', res.status
# Now get the tasks in CKAN format
uri = "/app/somethingnotexists/tasks/export?type=task&format=ckan"
res = self.app.get(uri, follow_redirects=True)
assert res.status == '404 NOT FOUND', res.status
# Now get the tasks in CKAN format
uri = "/app/somethingnotexists/tasks/export?type=other&format=ckan"
res = self.app.get(uri, follow_redirects=True)
assert res.status == '404 NOT FOUND', res.status
# Now with a real app
uri = '/app/%s/tasks/export' % Fixtures.app_short_name
res = self.app.get(uri, follow_redirects=True)
heading = "<strong>%s</strong>: Export All Tasks and Task Runs" % Fixtures.app_name
assert heading in res.data, "Export page should be available\n %s" % res.data
# Now get the tasks in CKAN format
uri = "/app/%s/tasks/export?type=task&format=ckan" % Fixtures.app_short_name
#res = self.app.get(uri, follow_redirects=True)
with patch.dict(self.flask_app.config, {'CKAN_URL': 'http://ckan.com'}):
# First time exporting the package
res = self.app.get(uri, follow_redirects=True)
msg = 'Data exported to http://ckan.com'
err_msg = "Tasks should be exported to CKAN"
assert msg in res.data, err_msg
@with_context
@patch('pybossa.view.applications.Ckan', autospec=True)
def test_task_export_tasks_ckan_second_time(self, mock1):
"""Test WEB Export CKAN Tasks works with an existing package."""
# Second time exporting the package
mocks = [Mock()]
resource = dict(name='task', id=1)
package = dict(id=3, resources=[resource])
mocks[0].package_exists.return_value = (package, None)
mocks[0].package_update.return_value = package
mocks[0].datastore_delete.return_value = None
mocks[0].datastore_create.return_value = None
mocks[0].datastore_upsert.return_value = None
mocks[0].resource_create.return_value = dict(result=dict(id=3))
mocks[0].datastore_create.return_value = 'datastore'
mocks[0].datastore_upsert.return_value = 'datastore'
mock1.side_effect = mocks
Fixtures.create()
user = db.session.query(User).filter_by(name=Fixtures.name).first()
app = db.session.query(App).first()
user.ckan_api = 'ckan-api-key'
app.owner_id = user.id
db.session.add(user)
db.session.add(app)
db.session.commit()
self.signin(email=user.email_addr, password=Fixtures.password)
        # First test for a non-existent app
uri = '/app/somethingnotexists/tasks/export'
res = self.app.get(uri, follow_redirects=True)
assert res.status == '404 NOT FOUND', res.status
# Now get the tasks in CKAN format
uri = "/app/somethingnotexists/tasks/export?type=task&format=ckan"
res = self.app.get(uri, follow_redirects=True)
assert res.status == '404 NOT FOUND', res.status
# Now with a real app
uri = '/app/%s/tasks/export' % Fixtures.app_short_name
res = self.app.get(uri, follow_redirects=True)
heading = "<strong>%s</strong>: Export All Tasks and Task Runs" % Fixtures.app_name
assert heading in res.data, "Export page should be available\n %s" % res.data
# Now get the tasks in CKAN format
uri = "/app/%s/tasks/export?type=task&format=ckan" % Fixtures.app_short_name
#res = self.app.get(uri, follow_redirects=True)
with patch.dict(self.flask_app.config, {'CKAN_URL': 'http://ckan.com'}):
# First time exporting the package
res = self.app.get(uri, follow_redirects=True)
msg = 'Data exported to http://ckan.com'
err_msg = "Tasks should be exported to CKAN"
assert msg in res.data, err_msg
@with_context
@patch('pybossa.view.applications.Ckan', autospec=True)
def test_task_export_tasks_ckan_without_resources(self, mock1):
"""Test WEB Export CKAN Tasks works without resources ."""
mocks = [Mock()]
package = dict(id=3, resources=[])
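        # Simulate an existing CKAN package that has no resources yet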
mocks[0].package_exists.return_value = (package, None)
mocks[0].package_update.return_value = package
mocks[0].resource_create.return_value = dict(result=dict(id=3))
mocks[0].datastore_create.return_value = 'datastore'
mocks[0].datastore_upsert.return_value = 'datastore'
mock1.side_effect = mocks
Fixtures.create()
user = db.session.query(User).filter_by(name=Fixtures.name).first()
app = db.session.query(App).first()
user.ckan_api = 'ckan-api-key'
app.owner_id = user.id
db.session.add(user)
db.session.add(app)
db.session.commit()
self.signin(email=user.email_addr, password=Fixtures.password)
        # First test for a non-existent app
uri = '/app/somethingnotexists/tasks/export'
res = self.app.get(uri, follow_redirects=True)
assert res.status == '404 NOT FOUND', res.status
# Now get the tasks in CKAN format
uri = "/app/somethingnotexists/tasks/export?type=task&format=ckan"
res = self.app.get(uri, follow_redirects=True)
assert res.status == '404 NOT FOUND', res.status
# Now with a real app
uri = '/app/%s/tasks/export' % Fixtures.app_short_name
res = self.app.get(uri, follow_redirects=True)
heading = "<strong>%s</strong>: Export All Tasks and Task Runs" % Fixtures.app_name
assert heading in res.data, "Export page should be available\n %s" % res.data
# Now get the tasks in CKAN format
uri = "/app/%s/tasks/export?type=task&format=ckan" % Fixtures.app_short_name
#res = self.app.get(uri, follow_redirects=True)
with patch.dict(self.flask_app.config, {'CKAN_URL': 'http://ckan.com'}):
# First time exporting the package
res = self.app.get(uri, follow_redirects=True)
msg = 'Data exported to http://ckan.com'
err_msg = "Tasks should be exported to CKAN"
assert msg in res.data, err_msg
@with_context
@patch('pybossa.view.applications.uploader.upload_file', return_value=True)
def test_54_import_tasks(self, mock):
"""Test WEB import Task templates should work"""
Fixtures.create()
self.register()
self.new_application()
# Without tasks, there should be a template
res = self.app.get('/app/sampleapp/tasks/import', follow_redirects=True)
err_msg = "There should be a CSV template"
assert "template=csv" in res.data, err_msg
err_msg = "There should be an Image template"
assert "mode=image" in res.data, err_msg
err_msg = "There should be a Map template"
assert "mode=map" in res.data, err_msg
err_msg = "There should be a PDF template"
assert "mode=pdf" in res.data, err_msg
# With tasks
self.new_task(1)
res = self.app.get('/app/sampleapp/tasks/import', follow_redirects=True)
err_msg = "There should load directly the basic template"
err_msg = "There should not be a CSV template"
assert "template=basic" not in res.data, err_msg
err_msg = "There should not be an Image template"
assert "template=image" not in res.data, err_msg
err_msg = "There should not be a Map template"
assert "template=map" not in res.data, err_msg
err_msg = "There should not be a PDF template"
assert "template=pdf" not in res.data, err_msg
self.signout()
self.signin(email=Fixtures.email_addr2, password=Fixtures.password)
res = self.app.get('/app/sampleapp/tasks/import', follow_redirects=True)
assert res.status_code == 403, res.status_code
@with_context
def test_55_facebook_account_warning(self):
"""Test WEB Facebook OAuth user gets a hint to sign in"""
user = User(fullname='John',
name='john',
email_addr='[email protected]',
info={})
user.info = dict(facebook_token=u'facebook')
msg, method = get_user_signup_method(user)
err_msg = "Should return 'facebook' but returned %s" % method
assert method == 'facebook', err_msg
user.info = dict(google_token=u'google')
msg, method = get_user_signup_method(user)
err_msg = "Should return 'google' but returned %s" % method
assert method == 'google', err_msg
user.info = dict(twitter_token=u'twitter')
msg, method = get_user_signup_method(user)
err_msg = "Should return 'twitter' but returned %s" % method
assert method == 'twitter', err_msg
user.info = {}
msg, method = get_user_signup_method(user)
err_msg = "Should return 'local' but returned %s" % method
assert method == 'local', err_msg
@with_context
def test_56_delete_tasks(self):
"""Test WEB delete tasks works"""
Fixtures.create()
# Anonymous user
res = self.app.get('/app/test-app/tasks/delete', follow_redirects=True)
err_msg = "Anonymous user should be redirected for authentication"
assert "Please sign in to access this page" in res.data, err_msg
err_msg = "Anonymous user should not be allowed to delete tasks"
res = self.app.post('/app/test-app/tasks/delete', follow_redirects=True)
err_msg = "Anonymous user should not be allowed to delete tasks"
assert "Please sign in to access this page" in res.data, err_msg
# Authenticated user but not owner
self.register()
res = self.app.get('/app/test-app/tasks/delete', follow_redirects=True)
err_msg = "Authenticated user but not owner should get 403 FORBIDDEN in GET"
assert res.status == '403 FORBIDDEN', err_msg
res = self.app.post('/app/test-app/tasks/delete', follow_redirects=True)
err_msg = "Authenticated user but not owner should get 403 FORBIDDEN in POST"
assert res.status == '403 FORBIDDEN', err_msg
self.signout()
# Owner
tasks = db.session.query(Task).filter_by(app_id=1).all()
res = self.signin(email=u'[email protected]', password=u'tester')
res = self.app.get('/app/test-app/tasks/delete', follow_redirects=True)
err_msg = "Owner user should get 200 in GET"
assert res.status == '200 OK', err_msg
assert len(tasks) > 0, "len(app.tasks) > 0"
res = self.app.post('/app/test-app/tasks/delete', follow_redirects=True)
err_msg = "Owner should get 200 in POST"
assert res.status == '200 OK', err_msg
tasks = db.session.query(Task).filter_by(app_id=1).all()
assert len(tasks) == 0, "len(app.tasks) != 0"
# Admin
res = self.signin(email=u'[email protected]', password=u'tester' + 'root')
res = self.app.get('/app/test-app/tasks/delete', follow_redirects=True)
err_msg = "Admin user should get 200 in GET"
assert res.status_code == 200, err_msg
res = self.app.post('/app/test-app/tasks/delete', follow_redirects=True)
err_msg = "Admin should get 200 in POST"
assert res.status_code == 200, err_msg
@with_context
def test_57_reset_api_key(self):
"""Test WEB reset api key works"""
url = "/account/johndoe/update"
# Anonymous user
res = self.app.get(url, follow_redirects=True)
err_msg = "Anonymous user should be redirected for authentication"
assert "Please sign in to access this page" in res.data, err_msg
res = self.app.post(url, follow_redirects=True)
assert "Please sign in to access this page" in res.data, err_msg
# Authenticated user
self.register()
user = db.session.query(User).get(1)
url = "/account/%s/update" % user.name
api_key = user.api_key
res = self.app.get(url, follow_redirects=True)
err_msg = "Authenticated user should get access to reset api key page"
assert res.status_code == 200, err_msg
assert "reset your personal API Key" in res.data, err_msg
url = "/account/%s/resetapikey" % user.name
res = self.app.post(url, follow_redirects=True)
err_msg = "Authenticated user should be able to reset his api key"
assert res.status_code == 200, err_msg
user = db.session.query(User).get(1)
err_msg = "New generated API key should be different from old one"
assert api_key != user.api_key, err_msg
self.signout()
self.register(fullname="new", name="new")
res = self.app.post(url)
assert res.status_code == 403, res.status_code
url = "/account/fake/resetapikey"
res = self.app.post(url)
assert res.status_code == 404, res.status_code
@with_context
@patch('pybossa.view.stats.get_locs', return_value=[{'latitude':0, 'longitude':0}])
def test_58_global_stats(self, mock1):
"""Test WEB global stats of the site works"""
Fixtures.create()
url = "/stats"
res = self.app.get(url, follow_redirects=True)
err_msg = "There should be a Global Statistics page of the project"
assert "General Statistics" in res.data, err_msg
with patch.dict(self.flask_app.config, {'GEO': True}):
res = self.app.get(url, follow_redirects=True)
assert "GeoLite" in res.data, res.data
@with_context
def test_59_help_api(self):
"""Test WEB help api page exists"""
Fixtures.create()
url = "/help/api"
res = self.app.get(url, follow_redirects=True)
err_msg = "There should be a help api page"
assert "API Help" in res.data, err_msg
@with_context
def test_59_help_license(self):
"""Test WEB help license page exists."""
url = "/help/license"
res = self.app.get(url, follow_redirects=True)
err_msg = "There should be a help license page"
assert "Licenses" in res.data, err_msg
@with_context
def test_59_about(self):
"""Test WEB help about page exists."""
url = "/about"
res = self.app.get(url, follow_redirects=True)
err_msg = "There should be an about page"
assert "About" in res.data, err_msg
@with_context
def test_59_help_tos(self):
"""Test WEB help TOS page exists."""
url = "/help/terms-of-use"
res = self.app.get(url, follow_redirects=True)
err_msg = "There should be a TOS page"
assert "Terms for use" in res.data, err_msg
@with_context
def test_59_help_policy(self):
"""Test WEB help policy page exists."""
url = "/help/cookies-policy"
res = self.app.get(url, follow_redirects=True)
err_msg = "There should be a TOS page"
assert "uses cookies" in res.data, err_msg
@with_context
def test_69_allow_anonymous_contributors(self):
"""Test WEB allow anonymous contributors works"""
Fixtures.create()
app = db.session.query(App).first()
url = '/app/%s/newtask' % app.short_name
# All users are allowed to participate by default
# As Anonymous user
res = self.app.get(url, follow_redirects=True)
err_msg = "The anonymous user should be able to participate"
assert app.name in res.data, err_msg
# As registered user
self.register()
self.signin()
res = self.app.get(url, follow_redirects=True)
err_msg = "The anonymous user should be able to participate"
assert app.name in res.data, err_msg
self.signout()
# Now only allow authenticated users
app.allow_anonymous_contributors = False
db.session.add(app)
db.session.commit()
# As Anonymous user
res = self.app.get(url, follow_redirects=True)
err_msg = "User should be redirected to sign in"
app = db.session.query(App).first()
msg = "Oops! You have to sign in to participate in <strong>%s</strong>" % app.name
assert msg in res.data, err_msg
# As registered user
res = self.signin()
res = self.app.get(url, follow_redirects=True)
err_msg = "The authenticated user should be able to participate"
assert app.name in res.data, err_msg
self.signout()
# However if the app is hidden, it should be forbidden
app.hidden = 1
db.session.add(app)
db.session.commit()
# As Anonymous user
res = self.app.get(url, follow_redirects=True)
assert res.status_code == 401, res.status_code
# As registered user
self.signin()
res = self.app.get(url, follow_redirects=True)
assert res.status_code == 403, res.status_code
self.signout()
# As admin
self.signin(email=Fixtures.root_addr, password=Fixtures.root_password)
res = self.app.get(url, follow_redirects=True)
assert res.status_code == 200, res.status_code
self.signout()
# As owner
self.signin(email=Fixtures.email_addr, password=Fixtures.password)
res = self.app.get(url, follow_redirects=True)
assert res.status_code == 200, res.status_code
self.signout()
# Now only allow authenticated users
app.allow_anonymous_contributors = False
app.hidden = 0
db.session.add(app)
db.session.commit()
res = self.app.get(url, follow_redirects=True)
err_msg = "Only authenticated users can participate"
assert "You have to sign in" in res.data, err_msg
@with_context
def test_70_public_user_profile(self):
"""Test WEB public user profile works"""
Fixtures.create()
# Should work as an anonymous user
url = '/account/%s/' % Fixtures.name
res = self.app.get(url, follow_redirects=True)
err_msg = "There should be a public profile page for the user"
assert Fixtures.fullname in res.data, err_msg
# Should work as an authenticated user
self.signin()
res = self.app.get(url, follow_redirects=True)
assert Fixtures.fullname in res.data, err_msg
# Should return 404 when a user does not exist
url = '/account/a-fake-name-that-does-not-exist/'
res = self.app.get(url, follow_redirects=True)
err_msg = "It should return a 404"
assert res.status_code == 404, err_msg
@with_context
@patch('pybossa.view.applications.uploader.upload_file', return_value=True)
@patch('pybossa.importers.requests.get')
def test_71_bulk_epicollect_import_unauthorized(self, Mock, mock):
"""Test WEB bulk import unauthorized works"""
unauthorized_request = FakeRequest('Unauthorized', 403,
{'content-type': 'application/json'})
Mock.return_value = unauthorized_request
self.register()
self.new_application()
app = db.session.query(App).first()
url = '/app/%s/tasks/import?template=csv' % (app.short_name)
res = self.app.post(url, data={'epicollect_project': 'fakeproject',
'epicollect_form': 'fakeform',
'formtype': 'json', 'form_name': 'epicollect'},
follow_redirects=True)
msg = "Oops! It looks like you don't have permission to access the " \
"EpiCollect Plus project"
assert msg in res.data
@with_context
@patch('pybossa.view.applications.uploader.upload_file', return_value=True)
@patch('pybossa.importers.requests.get')
def test_72_bulk_epicollect_import_non_html(self, Mock, mock):
"""Test WEB bulk import non html works"""
html_request = FakeRequest('Not an application/json', 200,
{'content-type': 'text/html'})
Mock.return_value = html_request
self.register()
self.new_application()
app = db.session.query(App).first()
url = '/app/%s/tasks/import?template=csv' % (app.short_name)
res = self.app.post(url, data={'epicollect_project': 'fakeproject',
'epicollect_form': 'fakeform',
'formtype': 'json', 'form_name': 'epicollect'},
follow_redirects=True)
msg = "Oops! That project and form do not look like the right one."
assert msg in res.data
@with_context
@patch('pybossa.view.applications.uploader.upload_file', return_value=True)
@patch('pybossa.importers.requests.get')
def test_73_bulk_epicollect_import_json(self, Mock, mock):
"""Test WEB bulk import json works"""
data = [dict(DeviceID=23)]
html_request = FakeRequest(json.dumps(data), 200,
{'content-type': 'application/json'})
Mock.return_value = html_request
self.register()
self.new_application()
app = db.session.query(App).first()
res = self.app.post(('/app/%s/tasks/import' % (app.short_name)),
data={'epicollect_project': 'fakeproject',
'epicollect_form': 'fakeform',
'formtype': 'json', 'form_name': 'epicollect'},
follow_redirects=True)
app = db.session.query(App).first()
err_msg = "Tasks should be imported"
assert "1 Task imported successfully!" in res.data, err_msg
tasks = db.session.query(Task).filter_by(app_id=app.id).all()
err_msg = "The imported task from EpiCollect is wrong"
assert tasks[0].info['DeviceID'] == 23, err_msg
data = [dict(DeviceID=23), dict(DeviceID=24)]
html_request = FakeRequest(json.dumps(data), 200,
{'content-type': 'application/json'})
Mock.return_value = html_request
res = self.app.post(('/app/%s/tasks/import' % (app.short_name)),
data={'epicollect_project': 'fakeproject',
'epicollect_form': 'fakeform',
'formtype': 'json', 'form_name': 'epicollect'},
follow_redirects=True)
app = db.session.query(App).first()
assert len(app.tasks) == 2, "There should be only 2 tasks"
n = 0
epi_tasks = [{u'DeviceID': 23}, {u'DeviceID': 24}]
for t in app.tasks:
assert t.info == epi_tasks[n], "The task info should be the same"
n += 1
@with_context
@patch('pybossa.view.applications.uploader.upload_file', return_value=True)
def test_74_task_settings_page(self, mock):
"""Test WEB TASK SETTINGS page works"""
        # Create root user
self.register()
self.signout()
# As owner
self.register(fullname="owner", name="owner")
res = self.new_application()
url = "/app/sampleapp/tasks/settings"
res = self.app.get(url, follow_redirects=True)
dom = BeautifulSoup(res.data)
divs = ['task_scheduler', 'task_delete', 'task_redundancy']
for div in divs:
err_msg = "There should be a %s section" % div
assert dom.find(id=div) is not None, err_msg
self.signout()
# As an authenticated user
self.register(fullname="juan", name="juan")
res = self.app.get(url, follow_redirects=True)
err_msg = "User should not be allowed to access this page"
assert res.status_code == 403, err_msg
self.signout()
# As an anonymous user
res = self.app.get(url, follow_redirects=True)
dom = BeautifulSoup(res.data)
err_msg = "User should be redirected to sign in"
assert dom.find(id="signin") is not None, err_msg
# As root
self.signin()
res = self.app.get(url, follow_redirects=True)
dom = BeautifulSoup(res.data)
divs = ['task_scheduler', 'task_delete', 'task_redundancy']
for div in divs:
err_msg = "There should be a %s section" % div
assert dom.find(id=div) is not None, err_msg
@with_context
@patch('pybossa.view.applications.uploader.upload_file', return_value=True)
def test_75_task_settings_scheduler(self, mock):
"""Test WEB TASK SETTINGS scheduler page works"""
        # Create root user
self.register()
self.signout()
# Create owner
self.register(fullname="owner", name="owner")
self.new_application()
url = "/app/sampleapp/tasks/scheduler"
form_id = 'task_scheduler'
self.signout()
# As owner and root
for i in range(0, 1):
if i == 0:
# As owner
self.signin(email="[email protected]")
sched = 'random'
else:
sched = 'default'
self.signin()
res = self.app.get(url, follow_redirects=True)
dom = BeautifulSoup(res.data)
err_msg = "There should be a %s section" % form_id
assert dom.find(id=form_id) is not None, err_msg
res = self.task_settings_scheduler(short_name="sampleapp",
sched=sched)
dom = BeautifulSoup(res.data)
err_msg = "Task Scheduler should be updated"
assert dom.find(id='msg_success') is not None, err_msg
app = db.session.query(App).get(1)
assert app.info['sched'] == sched, err_msg
self.signout()
# As an authenticated user
self.register(fullname="juan", name="juan")
res = self.app.get(url, follow_redirects=True)
err_msg = "User should not be allowed to access this page"
assert res.status_code == 403, err_msg
self.signout()
# As an anonymous user
res = self.app.get(url, follow_redirects=True)
dom = BeautifulSoup(res.data)
err_msg = "User should be redirected to sign in"
assert dom.find(id="signin") is not None, err_msg
# With hidden app
app.hidden = 1
db.session.add(app)
db.session.commit()
self.register(fullname="daniel", name="daniel")
res = self.app.get(url, follow_redirects=True)
assert res.status_code == 403, res.status_code
self.signout()
self.signin()
res = self.app.get(url, follow_redirects=True)
dom = BeautifulSoup(res.data)
# Correct values
err_msg = "There should be a %s section" % form_id
assert dom.find(id=form_id) is not None, err_msg
@with_context
@patch('pybossa.view.applications.uploader.upload_file', return_value=True)
def test_76_task_settings_redundancy(self, mock):
"""Test WEB TASK SETTINGS redundancy page works"""
        # Create root user
self.register()
self.signout()
# Create owner
self.register(fullname="owner", name="owner")
self.new_application()
self.new_task(1)
url = "/app/sampleapp/tasks/redundancy"
form_id = 'task_redundancy'
self.signout()
# As owner and root
for i in range(0, 1):
if i == 0:
# As owner
self.signin(email="[email protected]")
n_answers = 20
else:
n_answers = 10
self.signin()
res = self.app.get(url, follow_redirects=True)
dom = BeautifulSoup(res.data)
# Correct values
err_msg = "There should be a %s section" % form_id
assert dom.find(id=form_id) is not None, err_msg
res = self.task_settings_redundancy(short_name="sampleapp",
n_answers=n_answers)
db.session.close()
dom = BeautifulSoup(res.data)
err_msg = "Task Redundancy should be updated"
assert dom.find(id='msg_success') is not None, err_msg
app = db.session.query(App).get(1)
for t in app.tasks:
assert t.n_answers == n_answers, err_msg
# Wrong values, triggering the validators
res = self.task_settings_redundancy(short_name="sampleapp",
n_answers=0)
dom = BeautifulSoup(res.data)
err_msg = "Task Redundancy should be a value between 0 and 1000"
assert dom.find(id='msg_error') is not None, err_msg
res = self.task_settings_redundancy(short_name="sampleapp",
n_answers=10000000)
dom = BeautifulSoup(res.data)
err_msg = "Task Redundancy should be a value between 0 and 1000"
assert dom.find(id='msg_error') is not None, err_msg
self.signout()
# As an authenticated user
self.register(fullname="juan", name="juan")
res = self.app.get(url, follow_redirects=True)
err_msg = "User should not be allowed to access this page"
assert res.status_code == 403, err_msg
self.signout()
# As an anonymous user
res = self.app.get(url, follow_redirects=True)
dom = BeautifulSoup(res.data)
err_msg = "User should be redirected to sign in"
assert dom.find(id="signin") is not None, err_msg
# With hidden app
app.hidden = 1
db.session.add(app)
db.session.commit()
self.register(fullname="daniel", name="daniel")
res = self.app.get(url, follow_redirects=True)
assert res.status_code == 403, res.status_code
self.signout()
self.signin()
res = self.app.get(url, follow_redirects=True)
dom = BeautifulSoup(res.data)
# Correct values
err_msg = "There should be a %s section" % form_id
assert dom.find(id=form_id) is not None, err_msg
@with_context
def test_task_redundancy_update_updates_task_state(self):
"""Test WEB when updating the redundancy of the tasks in a project, the
        state of the task is updated accordingly"""
        # Create root user
self.register()
self.new_application()
self.new_task(1)
url = "/app/sampleapp/tasks/redundancy"
app = db.session.query(App).get(1)
for t in app.tasks:
tr = TaskRun(app_id=app.id, task_id=t.id)
db.session.add(tr)
db.session.commit()
err_msg = "Task state should be completed"
res = self.task_settings_redundancy(short_name="sampleapp",
n_answers=1)
for t in app.tasks:
assert t.state == 'completed', err_msg
res = self.task_settings_redundancy(short_name="sampleapp",
n_answers=2)
err_msg = "Task state should be ongoing"
db.session.add(app)
db.session.commit()
for t in app.tasks:
assert t.state == 'ongoing', t.state
@with_context
@patch('pybossa.view.applications.uploader.upload_file', return_value=True)
def test_77_task_settings_priority(self, mock):
"""Test WEB TASK SETTINGS priority page works"""
        # Create root user
self.register()
self.signout()
# Create owner
self.register(fullname="owner", name="owner")
self.new_application()
self.new_task(1)
url = "/app/sampleapp/tasks/priority"
form_id = 'task_priority'
self.signout()
# As owner and root
app = db.session.query(App).get(1)
_id = app.tasks[0].id
for i in range(0, 1):
if i == 0:
# As owner
self.signin(email="[email protected]")
task_ids = str(_id)
priority_0 = 1.0
else:
task_ids = "1"
priority_0 = 0.5
self.signin()
res = self.app.get(url, follow_redirects=True)
dom = BeautifulSoup(res.data)
# Correct values
err_msg = "There should be a %s section" % form_id
assert dom.find(id=form_id) is not None, err_msg
res = self.task_settings_priority(short_name="sampleapp",
task_ids=task_ids,
priority_0=priority_0)
dom = BeautifulSoup(res.data)
err_msg = "Task Priority should be updated"
assert dom.find(id='msg_success') is not None, err_msg
task = db.session.query(Task).get(_id)
assert task.id == int(task_ids), err_msg
assert task.priority_0 == priority_0, err_msg
# Wrong values, triggering the validators
res = self.task_settings_priority(short_name="sampleapp",
priority_0=3,
task_ids="1")
dom = BeautifulSoup(res.data)
err_msg = "Task Priority should be a value between 0.0 and 1.0"
assert dom.find(id='msg_error') is not None, err_msg
res = self.task_settings_priority(short_name="sampleapp",
task_ids="1, 2")
dom = BeautifulSoup(res.data)
err_msg = "Task Priority task_ids should be a comma separated, no spaces, integers"
assert dom.find(id='msg_error') is not None, err_msg
res = self.task_settings_priority(short_name="sampleapp",
task_ids="1,a")
dom = BeautifulSoup(res.data)
err_msg = "Task Priority task_ids should be a comma separated, no spaces, integers"
assert dom.find(id='msg_error') is not None, err_msg
self.signout()
# As an authenticated user
self.register(fullname="juan", name="juan")
res = self.app.get(url, follow_redirects=True)
err_msg = "User should not be allowed to access this page"
assert res.status_code == 403, err_msg
self.signout()
# As an anonymous user
res = self.app.get(url, follow_redirects=True)
dom = BeautifulSoup(res.data)
err_msg = "User should be redirected to sign in"
assert dom.find(id="signin") is not None, err_msg
# With hidden app
app.hidden = 1
db.session.add(app)
db.session.commit()
self.register(fullname="daniel", name="daniel")
res = self.app.get(url, follow_redirects=True)
assert res.status_code == 403, res.status_code
self.signout()
self.signin()
res = self.app.get(url, follow_redirects=True)
dom = BeautifulSoup(res.data)
# Correct values
err_msg = "There should be a %s section" % form_id
assert dom.find(id=form_id) is not None, err_msg
@with_context
def test_78_cookies_warning(self):
"""Test WEB cookies warning is displayed"""
# As Anonymous
res = self.app.get('/', follow_redirects=True)
dom = BeautifulSoup(res.data)
err_msg = "If cookies are not accepted, cookies banner should be shown"
assert dom.find(id='cookies_warning') is not None, err_msg
# As user
self.signin(email=Fixtures.email_addr2, password=Fixtures.password)
res = self.app.get('/', follow_redirects=True)
dom = BeautifulSoup(res.data)
err_msg = "If cookies are not accepted, cookies banner should be shown"
assert dom.find(id='cookies_warning') is not None, err_msg
self.signout()
# As admin
self.signin(email=Fixtures.root_addr, password=Fixtures.root_password)
res = self.app.get('/', follow_redirects=True)
dom = BeautifulSoup(res.data)
err_msg = "If cookies are not accepted, cookies banner should be shown"
assert dom.find(id='cookies_warning') is not None, err_msg
self.signout()
@with_context
def test_79_cookies_warning2(self):
"""Test WEB cookies warning is hidden"""
# As Anonymous
self.app.set_cookie("localhost", "PyBossa_accept_cookies", "Yes")
res = self.app.get('/', follow_redirects=True, headers={})
dom = BeautifulSoup(res.data)
err_msg = "If cookies are not accepted, cookies banner should be hidden"
assert dom.find(id='cookies_warning') is None, err_msg
# As user
self.signin(email=Fixtures.email_addr2, password=Fixtures.password)
res = self.app.get('/', follow_redirects=True)
dom = BeautifulSoup(res.data)
err_msg = "If cookies are not accepted, cookies banner should be hidden"
assert dom.find(id='cookies_warning') is None, err_msg
self.signout()
# As admin
self.signin(email=Fixtures.root_addr, password=Fixtures.root_password)
res = self.app.get('/', follow_redirects=True)
dom = BeautifulSoup(res.data)
err_msg = "If cookies are not accepted, cookies banner should be hidden"
assert dom.find(id='cookies_warning') is None, err_msg
self.signout()
@with_context
def test_user_with_no_more_tasks_find_volunteers(self):
"""Test WEB when a user has contributed to all available tasks, he is
asked to find new volunteers for a project, if the project is not
completed yet (overall progress < 100%)"""
self.register()
user = User.query.first()
app = AppFactory.create(owner=user)
task = TaskFactory.create(app=app)
taskrun = TaskRunFactory.create(task=task, user=user)
res = self.app.get('/app/%s/newtask' % app.short_name)
message = "Sorry, you've contributed to all the tasks for this project, but this project still needs more volunteers, so please spread the word!"
assert message in res.data
self.signout()
@with_context
def test_user_with_no_more_tasks_find_volunteers_project_completed(self):
"""Test WEB when a user has contributed to all available tasks, he is
not asked to find new volunteers for a project, if the project is
completed (overall progress = 100%)"""
self.register()
user = User.query.first()
app = AppFactory.create(owner=user)
task = TaskFactory.create(app=app, n_answers=1)
taskrun = TaskRunFactory.create(task=task, user=user)
res = self.app.get('/app/%s/newtask' % app.short_name)
assert task.state == 'completed', task.state
message = "Sorry, you've contributed to all the tasks for this project, but this project still needs more volunteers, so please spread the word!"
assert message not in res.data
self.signout()
| agpl-3.0 | -3,881,122,989,439,309,300 | 42.999418 | 153 | 0.581821 | false |
zozo123/buildbot | master/buildbot/test/unit/test_schedulers_manager.py | 1 | 6203 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import mock
from buildbot import config
from buildbot.schedulers import base
from buildbot.schedulers import manager
from twisted.internet import defer
from twisted.trial import unittest
class SchedulerManager(unittest.TestCase):
def setUp(self):
self.next_objectid = 13
self.objectids = {}
self.master = mock.Mock()
def getObjectId(sched_name, class_name):
k = (sched_name, class_name)
try:
rv = self.objectids[k]
except:
rv = self.objectids[k] = self.next_objectid
self.next_objectid += 1
return defer.succeed(rv)
self.master.db.state.getObjectId = getObjectId
self.new_config = mock.Mock()
self.sm = manager.SchedulerManager(self.master)
self.sm.startService()
def tearDown(self):
if self.sm.running:
return self.sm.stopService()
class Sched(base.BaseScheduler):
# changing sch.attr should make a scheduler look "updated"
compare_attrs = ('attr', )
already_started = False
reconfig_count = 0
def startService(self):
assert not self.already_started
assert self.master is not None
assert self.objectid is not None
self.already_started = True
base.BaseScheduler.startService(self)
def stopService(self):
d = base.BaseScheduler.stopService(self)
def still_set(_):
assert self.master is not None
assert self.objectid is not None
d.addCallback(still_set)
return d
class ReconfigSched(config.ReconfigurableServiceMixin, Sched):
def reconfigService(self, new_config):
self.reconfig_count += 1
new_sched = new_config.schedulers[self.name]
self.attr = new_sched.attr
return config.ReconfigurableServiceMixin.reconfigService(self,
new_config)
class ReconfigSched2(ReconfigSched):
pass
def makeSched(self, cls, name, attr='alpha'):
sch = cls(name=name, builderNames=['x'], properties={})
sch.attr = attr
return sch
# tests
@defer.inlineCallbacks
def test_reconfigService_add_and_change_and_remove(self):
sch1 = self.makeSched(self.ReconfigSched, 'sch1', attr='alpha')
self.new_config.schedulers = dict(sch1=sch1)
yield self.sm.reconfigService(self.new_config)
self.assertIdentical(sch1.parent, self.sm)
self.assertIdentical(sch1.master, self.master)
self.assertEqual(sch1.reconfig_count, 1)
sch1_new = self.makeSched(self.ReconfigSched, 'sch1', attr='beta')
sch2 = self.makeSched(self.ReconfigSched, 'sch2', attr='alpha')
self.new_config.schedulers = dict(sch1=sch1_new, sch2=sch2)
yield self.sm.reconfigService(self.new_config)
# sch1 is still the active scheduler, and has been reconfig'd,
# and has the correct attribute
self.assertIdentical(sch1.parent, self.sm)
self.assertIdentical(sch1.master, self.master)
self.assertEqual(sch1.attr, 'beta')
self.assertEqual(sch1.reconfig_count, 2)
self.assertIdentical(sch1_new.parent, None)
self.assertIdentical(sch1_new.master, None)
self.assertIdentical(sch2.parent, self.sm)
self.assertIdentical(sch2.master, self.master)
self.new_config.schedulers = {}
yield self.sm.reconfigService(self.new_config)
self.assertIdentical(sch1.parent, None)
self.assertIdentical(sch1.master, None)
@defer.inlineCallbacks
def test_reconfigService_class_name_change(self):
sch1 = self.makeSched(self.ReconfigSched, 'sch1')
self.new_config.schedulers = dict(sch1=sch1)
yield self.sm.reconfigService(self.new_config)
self.assertIdentical(sch1.parent, self.sm)
self.assertIdentical(sch1.master, self.master)
self.assertEqual(sch1.reconfig_count, 1)
sch1_new = self.makeSched(self.ReconfigSched2, 'sch1')
self.new_config.schedulers = dict(sch1=sch1_new)
yield self.sm.reconfigService(self.new_config)
        # sch1 had its class name changed, so sch1_new is now the active
# instance
self.assertIdentical(sch1_new.parent, self.sm)
self.assertIdentical(sch1_new.master, self.master)
@defer.inlineCallbacks
def test_reconfigService_add_and_change_and_remove_no_reconfig(self):
sch1 = self.makeSched(self.Sched, 'sch1', attr='alpha')
self.new_config.schedulers = dict(sch1=sch1)
yield self.sm.reconfigService(self.new_config)
self.assertIdentical(sch1.parent, self.sm)
self.assertIdentical(sch1.master, self.master)
sch1_new = self.makeSched(self.Sched, 'sch1', attr='beta')
sch2 = self.makeSched(self.Sched, 'sch2', attr='alpha')
self.new_config.schedulers = dict(sch1=sch1_new, sch2=sch2)
yield self.sm.reconfigService(self.new_config)
        # sch1 is no longer active, and sch1_new is
self.assertIdentical(sch1.parent, None)
self.assertIdentical(sch1.master, None)
self.assertIdentical(sch1_new.parent, self.sm)
self.assertIdentical(sch1_new.master, self.master)
self.assertIdentical(sch2.parent, self.sm)
self.assertIdentical(sch2.master, self.master)
| gpl-3.0 | 4,587,122,281,276,174,300 | 34.855491 | 80 | 0.655167 | false |
tedye/leetcode | tools/leetcode.085.Maximal Rectangle/leetcode.085.Maximal Rectangle.submission11.py | 1 | 1212 | class Solution:
# @param matrix, a list of lists of 1 length string
# @return an integer
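    # Illustrative example (not from the original submission):
    #   Solution().maximalRectangle(["10100", "10111", "11111", "10010"])
    # returns 6, the area of the largest rectangle of '1's.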
def maximalRectangle(self, matrix):
if not matrix: return 0
res = 0
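        # line[j] holds the height of the run of consecutive '1's ending at
        # the current row in column j, so each row reduces to a
        # largest-rectangle-in-histogram problem.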
line = [0] * len(matrix[0])
for i in matrix:
for j in range(len(matrix[0])):
if i[j] == '0':
line[j] = 0
else:
line[j] += 1
res = max(res,self.largestRectangleArea(line))
return res
def largestRectangleArea(self, height):
if not height: return 0
if len(height) < 3: return max(min(height) * len(height),max(height))
        # stack records the indices of bars with increasing heights
stack = []
maxArea = 0
i = 0
height.append(0)
h = len(height)
while i < h:
if not stack or (height[i] > height[stack[-1]]):
stack.append(i)
else:
curH = stack.pop(-1)
if not stack:
maxArea = max(maxArea,height[curH] * i)
else:
maxArea = max(maxArea,height[curH] * (i - stack[-1] -1))
i-=1
i+=1
return maxArea | mit | -4,798,966,097,510,817,000 | 30.921053 | 77 | 0.453795 | false |
Donkyhotay/MoonPy | zope/rdb/gadfly/gfserve.py | 1 | 19287 | """gadfly server mode
script usage
python gfserve.py port database directory password [startup]
test example
python gfserve.py 2222 test dbtest admin gfstest
port is the port to listen to
database is the database to start up. (must exist!)
directory is the directory the database is in.
password is the administrative access password.
startup if present should be the name of a module to use
for startup. The Startup module must contain a function
Dict = startup(admin_policy, connection, Server_instance)
which performs any startup actions on the database needed
and returns either None or a Dictionary of
name > policy objects
where the policy objects describe policies beyond the
admin policy. The startup function may also
modify the admin_policy (disabling queries for example).
The arguments passed to startup are:
admin_policy: the administrative policy
eg you could turn queries off for admin, using admin
only for server maintenance, or you could add prepared
queries to the admin_policy.
connection: the database connection
eg you could perform some inserts before server start
also needed to make policies.
Server_instance
Included for additional customization.
Create policies using
P = gfserve.Policy(name, password, connection, queries=0)
-- for a "secure" policy with only prepared queries allowed,
or
P = gfserve.Policy(name, password, connection, queries=1)
-- for a policy with full access arbitrary statement
execution.
add a "named prepared statement" to a policy using
P[name] = statement
for example
P["updatenorm"] = '''
update frequents
set bar=?, perweek=?
where drinker='norm'
'''
in this case 'updatenorm' requires 2 dynamic parameters when
invoked from a client.
Script stdout lists server logging information.
Some server administration services (eg shutdown)
are implemented by the script interpretation of gfclient.py.
"""
import socket, gadfly
from gfsocket import \
reply_exception, reply_success, Packet_Reader, certify
def main():
"""start up the server."""
import sys
try:
done = 0
argv = sys.argv
nargs = len(argv)
#print nargs, argv
if nargs<5:
sys.stderr.write("gfserve: not enough arguments: %s\n\n" % argv)
sys.stderr.write(__doc__)
return
[port, db, dr, pw] = argv[1:5]
print "gfserve startup port=%s db=%s, dr=%s password omitted" % (
port, db, dr)
from string import atoi
port = atoi(port)
startup = None
if nargs>5:
startup = argv[5]
print "gfserve: load startup module %s" % startup
S = Server(port, db, dr, pw, startup)
S.init()
print "gfserve: server initialized, setting stderr=stdout"
sys.stderr = sys.stdout
print "gfserve: starting the server"
S.start()
done = 1
finally:
if not done:
print __doc__
# general error
ServerError = "ServerError"
# no such prepared name
PreparedNameError = "PreparedNameError"
# actions
# shut down the server (admin policy only)
# arguments = ()
# shutdown the server with no checkpoint
SHUTDOWN = "SHUTDOWN"
# restart the server (admin only)
# arguments = ()
# restart the server (recover)
# no checkpoint
RESTART = "RESTART"
# checkpoint the server (admin only)
# arguments = ()
# checkpoint the server
CHECKPOINT = "CHECKPOINT"
# exec prepared statement
# arguments = (prepared_name_string, dyn=None)
# execute the prepared statement with dynamic args.
# autocommit.
EXECUTE_PREPARED = "EXECUTE_PREPARED"
# exec any statement (only if not disabled)
# arguments = (statement_string, dyn=None)
# execute the statement with dynamic args.
# autocommit.
EXECUTE_STATEMENT = "EXECUTE_STATEMENT"
ACTIONS = [SHUTDOWN, RESTART, CHECKPOINT,
EXECUTE_PREPARED, EXECUTE_STATEMENT]
class Server:
"""database server: listen for commands"""
verbose = 1
# wait X minutes on each server loop
select_timeout = 60*5
# do a checkpoint each X times thru server loop
check_loop = 5
# for now works like finger/http
# == each command is a separate connection.
# all sql commands constitute separate transactions
# which are automatically committed upon success.
# for now commands come in as
# 1 length (marshalled int)
# 2 (password, data) (marshalled tuple)
# responses come back as
# 1 length (marshalled int)
# 2 results (marshalled value)
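    # request handling summary: each accepted connection is wrapped in a
    # Packet_Reader; once a full packet arrives, unpack_certified_data()
    # yields (actor_name, certificate, data), the named Policy verifies the
    # certificate against its password, and the data unmarshals to an
    # (action, arguments) tuple which is dispatched to a policy_* method.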
def __init__(self, port, db, dr, pw, startup=None):
self.port = port
self.db = db
self.dr = dr
self.pw = pw
self.startup = startup
self.connection = None
self.socket = None
# prepared cursors dictionary.
self.cursors = {}
self.policies = {}
self.admin_policy = None
def start(self):
"""after init, listen for commands."""
from gfsocket import READY, ERROR, unpack_certified_data
import sys
verbose = self.verbose
socket = self.socket
connection = self.connection
policies = self.policies
admin_policy = self.admin_policy
from select import select
pending_connects = {}
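        # maps connection socket -> Packet_Reader for requests still arriving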
while 1:
try:
# main loop
if self.check_loop<0: self.check_loop=5
for i in xrange(self.check_loop):
if verbose:
print "main loop on", socket, connection
# checkpoint loop
sockets = [socket]
if pending_connects:
sockets = sockets + pending_connects.keys()
# wait for availability
if verbose:
print "server: waiting for connection(s)"
(readables, dummy, errors) = select(\
sockets, [], sockets[:], self.select_timeout)
if socket in errors:
raise ServerError, \
"listening socket in error state: aborting"
# clean up error connection sockets
for s in errors:
del pending_connects[s]
s.close()
# get a new connection, if available
if socket in readables:
readables.remove(socket)
(conn, addr) = socket.accept()
if 1 or verbose:
print "connect %s" % (addr,)
reader = Packet_Reader(conn)
pending_connects[conn] = reader
# poll readable pending connections, if possible
for conn in readables:
reader = pending_connects[conn]
mode = reader.mode
if not mode==READY:
if mode == ERROR:
# shouldn't happen
try:
conn.close()
del pending_connects[conn]
except: pass
continue
else:
try:
reader.poll()
finally:
pass # AFTER DEBUG CHANGE THIS!
# in blocking mode, service ready request,
# commit on no error
for conn in pending_connects.keys():
reader = pending_connects[conn]
mode = reader.mode
if mode == ERROR:
try:
del pending_connects[conn]
conn.close()
except: pass
elif mode == READY:
try:
del pending_connects[conn]
data = reader.data
(actor_name, cert, md) = \
unpack_certified_data(data)
# find the policy for this actor
if not policies.has_key(actor_name):
if verbose:
print "no such policy: "+actor_name
reply_exception(NameError,
"no such policy: "+actor_name, conn)
policy = None
else:
if verbose:
print "executing for", actor_name
policy = policies[actor_name]
policy.action(cert, md, conn)
except SHUTDOWN:
if policy is admin_policy:
print \
"shutdown on admin policy: terminating"
connection.close()
socket.close()
# NORMAL TERMINATION:
return
except RESTART:
if policy is admin_policy:
print \
"restart from admin policy: restarting connection"
connection.restart()
except CHECKPOINT:
if policy is admin_policy:
print \
"checkpoint from admin policy: checkpointing now."
connection.checkpoint()
except:
tb = sys.exc_traceback
info = "%s %s" % (sys.exc_type,
str(sys.exc_value))
if verbose:
from traceback import print_tb
print_tb(tb)
print "error in executing action: "+info
reply_exception(
ServerError, "exception: "+info, conn)
#break # stop after first request serviced!
except:
# except of main while 1 try statement
tb = sys.exc_traceback
ty = sys.exc_type
va = sys.exc_value
print "UNEXPECTED EXCEPTION ON MAINLOOP"
from traceback import print_tb
print_tb(tb)
print "exception:", ty, va
if not pending_connects:
pending_connects = {}
print "server: checkpointing"
connection.checkpoint()
def init(self):
self.getconnection()
self.startup_load()
# get socket last in case of failure earlier
self.getsocket()
HOST = ""
BACKLOG = 5
def getsocket(self):
"""get the listening socket"""
verbose = self.verbose
import socket, sys
if verbose:
print "initializing listener socket"
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
if verbose:
print "trying to set REUSEADDR",\
sock.getsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR)
sock.setsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR, 1)
except:
if verbose:
print "set of REUSEADDR failed", sys.exc_type, sys.exc_value
pass
sock.bind((self.HOST, self.port))
sock.listen(self.BACKLOG)
self.socket = sock
return sock
def getconnection(self):
"""get the db connection"""
from gadfly import gadfly
c = self.connection = gadfly(self.db, self.dr)
# don't automatically checkpoint upon commit
c.autocheckpoint = 0
def startup_load(self):
"""setup the policies and load startup module"""
admin_policy = self.get_admin_policy()
module_name = self.startup
if module_name:
module = __import__(module_name)
# startup(admin_policy, connection, Server_instance)
test = module.startup(admin_policy, self.connection, self)
if test is not None:
self.policies = test
self.policies["admin"] = admin_policy
def get_admin_policy(self):
"""return the admin policy for priviledged access."""
p = self.admin_policy = Policy(
"admin", self.pw, self.connection, queries=1)
return p
class Policy:
"""security policy"""
verbose = 0
# allow arbitrary sql statments
general_queries = 0
# dictionary of named accesses as strings
named_accesses = None
# dictionary of prepared named accesses
prepared_cursors = None
def __init__(self, name, password, connection, queries=0):
"""create a policy (name, password, connection)
name is the name of the policy
        password is the access password (None for no password)
connection is the database connection.
set queries to allow general accesses (unrestricted)
"""
if self.verbose:
print "policy.__init__", name
self.general_queries = queries
self.name = name
self.password = password
self.connection = connection
self.socket = None
self.named_accesses = {}
self.prepared_cursors = {}
def __setitem__(self, name, value):
if self.verbose:
print "policy", self.name, ":", (name, value)
from types import StringType
if type(name) is not StringType or type(value) is not StringType:
raise ValueError, "cursor names and contents must be strings"
self.named_accesses[name] = value
def execute_named(self, name, params=None):
"""execute a named (prepared) sql statement"""
if self.verbose:
print "policy", self.name, "executes", name, params
na = self.named_accesses
pc = self.prepared_cursors
con = self.connection
if not na.has_key(name):
raise PreparedNameError, "unknown access name: %s" % name
stat = na[name]
if pc.has_key(name):
# get prepared query
cursor = pc[name]
else:
# prepare a new cursor
pc[name] = cursor = con.cursor()
return self.execute(cursor, stat, params)
def execute(self, cursor, statement, params=None):
"""execute a statement in a cursor"""
if self.verbose:
print "policy", self.name, "executes", statement, params
cursor.execute(statement, params)
# immediate commit!
self.connection.commit()
try:
result = cursor.fetchall()
description = cursor.description
result = (description, result)
except:
result = None
return result
def execute_any_statement(self, statement, params=None):
"""execute any statement."""
if self.verbose:
print "policy", self.name, "executes", statement, params
con = self.connection
cursor = con.cursor()
return self.execute(cursor, statement, params)
def action(self, certificate, datastring, socket):
"""perform a database/server action after checking certificate"""
verbose = self.verbose
if verbose:
print "policy", self.name, "action..."
# make sure the certificate checks out
if not self.certify(datastring, certificate, self.password):
raise ServerError, "password certification failure"
# unpack the datastring
from marshal import loads
test = loads(datastring)
#if verbose:
#print "data is", test
(action, moredata) = test
import sys
if action in ACTIONS:
action = "policy_"+action
myaction = getattr(self, action)
try:
data = apply(myaction, moredata+(socket,))
#self.reply_success(data)
# pass up server level requests as exceptions
except SHUTDOWN, detail:
raise SHUTDOWN, detail
except RESTART, detail:
raise RESTART, detail
except CHECKPOINT, detail:
raise CHECKPOINT, detail
except:
tb = sys.exc_traceback
exceptiondata = "%s\n%s" %(sys.exc_type,
str(sys.exc_value))
if verbose:
from traceback import print_tb
print_tb(tb)
self.reply_exception(ServerError,
"unexpected exception: "+exceptiondata, socket)
raise ServerError, exceptiondata
else:
raise ServerError, "unknown action: "+`action`
def certify(self, datastring, certificate, password):
# hook for subclassing
return certify(datastring, certificate, password)
def policy_SHUTDOWN(self, socket):
self.reply_success("attempting server shutdown", socket)
raise SHUTDOWN, "please shut down the server"
def policy_RESTART(self, socket):
self.reply_success("attempting server restart", socket)
raise RESTART, "please restart the server"
def policy_CHECKPOINT(self, socket):
self.reply_success("attempting server checkpoint", socket)
raise CHECKPOINT, "please checkpoint the server"
def policy_EXECUTE_PREPARED(self, name, dyn, socket):
try:
result = self.execute_named(name, dyn)
self.reply_success(result, socket)
except PreparedNameError, detail:
self.reply_exception(PreparedNameError,
"no such prepared statement: "+name,
socket)
def policy_EXECUTE_STATEMENT(self, stat, dyn, socket):
if not self.general_queries:
self.reply_exception(ServerError,
"general statements disallowed on this policy",
socket)
raise ServerError, "illegal statement attempt for: "+self.name
result = self.execute_any_statement(stat, dyn)
self.reply_success(result, socket)
def reply_exception(self, exc, info, socket):
# hook for subclassing
reply_exception(exc, info, socket)
def reply_success(self, data, socket):
# hook for subclassing
reply_success(data, socket)
if __name__=="__main__": main()
| gpl-3.0 | -7,174,110,584,602,300,000 | 35.459357 | 76 | 0.533313 | false |
sassoftware/rbuild | rbuild_test/unit_test/pluginstest/imagestest.py | 1 | 30355 | #
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from rbuild import errors
from testutils import mock
from rbuild_test import rbuildhelp
DESCRIPTOR_XML = '''\
<descriptor xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://www.rpath.com/permanent/descriptor-1.1.xsd" xsi:schemaLocation="http://www.rpath.com/permanent/descriptor-1.1.xsd descriptor-1.1.xsd" version="1.1">
<metadata>
<displayName>VMware Image Upload Parameters</displayName>
<rootElement>descriptor_data</rootElement>
<descriptions>
<desc>VMware Image Upload Parameters</desc>
</descriptions>
</metadata>
<dataFields>
<field>
<name>name</name>
<descriptions>
<desc>Name</desc>
</descriptions>
<type>str</type>
<constraints>
<length>4</length>
</constraints>
<required>true</required>
<hidden>false</hidden>
</field>
</dataFields>
</descriptor>
'''
DDATA_XML = '''\
<?xml version='1.0' encoding='UTF-8'?>
<descriptor_data>
<tag>foo</tag>
</descriptor_data>
'''
JOB_XML = '''\
<?xml version='1.0' encoding='UTF-8'?>
<job>
<descriptor>descriptor</descriptor>
<descriptor_data>
<tag>foo</tag>
</descriptor_data>
<job_type>job_type</job_type>
</job>
'''
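# The three documents above are fixtures: DESCRIPTOR_XML stands in for an
# action's descriptor, DDATA_XML is the descriptor data returned by the
# mocked DescriptorConfig, and JOB_XML is the job document that
# Images._createJob is expected to assemble from them.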
class AbstractImagesTest(rbuildhelp.RbuildHelper):
def setUp(self):
rbuildhelp.RbuildHelper.setUp(self)
self.handle = self.getRbuildHandle(mock.MockObject())
self.handle.Cancel.registerCommands()
self.handle.Delete.registerCommands()
self.handle.List.registerCommands()
self.handle.Show.registerCommands()
self.handle.Images.registerCommands()
self.handle.Cancel.initialize()
self.handle.Delete.initialize()
self.handle.List.initialize()
self.handle.Show.initialize()
self.handle.Images.initialize()
class CancelImagesTest(AbstractImagesTest):
def setUp(self):
AbstractImagesTest.setUp(self)
self.cmd = self.handle.Commands.getCommandClass('cancel')()
def testCommand(self):
self.checkRbuild('cancel images',
'rbuild_plugins.images.CancelImagesCommand.runCommand',
[None, None, {}, ['cancel', 'images']])
self.checkRbuild('cancel images 1 2',
'rbuild_plugins.images.CancelImagesCommand.runCommand',
[None, None, {}, ['cancel', 'images', '1', '2']])
def testCommandParsing(self):
handle = self.handle
mock.mockMethod(handle.Images.cancel)
mock.mockMethod(handle.ui.warning)
mock.mockMethod(handle.facade.rbuilder.getImages, ['image'])
err = self.assertRaises(
errors.ParseError, self.cmd.runCommand, handle, {},
['rbuild', 'cancel', 'images'])
self.assertIn(': id', str(err))
self.cmd.runCommand(handle, {}, ['rbuild', 'cancel', 'images', '10'])
handle.Images.cancel._mock.assertCalled('image')
err = self.assertRaises(errors.BadParameterError,
self.cmd.runCommand, handle, {},
['rbuild', 'cancel', 'images', '&^%&*%$^&$'])
self.assertIn('Cannot parse', str(err))
def testLaunchArgParse(self):
self.checkRbuild('cancel images 10',
'rbuild_plugins.images.CancelImagesCommand.runCommand',
[None, None, {}, ['cancel', 'images', '10']])
def testNoImage(self):
from rbuild_plugins import images
mock.mockMethod(self.handle.Images.cancel)
mock.mockMethod(self.handle.Images._getProductStage,
('project', 'branch', 'stage'))
mock.mockMethod(self.handle.ui.warning)
mock.mockMethod(self.handle.ui.getYn, True)
mock.mockMethod(self.handle.facade.rbuilder.getImages, None)
self.cmd.runCommand(self.handle, {},
['rbuild', 'cancel', 'images', '10'])
self.handle.ui.warning._mock.assertCalled("Unable to find image with"
" id '10' on stage stage of project project")
self.handle.Images.cancel._mock.assertNotCalled()
class DeleteImagesTest(AbstractImagesTest):
def testCommandParsing(self):
handle = self.handle
cmd = handle.Commands.getCommandClass('delete')()
mock.mockMethod(handle.Images.delete)
mock.mockMethod(handle.ui.warning)
err = self.assertRaises(
errors.ParseError, cmd.runCommand, handle, {},
['rbuild', 'delete', 'images'])
self.assertIn('IMAGEID', str(err))
cmd.runCommand(handle, {}, ['rbuild', 'delete', 'images', '10', '11'])
handle.Images.delete._mock.assertCalled('10', False)
handle.Images.delete._mock.assertCalled('11', False)
cmd.runCommand(handle, {"force": True},
['rbuild', 'delete', 'images', '10'])
handle.Images.delete._mock.assertCalled('10', True)
cmd.runCommand(handle, {},
['rbuild', 'delete', 'images', '&^%&*%$^&$'])
handle.Images.delete._mock.assertNotCalled()
handle.ui.warning._mock.assertCalled(
"Cannot parse image id '&^%&*%$^&$'")
def testCommand(self):
self.checkRbuild('delete images',
'rbuild_plugins.images.DeleteImagesCommand.runCommand',
[None, None, {}, ['delete', 'images']])
self.checkRbuild('delete images 1 2',
'rbuild_plugins.images.DeleteImagesCommand.runCommand',
[None, None, {}, ['delete', 'images', '1', '2']])
self.checkRbuild('delete images --force 1 2',
'rbuild_plugins.images.DeleteImagesCommand.runCommand',
[None, None, {"force": True}, ['delete', 'images', '1', '2']])
class LaunchTest(AbstractImagesTest):
def testLaunchArgParse(self):
self.checkRbuild(
'launch --list --from-file=fromFile --to-file=toFile --no-launch'
' --no-watch Image Target',
'rbuild_plugins.images.LaunchCommand.runCommand',
[None, None, {
'list': True,
'from-file': 'fromFile',
'to-file': 'toFile',
'no-watch': True,
'no-launch': True,
}, ['rbuild', 'launch', 'Image', 'Target']])
self.checkRbuild(
'deploy --list --from-file=fromFile --to-file=toFile --no-launch'
' --no-watch Image Target',
'rbuild_plugins.images.LaunchCommand.runCommand',
[None, None, {
'list': True,
'from-file': 'fromFile',
'to-file': 'toFile',
'no-watch': True,
'no-launch': True,
}, ['rbuild', 'deploy', 'Image', 'Target']])
def testLaunchCmdlineList(self):
handle = self.handle
handle.Images.registerCommands()
handle.Images.initialize()
handle.ui = mock.MockObject()
_target_1 = mock.MockObject()
_target_1._mock.set(name='foo')
_target_2 = mock.MockObject()
_target_2._mock.set(name='bar')
_targets = [_target_1, _target_2]
mock.mockMethod(handle.facade.rbuilder.getEnabledTargets, _targets)
cmd = handle.Commands.getCommandClass('launch')()
cmd.runCommand(handle, {'list': True}, ['rbuild', 'launch'])
handle.ui.write._mock.assertCalled('Available targets: foo, bar')
def testLaunchCmdlineNoArgs(self):
handle = self.handle
cmd = handle.Commands.getCommandClass('launch')()
self.assertRaises(
errors.ParseError,
cmd.runCommand,
handle,
{},
['rbuild', 'launch'],
)
self.assertRaises(
errors.ParseError,
cmd.runCommand,
handle,
{},
['rbuild', 'launch', 'foo'],
)
def testLaunchCmdline(self):
handle = self.handle
mock.mockMethod(handle.Images.deployImage)
mock.mockMethod(handle.Images.launchImage)
mock.mockMethod(handle.Images.watchJob)
cmd = handle.Commands.getCommandClass('launch')()
cmd.runCommand(handle, {}, ['rbuild', 'launch', 'foo', 'bar'])
handle.Images.deployImage._mock.assertNotCalled()
handle.Images.launchImage._mock.assertCalled('foo', 'bar', True)
cmd = handle.Commands.getCommandClass('launch')()
cmd.runCommand(
handle, {}, ['rbuild', 'deploy', 'foo', 'bar'])
handle.Images.deployImage._mock.assertCalled('foo', 'bar', True)
handle.Images.launchImage._mock.assertNotCalled()
class ListImagesTest(AbstractImagesTest):
def testCommand(self):
self.getRbuildHandle()
self.checkRbuild('list images',
'rbuild_plugins.images.ListImagesCommand.runCommand',
[None, None, {}, ['list', 'images']])
self.checkRbuild('list images 1 2',
'rbuild_plugins.images.ListImagesCommand.runCommand',
[None, None, {}, ['list', 'images', '1', '2']])
def testLatestImages(self):
'''Regression test for APPENG-2788'''
from rbuild.pluginapi import command
handle = self.handle
handle.List.registerCommands()
handle.Delete.registerCommands()
handle.Images.initialize()
mock.mock(handle, 'ui')
_latest = mock.MockObject()
_latest._mock.set(id='http://localhost/latest')
_resource = mock.MockObject()
_resource._node._mock.set(latest_files=[_latest])
mock.mock(command.ListCommand, '_list', _resource)
cmd = handle.Commands.getCommandClass('list')()
cmd.runCommand(handle, {}, ['rbuild', 'list', 'images'])
handle.ui.write._mock.assertCalled('http://localhost/latest')
_latest._mock.set(id='http://localhost/latest%20image')
cmd.runCommand(handle, {}, ['rbuild', 'list', 'images'])
handle.ui.write._mock.assertCalled(
'http://localhost/latest%%20image')
class ShowImagesTest(AbstractImagesTest):
def testCommand(self):
self.getRbuildHandle()
self.checkRbuild('show images',
'rbuild_plugins.images.ListImagesCommand.runCommand',
[None, None, {}, ['show', 'images']])
self.checkRbuild('show images 1 2',
'rbuild_plugins.images.ListImagesCommand.runCommand',
[None, None, {}, ['show', 'images', '1', '2']])
class ImagesPluginTest(AbstractImagesTest):
def testCancel(self):
from rbuild_plugins import images
mock.mockMethod(self.handle.DescriptorConfig.createDescriptorData)
mock.mock(images, 'xobj')
_doc = mock.MockObject()
_doc._mock.enable('job')
images.xobj.Document._mock.setReturn(_doc)
_job = mock.MockObject()
_job._mock.enable('job_type', 'descriptor')
images.xobj.XObj._mock.setReturn(_job)
_image_action = mock.MockObject()
_image_action._mock.set(key=images.Images.CANCEL)
_image_action._root._mock.set(job_type='job_type',
descriptor='descriptor')
_image = mock.MockObject()
_image._mock.set(key=images.Images.CANCEL, image_id='10', status='100',
actions=[_image_action])
_image.jobs.append._mock.setReturn(_doc, _doc)
rv = self.handle.Images.cancel(_image)
self.assertEqual(rv, _doc)
self.assertEqual(rv.job, _job)
self.assertEqual('job_type', rv.job.job_type)
self.assertEqual('descriptor', rv.job.descriptor)
def testCancelNonBuilding(self):
from rbuild_plugins import images
_image = mock.MockObject()
_image._mock.set(status='300')
err = self.assertRaises(images.CancelImageError,
self.handle.Images.cancel, _image)
self.assertIn('not currently building', str(err))
def testCancelNoCancelAction(self):
from rbuild_plugins import images
_image = mock.MockObject()
_image._mock.set(status='100')
err = self.assertRaises(images.CancelImageError,
self.handle.Images.cancel, _image)
self.assertIn('cancel action', str(err))
def testCreateJob(self):
handle = self.handle
mock.mockMethod(
handle.Images._getProductStage, ('product', 'branch', 'stage'))
_jobs = []
def _append(x):
_jobs.append(x)
return x
_image = mock.MockObject()
_image._mock.set(jobs=mock.MockObject())
_image.jobs._mock.set(append=_append)
mock.mockMethod(handle.facade.rbuilder.getImages, _image)
_target = mock.MockObject()
_target._mock.set(credentials_valid='false', is_configured='false')
# test no matching target
mock.mockMethod(handle.facade.rbuilder.getTargets, [])
err = self.assertRaises(errors.PluginError,
handle.Images._createJob, handle.Images.LAUNCH, 'foo', 'bar', True)
self.assertIn('No target matching', str(err))
# test unconfigured target
handle.facade.rbuilder.getTargets._mock.setDefaultReturn([_target])
err = self.assertRaises(errors.PluginError,
handle.Images._createJob, handle.Images.LAUNCH, 'foo', 'bar', True)
self.assertIn('is not configured', str(err))
# test no credentials
_target._mock.set(is_configured='true')
err = self.assertRaises(errors.PluginError,
handle.Images._createJob, handle.Images.LAUNCH, 'foo', 'bar', True)
self.assertIn('have valid credentials', str(err))
_target._mock.set(credentials_valid='true')
_action = mock.MockObject()
_action._mock.set(descriptor=DESCRIPTOR_XML)
_action._root._mock.set(job_type='job_type')
_action._root._mock.set(descriptor='descriptor')
mock.mockMethod(handle.Images._getAction, (_image, _action))
_ddata = mock.MockObject()
_ddata.toxml._mock.setDefaultReturn(DDATA_XML)
mock.mockMethod(handle.DescriptorConfig.createDescriptorData, _ddata)
rv = handle.Images._createJob(
handle.Images.DEPLOY, 'foo', 'bar', True)
handle.facade.rbuilder.getImages._mock.assertCalled(
name='foo',
project='product',
branch='branch',
stage='stage',
order_by='-time_created',
)
handle.Images._getAction._mock.assertCalled(
_image, _target, handle.Images.DEPLOY)
self.assertEqual(len(_jobs), 1)
self.assertEqual(rv, _jobs[0])
self.assertEqual(rv.toxml(), JOB_XML)
rv = handle.Images._createJob(
handle.Images.DEPLOY, 'foo=', 'bar', True)
handle.facade.rbuilder.getImages._mock.assertCalled(
name='foo',
project='product',
branch='branch',
stage='stage',
order_by='-time_created',
)
handle.Images._getAction._mock.assertCalled(
_image, _target, handle.Images.DEPLOY)
self.assertEqual(len(_jobs), 2)
self.assertEqual(rv, _jobs[1])
self.assertEqual(rv.toxml(), JOB_XML)
rv = handle.Images._createJob(
handle.Images.DEPLOY, 'foo=1', 'bar', True)
handle.facade.rbuilder.getImages._mock.assertCalled(
name='foo',
project='product',
branch='branch',
stage='stage',
order_by='-time_created',
trailing_version='1',
)
handle.Images._getAction._mock.assertCalled(
_image, _target, handle.Images.DEPLOY)
self.assertEqual(len(_jobs), 3)
self.assertEqual(rv, _jobs[2])
self.assertEqual(rv.toxml(), JOB_XML)
rv = handle.Images._createJob(
handle.Images.DEPLOY, '1', 'bar', True)
handle.facade.rbuilder.getImages._mock.assertCalled(
image_id='1',
project='product',
branch='branch',
stage='stage',
order_by='-time_created',
)
handle.Images._getAction._mock.assertCalled(
_image, _target, handle.Images.DEPLOY)
self.assertEqual(len(_jobs), 4)
self.assertEqual(rv, _jobs[3])
self.assertEqual(rv.toxml(), JOB_XML)
def testCreateJobNoImages(self):
'''Regression test for APPENG-2803'''
from rbuild_plugins import images
handle = self.handle
handle.Images.registerCommands()
handle.Images.initialize()
mock.mockMethod(
handle.Images._getProductStage, ('product', 'branch', 'stage'))
mock.mockMethod(handle.facade.rbuilder.getImages, None)
err = self.assertRaises(
images.MissingImageError,
handle.Images._createJob,
handle.Images.DEPLOY,
'none',
'bar',
True,
)
self.assertIn('Unable to find', str(err))
def testDelete(self):
handle = self.handle
_image = mock.MockObject(name="foo")
mock.mockMethod(handle.facade.rbuilder.getImages)
handle.facade.rbuilder.getImages._mock.setReturn(
[_image], image_id="10", project="project", branch="branch",
stage="stage", order_by="-time_created")
mock.mockMethod(handle.Images._getProductStage,
('project', 'branch', 'stage'))
mock.mockMethod(handle.ui.getYn, False)
handle.ui.getYn._mock.appendReturn(True, "Delete foo?", default=False)
handle.Images.delete("10", force=True)
handle.ui.getYn._mock.assertNotCalled()
handle.facade.rbuilder.getImages._mock.assertCalled(
image_id="10", project='project', branch='branch', stage='stage',
order_by="-time_created")
_image.delete._mock.assertCalled()
handle.Images.delete("10")
handle.facade.rbuilder.getImages._mock.assertCalled(
image_id="10", project='project', branch='branch', stage='stage',
order_by="-time_created")
_image.delete._mock.assertCalled()
def testDeleteMissing(self):
from rbuild_plugins import images
handle = self.handle
mock.mockMethod(handle.facade.rbuilder.getImages, None)
mock.mockMethod(handle.Images._getProductStage,
('project', 'branch', 'stage'))
self.assertRaises(images.MissingImageError, handle.Images.delete, "10")
handle.facade.rbuilder.getImages._mock.assertCalled(
image_id="10", project='project', branch='branch', stage='stage',
order_by="-time_created")
def testDeleteNoProduct(self):
handle = self.handle
mock.mockMethod(handle.facade.rbuilder.getImages)
mock.mockMethod(handle.Images._getProductStage)
handle.Images._getProductStage._mock.raiseErrorOnAccess(
errors.MissingProductStoreError(path='/foo'))
self.assertRaises(errors.MissingProductStoreError,
handle.Images.delete, 10)
handle.facade.rbuilder.getImages._mock.assertNotCalled()
def testDeleteNoStage(self):
handle = self.handle
mock.mockMethod(handle.facade.rbuilder.getImages)
mock.mockMethod(handle.Images._getProductStage)
handle.Images._getProductStage._mock.raiseErrorOnAccess(
errors.MissingActiveStageError(path='/foo'))
self.assertRaises(errors.MissingActiveStageError,
handle.Images.delete, 10)
handle.facade.rbuilder.getImages._mock.assertNotCalled()
def testGetAction(self):
handle = self.handle
handle.Images.registerCommands()
handle.Images.initialize()
self.assertRaises(
AssertionError, handle.Images._getAction, None, None, 'foo')
_action1 = mock.MockObject()
_action1._mock.set(key=handle.Images.DEPLOY)
_action1._mock.set(name="Deploy image on 'foo' (vmware)")
_action2 = mock.MockObject()
_action2._mock.set(key=handle.Images.DEPLOY)
_action2._mock.set(name="Deploy image on 'bar' (vmware)")
_image = mock.MockObject()
_image._mock.set(actions=[_action1, _action2])
_target = mock.MockObject()
_target._mock.set(name='foo')
self.assertRaises(
errors.PluginError,
handle.Images._getAction,
[_image],
_target,
handle.Images.DEPLOY,
)
_target._mock.set(name='baz')
_image._mock.set(status='300')
self.assertRaises(
errors.PluginError,
handle.Images._getAction,
[_image],
_target,
handle.Images.DEPLOY,
)
_target._mock.set(name='foo')
rv = handle.Images._getAction([_image], _target, handle.Images.DEPLOY)
self.assertEqual(rv, (_image, _action1))
_target._mock.set(name='bar')
rv = handle.Images._getAction([_image], _target, handle.Images.DEPLOY)
self.assertEqual(rv, (_image, _action2))
def testList(self):
handle = self.handle
mock.mockMethod(handle.facade.rbuilder.getImages)
mock.mockMethod(handle.Images._getProductStage)
handle.Images._getProductStage._mock.setReturn(
('project', 'branch', 'stage'))
handle.Images.list()
handle.facade.rbuilder.getImages._mock.assertCalled(
project='project', branch='branch', stage='stage')
def testListNoProduct(self):
handle = self.handle
mock.mockMethod(handle.facade.rbuilder.getImages)
mock.mockMethod(handle.Images._getProductStage)
handle.Images._getProductStage._mock.raiseErrorOnAccess(
errors.MissingProductStoreError(path='/foo'))
self.assertRaises(errors.MissingProductStoreError, handle.Images.list)
handle.facade.rbuilder.getImages._mock.assertNotCalled()
def testListNoStage(self):
handle = self.handle
mock.mockMethod(handle.facade.rbuilder.getImages)
mock.mockMethod(handle.Images._getProductStage)
handle.Images._getProductStage._mock.raiseErrorOnAccess(
errors.MissingActiveStageError(path='/foo'))
self.assertRaises(errors.MissingActiveStageError,
handle.Images.list)
handle.facade.rbuilder.getImages._mock.assertNotCalled()
def testShow(self):
handle = self.handle
mock.mock(handle, 'productStore')
mock.mock(handle, 'product')
handle.product.getProductShortname._mock.setReturn('project')
handle.productStore.getActiveStageName._mock.setReturn('stage')
handle.product.getBaseLabel._mock.setReturn('branch')
mock.mockMethod(handle.facade.rbuilder.getImages, ['image'])
rv = handle.Images.show(10)
self.assertEqual(rv, 'image')
handle.facade.rbuilder.getImages._mock.assertCalled(
image_id=10, project='project', branch='branch', stage='stage')
def testShowMissing(self):
handle = self.handle
mock.mock(handle, 'productStore')
mock.mock(handle, 'product')
mock.mock(handle, 'ui')
handle.product.getProductShortname._mock.setReturn('project')
handle.product.getBaseLabel._mock.setReturn('branch')
handle.productStore.getActiveStageName._mock.setReturn('stage')
mock.mockMethod(handle.facade.rbuilder.getImages, None)
handle.Images.show(10)
handle.facade.rbuilder.getImages._mock.assertCalled(
image_id=10, project='project', branch='branch', stage='stage')
def testShowNoProduct(self):
handle = self.handle
mock.mockMethod(handle.facade.rbuilder.getImages)
mock.mockMethod(handle.Images._getProductStage)
handle.Images._getProductStage._mock.raiseErrorOnAccess(
errors.MissingProductStoreError(path='/foo'))
self.assertRaises(errors.MissingProductStoreError,
handle.Images.delete, 10)
handle.facade.rbuilder.getImages._mock.assertNotCalled()
def testShowNoStage(self):
handle = self.handle
mock.mockMethod(handle.facade.rbuilder.getImages)
mock.mockMethod(handle.Images._getProductStage)
handle.Images._getProductStage._mock.raiseErrorOnAccess(
errors.MissingActiveStageError(path='/foo'))
self.assertRaises(errors.MissingActiveStageError,
handle.Images.show, 10)
handle.facade.rbuilder.getImages._mock.assertNotCalled()
def testWatchJob(self):
from rbuild_plugins.images import time
handle = self.handle
mock.mock(handle.ui, 'outStream')
mock.mock(time, 'ctime', '')
mock.mock(time, 'sleep')
_job = mock.MockObject()
_job.job_state._mock.set(name='Failed')
_job.job_type._mock.set(name='launch system on taraget')
self.assertRaises(errors.PluginError, handle.Images.watchJob, _job)
_status_text = ['Text4', 'Text3 ', 'Text2 ', 'Text1 ']
_network1 = mock.MockObject()
_network1._mock.set(dns_name='foo')
_network2 = mock.MockObject()
_network2._mock.set(dns_name='bar')
_resource = mock.MockObject()
_resource._mock.set(name='baz')
_resource._mock.set(networks=[_network1, _network2])
def _refresh():
try:
_job._mock.set(status_text=_status_text.pop())
except IndexError:
_job.job_state._mock.set(name='Completed')
_job._mock.set(created_resources=[_resource])
_job._mock.set(refresh=_refresh)
_job.job_state._mock.set(name='Running')
_job._mock.set(status_text='Text0 ')
handle.ui.outStream.isatty._mock.setDefaultReturn(True)
handle.Images.watchJob(_job)
expected_calls = [
(('\r[] Text0 ',), ()),
(('\r[] Text1 ',), ()),
((' \b\b',), ()),
(('\r[] Text2 ',), ()),
((' \b\b',), ()),
(('\r[] Text3 ',), ()),
((' \b\b',), ()),
(('\r[] Text4',), ()),
((' \b\b',), ()),
(('\n',), ()),
(('Created system baz with addresses: foo, bar\n',), ()),
]
self.assertEqual(handle.ui.outStream.write._mock.calls, expected_calls)
_status_text = ['Text4', 'Text3 ', 'Text2 ', 'Text1 ']
_job.job_state._mock.set(name='Running')
_job._mock.set(status_text='Text0 ')
handle.ui.outStream.write._mock.calls = []
handle.ui.outStream.isatty._mock.setDefaultReturn(False)
handle.Images.watchJob(_job)
expected_calls = [
(('[] Text0 \n',), ()),
(('[] Text1 \n',), ()),
(('[] Text2 \n',), ()),
(('[] Text3 \n',), ()),
(('[] Text4\n',), ()),
(('Created system baz with addresses: foo, bar\n',), ()),
]
self.assertEqual(handle.ui.outStream.write._mock.calls, expected_calls)
def testGetImages(self):
handle = self.handle
kwargs = dict((x, x) for x in ("project", "stage", "branch"))
kwargs["order_by"] = "-time_created"
_image1 = mock.MockObject(name="foo", trailing_version="1-1-1", id="1")
_image2 = mock.MockObject(name="foo", trailing_version="2-2-2", id="2")
rb = handle.facade.rbuilder
mock.mockMethod(rb.getImages, None)
mock.mockMethod(handle.Images._getProductStage,
("project", "branch", "stage"))
rb.getImages._mock.appendReturn([_image1, _image2], name="foo",
**kwargs)
rb.getImages._mock.appendReturn([_image2], name="foo",
trailing_version="2-2-2", **kwargs)
rb.getImages._mock.appendReturn([_image1], image_id="1", **kwargs)
self.assertEqual([_image1], handle.Images.getImages("1"))
self.assertEqual([_image1, _image2], handle.Images.getImages("foo"))
self.assertEqual([_image2], handle.Images.getImages("foo=2-2-2"))
def testGetImagesMissing(self):
from rbuild_plugins import images
handle = self.handle
rb = handle.facade.rbuilder
mock.mockMethod(rb.getImages, None)
self.assertRaises(images.MissingImageError, handle.Images.getImages,
"10")
def testGetImage(self):
from rbuild_plugins import images
handle = self.handle
rb = handle.facade.rbuilder
_image1 = mock.MockObject(name="foo", trailing_version="1-1-1", id="1")
_image2 = mock.MockObject(name="foo", trailing_version="2-2-2", id="2")
kwargs = dict((x, x) for x in ("project", "stage", "branch"))
kwargs["order_by"] = "-time_created"
mock.mockMethod(rb.getImages, None)
rb.getImages._mock.appendReturn([_image1, _image2], name="foo",
**kwargs)
rb.getImages._mock.appendReturn([_image2], name="foo",
trailing_version="2-2-2", **kwargs)
rb.getImages._mock.appendReturn([_image1], image_id="1", **kwargs)
mock.mockMethod(handle.Images._getProductStage,
("project", "branch", "stage"))
self.assertRaises(images.MissingImageError, handle.Images.getImage, "5")
err = self.assertRaises(errors.PluginError, handle.Images.getImage,
"foo")
self.assertIn("Matched more than one image", str(err))
self.assertEqual(_image1, handle.Images.getImage("1"))
self.assertEqual(_image2, handle.Images.getImage("foo=2-2-2"))
| apache-2.0 | 6,391,575,988,577,512,000 | 35.883354 | 228 | 0.597035 | false |
polltooh/FineGrainedAction | data/crawler.py | 1 | 2670 | #! /usr/bin/env python
import urllib2
import os
import json
from bing_search_api import BingSearchAPI
import string
import sys
def read_bing_key():
with open('bing_key.txt', 'r') as f:
bing_key = f.read()
bing_key = bing_key.replace('\n','')
return bing_key
def get_format(format):
format_list = format.split("?")
if (len(format_list) > 1):
format = format_list[0]
format_list = format.split("%")
if (len(format_list) > 1):
format = format_list[0]
return format
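# Illustrative note (not part of the original script): for a made-up URL such
# as "http://example.com/photo.jpg?itag=42", the caller below extracts the
# trailing piece "jpg?itag=42" and get_format() reduces it to "jpg".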
def download_single_image(url, search_query, title_name):
format = url.split('.')[-1]
format = get_format(format)
if (format == 'gif'):
print('gif')
return
dir_name = "image/" + search_query.replace(' ','_')
if not (os.path.isdir(dir_name)):
os.mkdir(dir_name)
valid_chars = "-_() %s%s" % (string.ascii_letters, string.digits)
filename = ''.join(c for c in title_name if c in valid_chars)
req = urllib2.Request(url)
req.add_header('Accept',
'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8')
req.add_header('user-agent',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_4) AppleWebKit/537.13 (KHTML, like Gecko) Chrome/24.0.1290.1 Safari/537.13')
try:
webpage = urllib2.urlopen(req).read()
full_file_name = dir_name + '/' + filename + '.' + format
f = open(full_file_name, 'wb')
f.write(webpage)
f.close()
except:
print(url)
def crawl_from_bing(search_query):
my_key = read_bing_key()
# search_query = "nba jumpshot"
bing = BingSearchAPI(my_key)
for i in range(20):
params = {
'$format': 'json',
'$top': 50,
'$skip': i * 50}
        result_list = bing.search('image', search_query, params).json()
print(len(result_list['d']['results'][0]['Image']))
for result in result_list['d']['results'][0]['Image']:
image_url = (result['MediaUrl'])
title_name = result['Title'].encode('gbk', 'ignore').decode(encoding="utf-8", errors="ignore")
title_name = title_name.replace('... ','')
download_single_image(image_url, search_query, title_name)
if __name__ == "__main__":
if len(sys.argv) < 2:
print("Usage: crawler search_query")
exit(1)
search_query = ""
for i in range(len(sys.argv)):
if (i == 0):
continue
search_query += sys.argv[i] + " "
search_query = search_query[:-1]
crawl_from_bing(search_query)
| mit | -8,592,267,490,842,399,000 | 30.168675 | 129 | 0.549438 | false |
agarciamontoro/TFG | Software/RK4/eqparser.py | 1 | 2387 | import re
class EquationSystem():
def __init__(self,equations):
self.raw_equations = equations
self.left_hand_sides = []
self.right_hand_sides = []
self.independent_variable = None
self.derivarives = []
self.left_form1 = re.compile(r"^\s*(.)\'\s*$")
self.left_form2 = re.compile(r"^\s*(.)\'\((.)\)\s*$")
def _separe_sides(self):
for equation in self.raw_equations:
left, right = equation.split('=')
self.left_hand_sides.append(left)
self.right_hand_sides.append(right)
assert( len(self.left_hand_sides) == len(self.right_hand_sides) )
def _process_left_hand_sides(self):
for left_side in self.left_hand_sides:
form1 = self.left_form1.match(left_side)
form2 = self.left_form2.match(left_side)
if form1:
self.derivarives.append( form1.group(1) )
independent_variable = 'x'
elif form2:
self.derivarives.append(form2.group(1))
independent_variable = form2.group(2)
else:
raise RuntimeError("""
Invalid left hand side: {}.
The left hand side must be one of the two following forms:
- __var__'
- __var__'( __independent_var__ )
""".format(left_side))
if self.independent_variable is None:
self.independent_variable = independent_variable
else:
assert( self.independent_variable == independent_variable )
def _transform_right_hand_sides(self):
transform_map = {variable:'__var__[{}]'.format(index) for index, variable in enumerate(self.derivarives) }
transform_map.update({self.independent_variable:'x'})
for index,right_side in enumerate(self.right_hand_sides):
new_right_side = right_side
for variable,replacement in transform_map.items():
new_right_side = new_right_side.replace( variable, replacement)
#Delete any (x) floating around
new_right_side = new_right_side.replace("](x)","]")
yield new_right_side
def parse(self):
self._separe_sides()
self._process_left_hand_sides()
return list( self._transform_right_hand_sides() )
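# Illustrative usage sketch (not part of the original module); the equations
# below are invented for the example:
#
#     system = EquationSystem(["y'(t) = y", "z'(t) = y + z"])
#     right_hand_sides = system.parse()
#     # Each right-hand side is rewritten in terms of __var__[i] and the
#     # independent variable x, e.g. roughly "__var__[0] + __var__[1]".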
| gpl-2.0 | 2,274,514,264,007,813,600 | 33.1 | 114 | 0.558023 | false |
colour-science/colour | colour/appearance/tests/test_cam16.py | 1 | 9489 | # !/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Defines the unit tests for the :mod:`colour.appearance.cam16` module.
"""
import numpy as np
import unittest
from itertools import permutations
from colour.appearance import (VIEWING_CONDITIONS_CAM16,
InductionFactors_CAM16, CAM_Specification_CAM16,
XYZ_to_CAM16, CAM16_to_XYZ)
from colour.utilities import domain_range_scale, ignore_numpy_errors
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2021 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '[email protected]'
__status__ = 'Production'
__all__ = ['TestXYZ_to_CAM16', 'TestCAM16_to_XYZ']
class TestXYZ_to_CAM16(unittest.TestCase):
"""
Defines :func:`colour.appearance.cam16.XYZ_to_CAM16` definition unit
tests methods.
"""
def test_XYZ_to_CAM16(self):
"""
Tests :func:`colour.appearance.cam16.XYZ_to_CAM16` definition.
"""
XYZ = np.array([19.01, 20.00, 21.78])
XYZ_w = np.array([95.05, 100.00, 108.88])
L_A = 318.31
Y_b = 20.0
surround = VIEWING_CONDITIONS_CAM16['Average']
np.testing.assert_almost_equal(
XYZ_to_CAM16(XYZ, XYZ_w, L_A, Y_b, surround),
np.array([
41.73120791, 0.10335574, 217.06795977, 2.34501507,
195.37170899, 0.10743677, 275.59498615, np.nan
]),
decimal=7)
XYZ = np.array([57.06, 43.06, 31.96])
L_A = 31.83
np.testing.assert_almost_equal(
XYZ_to_CAM16(XYZ, XYZ_w, L_A, Y_b, surround),
np.array([
65.42828069, 49.67956420, 17.48659243, 52.94308868,
152.06985268, 42.62473321, 398.03047943, np.nan
]),
decimal=7)
XYZ = np.array([3.53, 6.56, 2.14])
XYZ_w = np.array([109.85, 100, 35.58])
L_A = 318.31
np.testing.assert_almost_equal(
XYZ_to_CAM16(XYZ, XYZ_w, L_A, Y_b, surround),
np.array([
21.36052893, 50.99381895, 178.86724266, 61.57953092,
139.78582768, 53.00732582, 223.01823806, np.nan
]),
decimal=7)
XYZ = np.array([19.01, 20.00, 21.78])
L_A = 318.31
np.testing.assert_almost_equal(
XYZ_to_CAM16(XYZ, XYZ_w, L_A, Y_b, surround),
np.array([
41.36326063, 52.81154022, 258.88676291, 53.12406914,
194.52011798, 54.89682038, 311.24768647, np.nan
]),
decimal=7)
XYZ = np.array([61.45276998, 7.00421901, 82.2406738])
XYZ_w = np.array([95.05, 100.00, 108.88])
L_A = 4.074366543152521
np.testing.assert_almost_equal(
XYZ_to_CAM16(XYZ, XYZ_w, L_A, Y_b, surround),
np.array([
21.03801957, 457.78881613, 350.06445098, 241.50642846,
56.74143988, 330.94646237, 376.43915877, np.nan
]),
decimal=7)
@ignore_numpy_errors
def test_domain_range_scale_XYZ_to_CAM16(self):
"""
Tests :func:`colour.appearance.cam16.XYZ_to_CAM16` definition domain
and range scale support.
"""
XYZ = np.array([19.01, 20.00, 21.78])
XYZ_w = np.array([95.05, 100.00, 108.88])
L_A = 318.31
Y_b = 20.0
surround = VIEWING_CONDITIONS_CAM16['Average']
specification = XYZ_to_CAM16(XYZ, XYZ_w, L_A, Y_b, surround)
d_r = (
('reference', 1, 1),
(1, 0.01,
np.array([
1 / 100, 1 / 100, 1 / 360, 1 / 100, 1 / 100, 1 / 100, 1 / 400,
np.nan
])),
(100, 1, np.array([1, 1, 100 / 360, 1, 1, 1, 100 / 400, np.nan])),
)
for scale, factor_a, factor_b in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
XYZ_to_CAM16(XYZ * factor_a, XYZ_w * factor_a, L_A, Y_b,
surround),
specification * factor_b,
decimal=7)
@ignore_numpy_errors
def test_nan_XYZ_to_CAM16(self):
"""
Tests :func:`colour.appearance.cam16.XYZ_to_CAM16` definition
nan support.
"""
cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
cases = set(permutations(cases * 3, r=3))
for case in cases:
XYZ = np.array(case)
XYZ_w = np.array(case)
L_A = case[0]
Y_b = case[0]
surround = InductionFactors_CAM16(case[0], case[0], case[0])
XYZ_to_CAM16(XYZ, XYZ_w, L_A, Y_b, surround)
class TestCAM16_to_XYZ(unittest.TestCase):
"""
Defines :func:`colour.appearance.cam16.CAM16_to_XYZ` definition unit tests
methods.
"""
def test_CAM16_to_XYZ(self):
"""
Tests :func:`colour.appearance.cam16.CAM16_to_XYZ` definition.
"""
specification = CAM_Specification_CAM16(41.73120791, 0.10335574,
217.06795977)
XYZ_w = np.array([95.05, 100.00, 108.88])
L_A = 318.31
Y_b = 20.0
surround = VIEWING_CONDITIONS_CAM16['Average']
np.testing.assert_almost_equal(
CAM16_to_XYZ(specification, XYZ_w, L_A, Y_b, surround),
np.array([19.01, 20.00, 21.78]),
decimal=7)
specification = CAM_Specification_CAM16(65.42828069, 49.67956420,
17.48659243)
L_A = 31.83
np.testing.assert_almost_equal(
CAM16_to_XYZ(specification, XYZ_w, L_A, Y_b, surround),
np.array([57.06, 43.06, 31.96]),
decimal=7)
specification = CAM_Specification_CAM16(21.36052893, 50.99381895,
178.86724266)
XYZ_w = np.array([109.85, 100, 35.58])
L_A = 318.31
np.testing.assert_almost_equal(
CAM16_to_XYZ(specification, XYZ_w, L_A, Y_b, surround),
np.array([3.53, 6.56, 2.14]),
decimal=7)
specification = CAM_Specification_CAM16(41.36326063, 52.81154022,
258.88676291)
L_A = 318.31
np.testing.assert_almost_equal(
CAM16_to_XYZ(specification, XYZ_w, L_A, Y_b, surround),
np.array([19.01, 20.00, 21.78]),
decimal=7)
specification = CAM_Specification_CAM16(21.03801957, 457.78881613,
350.06445098)
XYZ_w = np.array([95.05, 100.00, 108.88])
L_A = 4.074366543152521
np.testing.assert_almost_equal(
CAM16_to_XYZ(specification, XYZ_w, L_A, Y_b, surround),
np.array([61.45276998, 7.00421901, 82.2406738]),
decimal=7)
@ignore_numpy_errors
def test_domain_range_scale_CAM16_to_XYZ(self):
"""
Tests :func:`colour.appearance.cam16.CAM16_to_XYZ` definition domain
and range scale support.
"""
XYZ_i = np.array([19.01, 20.00, 21.78])
XYZ_w = np.array([95.05, 100.00, 108.88])
L_A = 318.31
Y_b = 20.0
surround = VIEWING_CONDITIONS_CAM16['Average']
specification = XYZ_to_CAM16(XYZ_i, XYZ_w, L_A, Y_b, surround)
XYZ = CAM16_to_XYZ(specification, XYZ_w, L_A, Y_b, surround)
d_r = (
('reference', 1, 1, 1),
(1,
np.array([
1 / 100, 1 / 100, 1 / 360, 1 / 100, 1 / 100, 1 / 100, 1 / 400,
np.nan
]), 0.01, 0.01),
(100, np.array([1, 1, 100 / 360, 1, 1, 1, 100 / 400, np.nan]), 1,
1),
)
for scale, factor_a, factor_b, factor_c in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
CAM16_to_XYZ(specification * factor_a, XYZ_w * factor_b,
L_A, Y_b, surround),
XYZ * factor_c,
decimal=7)
@ignore_numpy_errors
def test_raise_exception_CAM16_to_XYZ(self):
"""
Tests :func:`colour.appearance.cam16.CAM16_to_XYZ` definition raised
exception.
"""
try:
CAM16_to_XYZ(
CAM_Specification_CAM16(
41.731207905126638,
None,
217.06795976739301,
),
np.array([95.05, 100.00, 108.88]),
318.31,
20.0,
VIEWING_CONDITIONS_CAM16['Average'],
)
except ValueError:
pass
@ignore_numpy_errors
def test_nan_CAM16_to_XYZ(self):
"""
Tests :func:`colour.appearance.cam16.CAM16_to_XYZ` definition nan
support.
"""
cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
cases = set(permutations(cases * 3, r=3))
for case in cases:
J = case[0]
C = case[0]
h = case[0]
XYZ_w = np.array(case)
L_A = case[0]
Y_b = case[0]
surround = InductionFactors_CAM16(case[0], case[0], case[0])
CAM16_to_XYZ(
CAM_Specification_CAM16(J, C, h), XYZ_w, L_A, Y_b, surround)
| bsd-3-clause | 3,703,799,015,079,945,700 | 34.144444 | 79 | 0.505322 | false |
afrolov1/nova | nova/virt/vmwareapi/ds_util.py | 1 | 5598 | # Copyright (c) 2014 VMware, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Datastore utility functions
"""
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.virt.vmwareapi import error_util
from nova.virt.vmwareapi import vm_util
LOG = logging.getLogger(__name__)
def build_datastore_path(datastore_name, path):
"""Build the datastore compliant path."""
return "[%s] %s" % (datastore_name, path)
def split_datastore_path(datastore_path):
"""Return the datastore and path from a datastore_path.
Split the VMware style datastore path to get the Datastore
name and the entity path.
"""
spl = datastore_path.split('[', 1)[1].split(']', 1)
path = ""
if len(spl) == 1:
datastore_name = spl[0]
else:
datastore_name, path = spl
return datastore_name, path.strip()
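# Illustrative example (not part of the original module), using a made-up
# datastore name: build_datastore_path("datastore1", "vm_folder/vm.vmx")
# yields "[datastore1] vm_folder/vm.vmx", and split_datastore_path() reverses
# it to ("datastore1", "vm_folder/vm.vmx").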
def file_delete(session, datastore_path, dc_ref):
LOG.debug(_("Deleting the datastore file %s"), datastore_path)
vim = session._get_vim()
file_delete_task = session._call_method(
session._get_vim(),
"DeleteDatastoreFile_Task",
vim.get_service_content().fileManager,
name=datastore_path,
datacenter=dc_ref)
session._wait_for_task(file_delete_task)
LOG.debug(_("Deleted the datastore file"))
def file_move(session, dc_ref, src_file, dst_file):
"""Moves the source file or folder to the destination.
The list of possible faults that the server can return on error
include:
- CannotAccessFile: Thrown if the source file or folder cannot be
moved because of insufficient permissions.
- FileAlreadyExists: Thrown if a file with the given name already
exists at the destination.
- FileFault: Thrown if there is a generic file error
- FileLocked: Thrown if the source file or folder is currently
locked or in use.
- FileNotFound: Thrown if the file or folder specified by sourceName
is not found.
- InvalidDatastore: Thrown if the operation cannot be performed on
the source or destination datastores.
- NoDiskSpace: Thrown if there is not enough space available on the
destination datastore.
- RuntimeFault: Thrown if any type of runtime fault is thrown that
is not covered by the other faults; for example,
a communication error.
"""
LOG.debug(_("Moving file from %(src)s to %(dst)s."),
{'src': src_file, 'dst': dst_file})
vim = session._get_vim()
move_task = session._call_method(
session._get_vim(),
"MoveDatastoreFile_Task",
vim.get_service_content().fileManager,
sourceName=src_file,
sourceDatacenter=dc_ref,
destinationName=dst_file,
destinationDatacenter=dc_ref)
session._wait_for_task(move_task)
LOG.debug(_("File moved"))
def file_exists(session, ds_browser, ds_path, file_name):
"""Check if the file exists on the datastore."""
client_factory = session._get_vim().client.factory
search_spec = vm_util.search_datastore_spec(client_factory, file_name)
search_task = session._call_method(session._get_vim(),
"SearchDatastore_Task",
ds_browser,
datastorePath=ds_path,
searchSpec=search_spec)
try:
task_info = session._wait_for_task(search_task)
except error_util.FileNotFoundException:
return False
file_exists = (getattr(task_info.result, 'file', False) and
task_info.result.file[0].path == file_name)
return file_exists
def mkdir(session, ds_path, dc_ref):
"""Creates a directory at the path specified. If it is just "NAME",
then a directory with this name is created at the topmost level of the
DataStore.
"""
LOG.debug(_("Creating directory with path %s"), ds_path)
session._call_method(session._get_vim(), "MakeDirectory",
session._get_vim().get_service_content().fileManager,
name=ds_path, datacenter=dc_ref,
createParentDirectories=True)
LOG.debug(_("Created directory with path %s"), ds_path)
def get_sub_folders(session, ds_browser, ds_path):
"""Return a set of subfolders for a path on a datastore.
If the path does not exist then an empty set is returned.
"""
search_task = session._call_method(
session._get_vim(),
"SearchDatastore_Task",
ds_browser,
datastorePath=ds_path)
try:
task_info = session._wait_for_task(search_task)
except error_util.FileNotFoundException:
return set()
# populate the folder entries
if hasattr(task_info.result, 'file'):
return set([file.path for file in task_info.result.file])
return set()
| apache-2.0 | -6,792,156,168,277,028,000 | 37.342466 | 78 | 0.629332 | false |
k33k00/tesseract_infinity | t_infinity.superold/logistics_create.py | 1 | 18966 | # -*- coding: utf-8 -*-
import os
import time
from datetime import datetime
import click
import pythoncom
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.alert import Alert
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as ec
from selenium.webdriver.support.ui import WebDriverWait
from win32com.client import Dispatch
from t_infinity import driver
from t_infinity.logger import logger
class LogisticsCreate:
days_in_field = None
call_number = None
def __init__(self, serial_number, product_code, inbound_repair_order_number):
self.instance = driver.instance
self.instance.session_id = driver.session_id
self.wait = WebDriverWait(self.instance, 5)
if self.check_ro_number_is_free(inbound_repair_order_number):
return
history = self.check_history(serial_number, product_code)
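        # check_history() returns 1 when the serial number is already logged
        # against the ZULU site, 2 when it is installed at another site, and 3
        # when no serialised product record exists (see the method below).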
        if history == 1:
            self.in_zulu(serial_number, product_code, inbound_repair_order_number)
        elif history == 2:
            self.not_in_zulu(serial_number, product_code, inbound_repair_order_number)
        elif history == 3:
self._add_new(serial_number, product_code, inbound_repair_order_number)
self.go_to_create()
self.create_welcome_fill()
self.job_site_details_fill()
self.ship_site_details()
self.job_item_details(serial_number, product_code, inbound_repair_order_number)
self.job_details()
self.complete()
self.print(inbound_repair_order_number, self.call_number)
def check_ro_number_is_free(self, inbound_repair_order_number):
logger.debug('checking for RO number')
xpath_repair_order_number = ('//*[@id="scmaster_cplMainContent_grdPowerQuery_ctlPowerQueryGrid"]'
'/div[1]/div[4]/div[1]/table/tbody/tr[2]/td[2]/div/input[1]')
xpath_first_row = ('//*[@id="scmaster_cplMainContent_grdPowerQuery_ctlPowerQueryGrid"]'
'/div[1]/div[4]/div[2]/table/tbody/tr[1]')
self.instance.get('https://tesseract-cloud2.co.uk/SC51/SC_SerProd/aspx/serprod_query.aspx')
elem = self.wait.until(ec.visibility_of_element_located((By.XPATH, xpath_repair_order_number)))
logger.debug("Successful navigation, element marker found")
elem.send_keys(inbound_repair_order_number)
logger.debug('%s sent to element', inbound_repair_order_number)
try:
self.wait.until(ec.text_to_be_present_in_element((By.XPATH, xpath_first_row), inbound_repair_order_number))
logger.critical('repair order number exists')
return True
except TimeoutException:
logger.debug('repair order number does not exists')
return False
def check_history(self, serial_number, product_code):
logger.debug('Checking history for %s:%s', serial_number, product_code)
# serialized product query
xpath_serial_number_input = '//*[@id="scmaster_cplMainContent_grdPowerQuery_ctlPowerQueryGrid"]/div[1]/div[4]/div[1]/table/tbody/tr[2]/td[1]/div/input[1]'
xpath_product_code_input = '//*[@id="scmaster_cplMainContent_grdPowerQuery_ctlPowerQueryGrid"]/div[1]/div[4]/div[1]/table/tbody/tr[2]/td[7]/div/input[1]'
xpath_serial_number_first_row = '//*[@id="scmaster_cplMainContent_grdPowerQuery_ctlPowerQueryGrid"]/div[1]/div[4]/div[2]/table/tbody/tr'
# serialized product modify
id_site_no = 'scmaster_cplMainContent_txtSerSiteNum'
self.instance.get('https://tesseract-cloud2.co.uk/SC51/SC_SerProd/aspx/serprod_query.aspx')
elem = self.wait.until(ec.presence_of_element_located((By.XPATH, xpath_serial_number_input)))
elem.send_keys(serial_number)
elem = self.instance.find_element_by_xpath(xpath_product_code_input)
elem.send_keys(product_code)
try:
self.wait.until(ec.text_to_be_present_in_element((By.XPATH, xpath_serial_number_first_row), serial_number))
self.instance.find_element_by_xpath(xpath_serial_number_first_row).click()
element_site_no = self.wait.until(ec.visibility_of_element_located((By.ID, id_site_no)))
site_no = element_site_no.get_attribute('value')
logger.debug("found a site number: %s", site_no)
except TimeoutException:
site_no = False
logger.debug('no site number found')
if site_no is False:
logger.debug('site_no is False')
return 3
if 'ZULU' == site_no:
logger.debug('Serial Number found in %s', site_no)
return 1
if site_no:
logger.debug('Serial Number found in %s', site_no)
return 2
def in_zulu(self, serial_number, product_code, inbound_repair_order_number):
logger.debug("running in_zulu flow")
_ = datetime.strptime(str(datetime.utcnow()), '%Y-%m-%d %H:%M:%S.%f')
today = time.mktime(_.timetuple())
id_current_ro_number = 'scmaster_cplMainContent_txtSerReference2'
id_last_ro_ref = 'scmaster_cplMainContent_txtSerReference1'
id_booked_in_date = 'scmaster_cplMainContent_dtpSerInstallDate'
id_product = 'scmaster_cplMainContent_cboSerProdNum'
id_sumbit = 'scmaster_btnSubmit'
id_status = 'scmaster_cplMainContent_cboSerSeStatCode'
while serial_number not in self.instance.current_url:
self.check_history(serial_number, product_code)
element_current_ro_ref = self.wait.until(ec.visibility_of_element_located((By.ID, id_current_ro_number)))
element_last_ro_ref = self.instance.find_element_by_id(id_last_ro_ref)
element_booked_in_date = self.instance.find_element_by_id(id_booked_in_date)
element_product = self.instance.find_element_by_id(id_product)
element_submit = self.instance.find_element_by_id(id_sumbit)
element_status = self.instance.find_element_by_id(id_status)
previous_product = element_product.get_attribute('value')
previous_repair_order = element_current_ro_ref.get_attribute('value')
if '177' not in previous_product:
if not click.confirm("Tesseract product code appears incorrect, continue?"):
logger.info('killing process')
return
if previous_product != product_code:
if not click.confirm("Your product does not match what is logged on Tesseract, continue?"):
logger.info('killing process')
return
element_last_ro_ref.clear()
element_last_ro_ref.send_keys(previous_repair_order)
element_current_ro_ref.clear()
element_current_ro_ref.send_keys(inbound_repair_order_number)
last_booked_in = element_booked_in_date.get_attribute('value')
logger.debug(last_booked_in)
_ = datetime.strptime(last_booked_in.strip(), '%m/%d/%Y')
last_time_time_stamp = time.mktime(_.timetuple())
self.days_in_field = int(today - last_time_time_stamp) / ((60 ** 2) * 24)
logger.debug(self.days_in_field)
element_booked_in_date.clear()
element_booked_in_date.send_keys(time.strftime("%m/%d/%Y"))
element_status.clear()
element_status.send_keys('REP')
if click.confirm("Record ready for submission, continue?"):
element_submit.click()
else:
return
logger.debug('Submitting...')
self.wait.until(ec.alert_is_present())
Alert(self.instance).accept()
try:
WebDriverWait(self.instance, 3).until(ec.alert_is_present())
logger.critical("Unable to submit, %s", Alert(self.instance).text)
Alert(self.instance).accept()
except TimeoutException:
logger.debug('successfully modified product')
pass
def not_in_zulu(self, serial_number, product_code, inbound_repair_order_number):
logger.debug("running not_in_zulu flow")
xpath_query_serial_number = '//*[@id="scmaster_cplMainContent_grdPowerQuery_ctlPowerQueryGrid"]/div[1]/div[4]/div[1]/table/tbody/tr[2]/td[1]/div/input[1]'
_ = datetime.strptime(str(datetime.utcnow()), '%Y-%m-%d %H:%M:%S.%f')
today = time.mktime(_.timetuple())
id_delete_button = 'scmaster_btnDelete'
id_last_booked_in = 'scmaster_cplMainContent_dtpSerInstallDate'
while serial_number not in self.instance.current_url:
self.check_history(serial_number, product_code)
element_delete_button = self.wait.until(ec.visibility_of_element_located((By.ID, id_delete_button)))
element_last_booked_in_date = self.instance.find_element_by_id(id_last_booked_in)
last_booked_in = element_last_booked_in_date.get_attribute('value')
logger.debug(last_booked_in)
_ = datetime.strptime(last_booked_in.strip(), '%m/%d/%Y')
last_time_time_stamp = time.mktime(_.timetuple())
self.days_in_field = int(today - last_time_time_stamp) / ((60 ** 2) * 24)
logger.debug(self.days_in_field)
element_delete_button.click()
logger.debug(Alert(self.instance).text)
Alert(self.instance).accept()
try:
WebDriverWait(self.instance, 3).until(ec.alert_is_present())
logger.critical("Unable to delete, %s", Alert(self.instance).text)
Alert(self.instance).accept()
except TimeoutException:
logger.debug('product delete from installation')
pass
self.wait.until(ec.presence_of_element_located((By.XPATH, xpath_query_serial_number)))
self._add_new(serial_number, product_code, inbound_repair_order_number)
def _add_new(self, serial_number, product_code, inbound_repair_order_number):
logger.debug('adding product')
id_add_button = 'scmaster_tdButtonStrip2'
id_serial_number = 'scmaster_cplMainContent_txtSerNum'
id_booked_in_date = 'scmaster_cplMainContent_dtpSerInstallDate'
id_current_ro_ref = 'scmaster_cplMainContent_txtSerReference2'
id_product = 'scmaster_cplMainContent_cboSerProdNum'
id_site_no = 'scmaster_cplMainContent_txtSerSiteNum'
id_status = 'scmaster_cplMainContent_cboSerSeStatCode'
id_submit = 'scmaster_btnSubmit'
element_add_button = self.instance.find_element_by_id(id_add_button)
element_add_button.click()
element_serial_number = self.wait.until(ec.presence_of_element_located((By.ID, id_serial_number)))
element_booked_in_date = self.instance.find_element_by_id(id_booked_in_date)
element_current_ro_ref = self.instance.find_element_by_id(id_current_ro_ref)
element_product = self.instance.find_element_by_id(id_product)
element_site_no = self.instance.find_element_by_id(id_site_no)
element_status = self.instance.find_element_by_id(id_status)
element_submit = self.instance.find_element_by_id(id_submit)
element_serial_number.send_keys(serial_number)
element_booked_in_date.clear()
element_booked_in_date.send_keys(time.strftime("%m/%d/%Y"))
element_current_ro_ref.send_keys(inbound_repair_order_number)
element_product.send_keys(product_code)
element_site_no.send_keys('ZULU')
element_status.send_keys('REP')
logger.debug('submitting...')
element_submit.click()
logger.debug('waiting for popup')
self.wait.until(ec.alert_is_present())
logger.debug(Alert(self.instance).text)
Alert(self.instance).accept()
def go_to_create(self):
self.instance.get('https://tesseract-cloud2.co.uk/SC51/SC_RepairJob/aspx/repairjob_create_wzd.aspx')
def create_welcome_fill(self):
id_book_in_date = 'scmaster_cplMainContent_datBookInDate'
id_next = 'scmaster_cplMainContent_cmdNext'
id_workshop_site = 'scmaster_cplMainContent_cboJobWorkshopSiteNum'
dt = datetime.now()
script_workshop_site = 'DisplayCombo("cboJobWorkshopSiteNum", "frmRepairJobCreateWzd");'
element_workshop_site = self.wait.until(ec.presence_of_element_located((By.ID, id_workshop_site)))
element_workshop_site.clear()
element_workshop_site.send_keys('STOWS')
self.instance.execute_script(script_workshop_site)
if not self._handle_modal('fraModalPopup', 'STOWS'):
return
logger.debug(f'{dt.month}/{dt.day}/{dt.year}')
self.wait.until(
ec.text_to_be_present_in_element_value((By.ID, id_book_in_date), f'{dt.month}/{dt.day}/{dt.year}'))
element_next = self.instance.find_element_by_id(id_next)
element_next.click()
return
def job_site_details_fill(self):
id_site_num = 'scmaster_cplMainContent_cboCallSiteNum'
id_name = 'scmaster_cplMainContent_cboCallSiteName'
id_next = 'scmaster_cplMainContent_cmdNext'
script_site_num = 'DisplayCombo("cboCallSiteNum", "frmRepairJobCreateWzd");'
element_site_num = self.wait.until(ec.presence_of_element_located((By.ID, id_site_num)))
element_site_num.send_keys('ZULU')
self.instance.execute_script(script_site_num)
if not self._handle_modal('fraModalPopup', 'ZULU'):
return
self.wait.until(ec.text_to_be_present_in_element_value((By.ID, id_name), 'Zulu Stock'))
element_next = self.instance.find_element_by_id(id_next)
element_next.click()
return
def ship_site_details(self):
id_ship_site_num = 'scmaster_cplMainContent_cboShipSiteNum'
id_next = 'scmaster_cplMainContent_cmdNext'
self.wait.until(ec.presence_of_element_located((By.ID, id_ship_site_num)))
element_next = self.instance.find_element_by_id(id_next)
element_next.click()
def job_item_details(self, serial_number, product_code, repair_order_number):
id_serial_num = 'scmaster_cplMainContent_cboCallSerNum'
id_material_number = 'scmaster_cplMainContent_cboCallProdNum'
id_repair_order_number = 'scmaster_cplMainContent_txtJobRef6'
id_next = 'scmaster_cplMainContent_cmdNext'
script_serial_num = 'DisplayCombo(\'cboCallSerNum\', \'frmRepairJobCreateWzd\')'
element_serial_num = self.wait.until(ec.presence_of_element_located((By.ID, id_serial_num)))
element_serial_num.send_keys(serial_number)
self.instance.execute_script(script_serial_num)
if not self._handle_modal(expected_value=serial_number):
return
self.wait.until(ec.text_to_be_present_in_element_value((By.ID, id_material_number), product_code))
element_repair_order_number = self.instance.find_element_by_id(id_repair_order_number)
element_repair_order_number.send_keys(repair_order_number)
element_next = self.instance.find_element_by_id(id_next)
element_next.click()
def job_details(self):
id_job_type = 'scmaster_cplMainContent_cboCallCalTCode'
id_flow_code = 'scmaster_cplMainContent_cboJobFlowCode'
id_problem = 'scmaster_cplMainContent_txtCallProblem'
id_desc = 'scmaster_cplMainContent_txtCalTDesc'
id_position = 'scmaster_cplMainContent_txtFlowPos'
id_finsih = 'scmaster_cplMainContent_cmdFinish'
script_job_type = 'DisplayCombo(\'cboCallCalTCode\', \'frmRepairJobCreateWzd\');'
script_flow_code = 'DisplayCombo(\'cboJobFlowCode\', \'frmRepairJobCreateWzd\');'
element_job_type = self.wait.until(ec.presence_of_element_located((By.ID, id_job_type)))
element_flow_code = self.instance.find_element_by_id(id_flow_code)
element_problem = self.instance.find_element_by_id(id_problem)
element_job_type.send_keys('ZR1')
element_flow_code.send_keys('SWBO%')
problems = []
        problems.append('This product has been in the field for ' + str(self.days_in_field) + ' days')
problems.append('This call was automatically generated with T-Infinity created by Kieran Wynne')
for problem in problems:
element_problem.send_keys(problem)
element_problem.send_keys(Keys.RETURN)
self.instance.execute_script(script_job_type)
if not self._handle_modal(expected_value='ZR1'):
return
self.wait.until(ec.text_to_be_present_in_element_value((By.ID, id_desc), 'Zulu Equipment Repair'))
self.instance.execute_script(script_flow_code)
if not self._handle_modal(expected_value='SWBOOKIN'):
return
self.wait.until(ec.text_to_be_present_in_element_value((By.ID, id_position), '1'))
element_finish = self.instance.find_element_by_id(id_finsih)
element_finish.click()
def complete(self):
id_job_numbers = 'scmaster_cplMainContent_txtJobNumbers'
element_job_numbers = self.wait.until(ec.presence_of_element_located((By.ID, id_job_numbers)))
self.call_number = element_job_numbers.text
def _handle_modal(self, frame_id='fraModalPopup', expected_value=None):
        self.wait.until(ec.frame_to_be_available_and_switch_to_it((By.ID, frame_id)))
        options = self.instance.find_elements_by_css_selector(
            '#scmaster_cplMainContent_grdDropdown > tbody > tr')
        logger.debug(len(options))
if not options:
self.instance.switch_to_default_content()
return False
        if len(options) == 2:
logger.debug('No relevant options exist')
self.instance.switch_to_default_content()
return False
if len(options) > 3:
logger.debug('multiple options available')
click.confirm('Click the option you like then confirm that you are done')
self.instance.switch_to_default_content()
return True
        if len(options) == 3:
logger.debug('selecting the only available option')
logger.debug(options[1].text)
if expected_value in options[1].text:
options[1].click()
self.instance.switch_to_default_content()
return True
else:
self.instance.switch_to_default_content()
return False
def print(self, repair_order_number, call_number):
pythoncom.CoInitialize()
labelCom = Dispatch('Dymo.DymoAddIn')
labelText = Dispatch('Dymo.DymoLabels')
current_path = os.path.abspath(os.path.dirname(__file__))
isOpen = labelCom.Open(os.path.join(current_path, "labels/Zulu-book-in.label"))
selectPrinter = 'DYMO LabelWriter 450'
labelCom.SelectPrinter(selectPrinter)
labelText.SetField('RO-Number', repair_order_number)
labelText.SetField('Call-Number', call_number)
labelCom.StartPrintJob()
labelCom.Print(1, False)
labelCom.EndPrintJob()
| mit | -3,056,260,277,002,699,300 | 47.01519 | 162 | 0.654856 | false |
akrherz/pyWWA | parsers/pywwa/cmdline.py | 1 | 1790 | """Our standardized pyWWA command line."""
# stdlib
import argparse
from datetime import datetime, timezone
def parse_cmdline(argv):
"""Parse command line for context settings."""
parser = argparse.ArgumentParser(description="pyWWA Parser.")
parser.add_argument(
"-c",
"--custom-args",
type=str,
nargs="+",
help="Pass custom arguments to this parser.",
)
parser.add_argument(
"-d",
"--disable-dbwrite",
action="store_true",
help=(
"Disable any writing to databases, still may need read access "
"to initialize metadata tables."
),
)
parser.add_argument(
"-e",
"--disable-email",
action="store_true",
help="Disable sending any emails.",
)
parser.add_argument(
"-l",
"--stdout-logging",
action="store_true",
help="Also log to stdout.",
)
parser.add_argument(
"-s",
"--shutdown-delay",
type=int,
help=(
"Number of seconds to wait before shutting down process when "
"STDIN is closed to the process. 0 is immediate."
),
)
def _parsevalid(val):
"""Convert to datetime."""
v = datetime.strptime(val[:16], "%Y-%m-%dT%H:%M")
return v.replace(tzinfo=timezone.utc)
parser.add_argument(
"-u",
"--utcnow",
type=_parsevalid,
metavar="YYYY-MM-DDTHH:MI",
help="Provide the current UTC Timestamp (defaults to realtime.).",
)
parser.add_argument(
"-x",
"--disable-xmpp",
action="store_true",
help="Disable all XMPP functionality.",
)
if argv:
argv = argv[1:]
return parser.parse_args(argv)
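# Illustrative usage sketch (not part of the original module); the argument
# values are invented for the example:
#
#     ctx = parse_cmdline(["pywwa", "-l", "-u", "2017-01-01T12:00"])
#     # ctx.stdout_logging is True and ctx.utcnow is a timezone-aware datetime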
| mit | -8,456,757,229,882,486,000 | 25.323529 | 75 | 0.543017 | false |
bslatkin/8-bits | backend/presence.py | 1 | 14516 | #!/usr/bin/env python
#
# Copyright 2010 Brett Slatkin, Nathan Naze
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""User login and presence."""
import datetime
import logging
import os
import time
from google.appengine.api import memcache
from google.appengine.api import taskqueue
from google.appengine.api.channel import channel
# Local libs
import base
import config
import models
import ndb
import posts
import send_email
def invalidate_user_cache(shard):
"""Invalidates the present user cache for the given shard."""
# Memcache keys from get_present_users()
memcache.delete_multi([
'users-shard-%s' % shard,
'users-shard-%s-stale' % shard])
def marshal_users(user_list):
"""Organizes a list of LoginRecords into a JSON-serializable list."""
if not user_list:
return 'Nobody else is here'
# Sort users present the shortest time first.
user_list.sort(key=lambda u: u.last_update_time, reverse=True)
nicknames = [u.nickname for u in user_list]
if len(user_list) == 1:
return '%s is here too' % nicknames[0]
if len(user_list) == 2:
return '%s and %s are here too' % (nicknames[0], nicknames[1])
return '%s, and %s are here too' % (
', '.join(nicknames[:-1]),
nicknames[-1])
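# Illustrative example (not part of the original module): for two active users
# nicknamed "alice" and "bob" (made-up names), marshal_users() returns a single
# display string such as "alice and bob are here too", listing the most
# recently updated user first.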
def only_active_users(*login_record_list):
"""Filters a list of users to only be those that are actually active."""
now = datetime.datetime.now()
oldest_time = (
now - datetime.timedelta(seconds=config.user_max_inactive_seconds))
result_list = []
for login_record in login_record_list:
if not login_record.online:
logging.debug('User is no longer online: %r', login_record)
continue
if (not login_record.last_update_time or
login_record.last_update_time < oldest_time):
logging.debug('User update time too far in past: %r', login_record)
continue
result_list.append(login_record)
return result_list
def maybe_update_token(login_record, force=False):
"""Assigns the user a new channel token if needed.
Args:
login_record: Record for the user.
force: Optional. When True, always update the user's token. This is
used when the token is known to be bad on the client side.
Returns:
True if a new token was issued.
"""
now = datetime.datetime.now()
oldest_token_time = (
now - datetime.timedelta(seconds=config.user_token_lifetime_seconds))
if not force and (
login_record.browser_token_issue_time and
login_record.browser_token_issue_time > oldest_token_time):
return False
login_record.browser_token = channel.create_channel(
get_token(login_record.user_id),
# 5 minutes of wiggle room
5 + config.user_token_lifetime_seconds // 60)
login_record.browser_token_issue_time = now
return True
def get_present_users(shard, include_stale=False, limit=1000):
"""Returns a list of present users for a shard in descending log-in order.
Notably, this query is going to be eventually consistent and miss the folks
who have just recently joined. That's okay. It's like they joined the chat
a little bit late. They will still be able to see previous Posts through
historical queries.
"""
shard_key = 'users-shard-%s' % shard
if include_stale:
shard_key = '%s-stale' % shard_key
user_list = memcache.get(shard_key)
if user_list:
return user_list
query = models.LoginRecord.query()
query = query.filter(models.LoginRecord.shard_id == shard)
# When we don't care about stale users, select everyone in the query,
# including users we know are already logged out.
if not include_stale:
query = query.filter(models.LoginRecord.online == True)
    query = query.order(-models.LoginRecord.last_update_time)
user_list = query.fetch(limit)
if not include_stale:
user_list = only_active_users(*user_list)
memcache.set(shard_key, user_list, config.user_max_inactive_seconds)
return user_list
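# Note (editorial, not part of the original module): the two memcache keys used
# here ("users-shard-<shard>" and "users-shard-<shard>-stale") are the same keys
# purged by invalidate_user_cache() above, so presence changes take effect on
# the next roster read.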
def user_logged_in(shard, user_id):
"""Logs in a user to a shard. Always returns the current user ID."""
login_record = None
if user_id:
# Re-login the user if they somehow lost their browser state and
# needed to reload the page. This assumes the cookie was okay.
login_record = models.LoginRecord.get_by_id(user_id)
if login_record and not login_record.online:
def txn():
login_record = models.LoginRecord.get_by_id(user_id)
assert login_record
login_record.online = True
login_record.put()
logging.debug('Re-logging-in user_id=%r to shard=%r',
login_record.user_id, shard)
ndb.transaction(txn)
# User is logging in for the first time or somehow state was deleted.
if not login_record:
login_record = models.LoginRecord(
key=ndb.Key(models.LoginRecord._get_kind(), models.human_uuid()),
shard_id=shard,
online=True)
login_record.put()
logging.debug('Logged-in new user_id=%r to shard=%r',
login_record.user_id, shard)
invalidate_user_cache(shard)
return login_record.user_id
def user_logged_out(shard, user_id):
"""Notifies other users that the given user has logged out of a shard."""
def txn():
login_record = models.LoginRecord.get_by_id(user_id)
if not login_record:
raise ndb.Rollback()
if not login_record.online:
raise ndb.Rollback()
login_record.online = False
login_record.put()
return login_record
login_record = ndb.transaction(txn)
if not login_record:
logging.warning('Tried to log out user_id=%r from shard=%r, '
'but LoginRecord did not exist', user_id, shard)
return
posts.insert_post(
shard,
archive_type=models.Post.USER_LOGOUT,
nickname=login_record.nickname,
user_id=user_id,
body='%s has left' % login_record.nickname)
invalidate_user_cache(shard)
logging.debug('Logged out user_id=%r from shard=%r', user_id, shard)
def change_presence(shard, user_id, nickname, accepted_terms,
sounds_enabled, retrying, email_address):
"""Changes the presence for a user."""
def txn():
last_nickname = None
user_connected = True
login = models.LoginRecord.get_by_id(user_id)
if not login:
login = models.LoginRecord(
key=ndb.Key(models.LoginRecord._get_kind(), user_id),
shard_id=shard)
elif only_active_users(login):
# This is a heartbeat presence check
user_connected = False
if maybe_update_token(login, force=retrying):
logging.debug(
'Issuing channel token: user_id=%r, shard=%r, force=%r',
user_id, shard, retrying)
if nickname:
# This is a potential nickname change. Right now the client
# always sends the nickname on every request, so we need to
# check for the difference to detect a rename.
last_nickname = login.nickname
login.nickname = nickname
if accepted_terms:
# This is a ToS acceptance
login.accepted_terms_version = config.terms_version
login.online = True
login.sounds_enabled = sounds_enabled
login.email_address = email_address or None
login.put()
return last_nickname, user_connected, login.browser_token
last_nickname, user_connected, browser_token = ndb.transaction(txn)
# Invalidate the cache so the nickname will be updated next time
# someone requests the roster.
invalidate_user_cache(shard)
message = None
archive_type = None
if nickname and last_nickname and last_nickname != nickname:
message = '%s has changed their nickname to %s' % (
last_nickname, nickname)
archive_type = models.Post.USER_UPDATE
logging.debug('User update user_id=%r, shard=%r', user_id, shard)
elif user_connected:
message = '%s has joined' % nickname
archive_type = models.Post.USER_LOGIN
logging.debug('User joined: user_id=%r, shard=%r', user_id, shard)
else:
logging.debug('User heartbeat: user_id=%r to shard=%r',
user_id, shard)
if archive_type:
posts.insert_post(
shard,
archive_type=archive_type,
nickname=nickname,
user_id=user_id,
body=message)
else:
# As long as users are heart-beating, we should be running a
# cleanup task for this shard.
enqueue_cleanup_task(shard)
return user_connected, browser_token
def get_token(user_id):
"""Gets the channel token for the given user."""
return user_id
def enqueue_cleanup_task(shard):
"""Enqueues a task to invoke the ShardCleanupWorker periodically."""
# In case the ShardCleanupWorker runs early, make sure that the task name
# it generates for continuation is guaranteed to run.
offset = time.time() / config.shard_cleanup_period_seconds
name = 'cleanup-%s-time-%d' % (shard, offset)
if name == os.environ.get('HTTP_X_APPENGINE_TASKNAME'):
offset += 1
name = 'cleanup-%s-time-%d' % (shard, offset)
try:
taskqueue.Task(
url='/work/cleanup',
params=dict(shard=shard),
            name=name,
countdown=config.shard_cleanup_period_seconds
).add(config.cleanup_queue)
except (taskqueue.TombstonedTaskError, taskqueue.TaskAlreadyExistsError):
        logging.debug('Cleanup task for shard=%r already present', shard)
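# Added note (not original code), sketching the de-duplication scheme above:
# with shard_cleanup_period_seconds = 60, a task enqueued at t = 12345678 is
# named 'cleanup-<shard>-time-205761'. Every enqueue attempt inside the same
# 60-second window produces the same name and collapses into one task via the
# Tombstoned/AlreadyExists errors, while a worker re-enqueueing under its own
# task name bumps the offset so its continuation lands in the next window.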
class ShardCleanupWorker(base.BaseHandler):
"""Handles periodic cleanup requests for a specific shard.
This handler will run periodically (~minute) for all shards that have
active participants. It's meant to do state cleanup for the shard, such as
forcing logouts for users who have not heartbeated in N seconds.
This handler will also enqueue any other periodic tasks that need to
happen for the shard.
"""
def post(self):
shard = self.get_required('shard', str)
all_users_list = get_present_users(
shard, include_stale=True, limit=10000)
# Find users who are now stale and log them out.
active_users_list = only_active_users(*all_users_list)
all_users_set = set(u.user_id for u in all_users_list)
logged_out_set = set(u.user_id for u in all_users_list if not u.online)
active_users_set = set(u.user_id for u in active_users_list)
for user_id in (all_users_set - active_users_set - logged_out_set):
user_logged_out(shard, user_id)
# Enqueue email notification tasks for users
emails_set = {
u.email_address
for u in all_users_list if u.email_address}
send_email.enqueue_email_tasks(emails_set)
# As long as there are still active users, continue to try to
# clean them up.
if active_users_set:
enqueue_cleanup_task(shard)
class PresenceHandler(base.BaseRpcHandler):
"""Handles updating user presence."""
def handle(self):
shard = self.get_required('shard', str)
email_address = self.get_required(
'email_address', unicode, '', html_escape=True)
nickname = self.get_required('nickname', unicode, '', html_escape=True)
accepted_terms = self.get_required('accepted_terms', str, '') == 'true'
sounds_enabled = self.get_required('sounds_enabled', str, '') == 'true'
retrying = self.get_required('retrying', str, '') == 'true'
# Make sure this shard can be logged into.
shard_record = models.Shard.get_by_id(shard)
if shard_record and shard_record.root_shard:
raise base.TopicShardError('Cannot login to topic shard')
if 'shards' not in self.session:
# First login on any shard with no cookie present.
self.session['shards'] = {}
user_id = self.session['shards'].get(shard)
if not user_id:
# First login to this shard.
user_id = models.human_uuid()
self.session['shards'][shard] = user_id
user_connected, browser_token = change_presence(
shard, user_id, nickname, accepted_terms, sounds_enabled,
retrying, email_address)
self.json_response['userConnected'] = user_connected
self.json_response['browserToken'] = browser_token
# Always assign the cookie on the top domain, so the user doesn't have
# to accept the terms of service repeatedly.
if not config.is_dev_appserver:
host_parts = self.request.host.split('.')
suffix = '.'.join(host_parts[-2:])
self.session.domain = '.' + suffix
self.session.path = '/'
self.session.save()
class ShowRosterHandler(base.BaseRpcHandler):
"""Handles echoing the roster to a single user."""
require_shard = True
def handle(self):
user_list = get_present_users(self.shard)
adjusted_user_list = []
for user in user_list:
# Do not include ourselves in the roster.
if user.user_id == self.user_id:
continue
adjusted_user_list.append(user)
self.json_response['roster'] = marshal_users(adjusted_user_list)
ROUTES = [
(r'/rpc/show_roster', ShowRosterHandler),
(r'/rpc/presence', PresenceHandler),
(r'/work/cleanup', ShardCleanupWorker),
]
| apache-2.0 | 3,949,385,054,077,959,700 | 33.316785 | 79 | 0.631097 | false |
CN-UPB/OpenBarista | components/decaf-specification/test/validator_test.py | 1 | 2352 | import unittest
import os
import logging
import traceback
from jsonschema import validate as json_validate, ValidationError as json_validateError
from decaf_specification import Specification
class DescriptorValidatorTest(unittest.TestCase):
def setUp(self):
self.logger = logging.getLogger(__name__)
self.logger.addHandler(logging.StreamHandler())
self.logger.setLevel(level=logging.DEBUG)
self.specification = Specification(logger=self.logger, daemon=None)
self.trace = True
self.single_test = ["dataplaneVNF2.vnfd", "webserver.yaml"]
def test_all(self):
# This is shorter: Just store the pointer to the function
validate = self.specification.descriptorValidator
path = os.getcwd() + "/descriptor/"
success = True
for desc in os.listdir(path):
if desc.endswith(".yaml") or desc.endswith(".vnfd"):
try:
print "Testing file: ", desc
code, parsed = validate(path + desc)
if parsed:
print "Test OK..."
else:
print "Validation FAILED..."
except:
success = False
if(self.trace):
traceback.print_exc()
print "Test FAILED..."
else:
print "Unknown file ending: ", desc
self.assertEqual(success, True)
def test_single(self):
# This is shorter: Just store the pointer to the function
validate = self.specification.descriptorValidator
path = os.getcwd() + "/descriptor/"
success = True
for desc in os.listdir(path):
if desc in self.single_test:
try:
print "Testing file: ", desc
code, parsed = validate(path + desc)
if parsed:
print "Test OK..."
else:
print "Validation FAILED..."
except:
success = False
if(self.trace):
traceback.print_exc()
print "Test FAILED..."
self.assertEqual(success, True)
if __name__ == '__main__':
unittest.main()
| mpl-2.0 | -8,218,582,717,480,867,000 | 28.772152 | 87 | 0.522959 | false |
mattvonrocketstein/smash | smashlib/ipy3x/utils/wildcard.py | 1 | 4628 | # -*- coding: utf-8 -*-
"""Support for wildcard pattern matching in object inspection.
Authors
-------
- Jörgen Stenarson <[email protected]>
- Thomas Kluyver
"""
#*****************************************************************************
# Copyright (C) 2005 Jörgen Stenarson <[email protected]>
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#*****************************************************************************
import re
import types
from IPython.utils.dir2 import dir2
from .py3compat import iteritems
def create_typestr2type_dicts(dont_include_in_type2typestr=["lambda"]):
"""Return dictionaries mapping lower case typename (e.g. 'tuple') to type
objects from the types package, and vice versa."""
typenamelist = [tname for tname in dir(types) if tname.endswith("Type")]
typestr2type, type2typestr = {}, {}
for tname in typenamelist:
name = tname[:-4].lower() # Cut 'Type' off the end of the name
obj = getattr(types, tname)
typestr2type[name] = obj
if name not in dont_include_in_type2typestr:
type2typestr[obj] = name
return typestr2type, type2typestr
typestr2type, type2typestr = create_typestr2type_dicts()
def is_type(obj, typestr_or_type):
"""is_type(obj, typestr_or_type) verifies if obj is of a certain type. It
can take strings or actual python types for the second argument, i.e.
'tuple'<->TupleType. 'all' matches all types.
TODO: Should be extended for choosing more than one type."""
if typestr_or_type == "all":
return True
if type(typestr_or_type) == type:
test_type = typestr_or_type
else:
test_type = typestr2type.get(typestr_or_type, False)
if test_type:
return isinstance(obj, test_type)
return False
def show_hidden(str, show_all=False):
"""Return true for strings starting with single _ if show_all is true."""
return show_all or str.startswith("__") or not str.startswith("_")
def dict_dir(obj):
"""Produce a dictionary of an object's attributes. Builds on dir2 by
checking that a getattr() call actually succeeds."""
ns = {}
for key in dir2(obj):
# This seemingly unnecessary try/except is actually needed
# because there is code out there with metaclasses that
# create 'write only' attributes, where a getattr() call
# will fail even if the attribute appears listed in the
# object's dictionary. Properties can actually do the same
# thing. In particular, Traits use this pattern
try:
ns[key] = getattr(obj, key)
except AttributeError:
pass
return ns
def filter_ns(ns, name_pattern="*", type_pattern="all", ignore_case=True,
show_all=True):
"""Filter a namespace dictionary by name pattern and item type."""
pattern = name_pattern.replace("*", ".*").replace("?", ".")
if ignore_case:
reg = re.compile(pattern + "$", re.I)
else:
reg = re.compile(pattern + "$")
# Check each one matches regex; shouldn't be hidden; of correct type.
return dict((key, obj) for key, obj in iteritems(ns) if reg.match(key)
and show_hidden(key, show_all)
and is_type(obj, type_pattern))
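# Minimal usage sketch, not part of the original module; the sample object and
# pattern below are illustrative assumptions only. dict_dir() builds the
# attribute namespace and filter_ns() narrows it with a shell-style pattern,
# which is how IPython's object inspection drives these helpers.
def _example_filter_ns():
    import collections
    ns = dict_dir(collections.deque())
    # e.g. returns {'append': <method>, 'appendleft': <method>}
    return filter_ns(ns, name_pattern="app*", type_pattern="all",
                     ignore_case=True, show_all=False)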
def list_namespace(namespace, type_pattern, filter, ignore_case=False, show_all=False):
"""Return dictionary of all objects in a namespace dictionary that match
type_pattern and filter."""
pattern_list = filter.split(".")
if len(pattern_list) == 1:
return filter_ns(namespace, name_pattern=pattern_list[0],
type_pattern=type_pattern,
ignore_case=ignore_case, show_all=show_all)
else:
# This is where we can change if all objects should be searched or
# only modules. Just change the type_pattern to module to search only
# modules
filtered = filter_ns(namespace, name_pattern=pattern_list[0],
type_pattern="all",
ignore_case=ignore_case, show_all=show_all)
results = {}
for name, obj in iteritems(filtered):
ns = list_namespace(dict_dir(obj), type_pattern,
".".join(pattern_list[1:]),
ignore_case=ignore_case, show_all=show_all)
for inner_name, inner_obj in iteritems(ns):
results["%s.%s" % (name, inner_name)] = inner_obj
return results
| mit | -6,171,735,956,378,141,000 | 38.20339 | 87 | 0.607652 | false |
hfp/libxsmm | samples/deeplearning/sparse_training/fairseq/fairseq/hub_utils.py | 1 | 10042 | #!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import copy
import logging
import os
from typing import List, Dict, Iterator, Tuple, Any
import torch
from torch import nn
from fairseq import utils
from fairseq.data import encoders
logger = logging.getLogger(__name__)
def from_pretrained(
model_name_or_path,
checkpoint_file='model.pt',
data_name_or_path='.',
archive_map=None,
**kwargs
):
from fairseq import checkpoint_utils, file_utils
if archive_map is not None:
if model_name_or_path in archive_map:
model_name_or_path = archive_map[model_name_or_path]
if data_name_or_path is not None and data_name_or_path in archive_map:
data_name_or_path = archive_map[data_name_or_path]
# allow archive_map to set default arg_overrides (e.g., tokenizer, bpe)
# for each model
if isinstance(model_name_or_path, dict):
for k, v in model_name_or_path.items():
if k == 'checkpoint_file':
checkpoint_file = v
elif (
k != 'path'
# only set kwargs that don't already have overrides
and k not in kwargs
):
kwargs[k] = v
model_name_or_path = model_name_or_path['path']
model_path = file_utils.load_archive_file(model_name_or_path)
# convenience hack for loading data and BPE codes from model archive
if data_name_or_path.startswith('.'):
kwargs['data'] = os.path.abspath(os.path.join(model_path, data_name_or_path))
else:
kwargs['data'] = file_utils.load_archive_file(data_name_or_path)
for file, arg in {
'code': 'bpe_codes',
'bpecodes': 'bpe_codes',
'sentencepiece.bpe.model': 'sentencepiece_model',
}.items():
path = os.path.join(model_path, file)
if os.path.exists(path):
kwargs[arg] = path
if 'user_dir' in kwargs:
utils.import_user_module(argparse.Namespace(user_dir=kwargs['user_dir']))
models, args, task = checkpoint_utils.load_model_ensemble_and_task(
[os.path.join(model_path, cpt) for cpt in checkpoint_file.split(os.pathsep)],
arg_overrides=kwargs,
)
return {
'args': args,
'task': task,
'models': models,
}
class GeneratorHubInterface(nn.Module):
"""
PyTorch Hub interface for generating sequences from a pre-trained
translation or language model.
"""
def __init__(self, args, task, models):
super().__init__()
self.args = args
self.task = task
self.models = nn.ModuleList(models)
self.src_dict = task.source_dictionary
self.tgt_dict = task.target_dictionary
# optimize model for generation
for model in self.models:
model.prepare_for_inference_(args)
# Load alignment dictionary for unknown word replacement
# (None if no unknown word replacement, empty if no path to align dictionary)
self.align_dict = utils.load_align_dict(getattr(args, 'replace_unk', None))
self.tokenizer = encoders.build_tokenizer(args)
self.bpe = encoders.build_bpe(args)
self.max_positions = utils.resolve_max_positions(
self.task.max_positions(), *[model.max_positions() for model in models]
)
# this is useful for determining the device
self.register_buffer('_float_tensor', torch.tensor([0], dtype=torch.float))
@property
def device(self):
return self._float_tensor.device
def translate(self, sentences: List[str], beam: int = 5, verbose: bool = False, **kwargs) -> List[str]:
return self.sample(sentences, beam, verbose, **kwargs)
def sample(self, sentences: List[str], beam: int = 1, verbose: bool = False, **kwargs) -> List[str]:
if isinstance(sentences, str):
return self.sample([sentences], beam=beam, verbose=verbose, **kwargs)[0]
tokenized_sentences = [self.encode(sentence) for sentence in sentences]
batched_hypos = self.generate(tokenized_sentences, beam, verbose, **kwargs)
return [self.decode(hypos[0]['tokens']) for hypos in batched_hypos]
def score(self, sentences: List[str], **kwargs):
if isinstance(sentences, str):
return self.score([sentences], **kwargs)[0]
# NOTE: this doesn't support translation tasks currently
tokenized_sentences = [self.encode(sentence) for sentence in sentences]
return [hypos[0] for hypos in self.generate(tokenized_sentences, score_reference=True, **kwargs)]
def generate(
self,
tokenized_sentences: List[torch.LongTensor],
beam: int = 5,
verbose: bool = False,
skip_invalid_size_inputs=False,
inference_step_args=None,
**kwargs
) -> List[List[Dict[str, torch.Tensor]]]:
if torch.is_tensor(tokenized_sentences) and tokenized_sentences.dim() == 1:
return self.generate(
tokenized_sentences.unsqueeze(0), beam=beam, verbose=verbose, **kwargs
)[0]
# build generator using current args as well as any kwargs
gen_args = copy.copy(self.args)
gen_args.beam = beam
for k, v in kwargs.items():
setattr(gen_args, k, v)
generator = self.task.build_generator(self.models, gen_args)
inference_step_args = inference_step_args or {}
results = []
for batch in self._build_batches(tokenized_sentences, skip_invalid_size_inputs):
batch = utils.apply_to_sample(lambda t: t.to(self.device), batch)
translations = self.task.inference_step(
generator, self.models, batch, **inference_step_args
)
for id, hypos in zip(batch["id"].tolist(), translations):
results.append((id, hypos))
# sort output to match input order
outputs = [hypos for _, hypos in sorted(results, key=lambda x: x[0])]
if verbose:
def getarg(name, default):
return getattr(gen_args, name, getattr(self.args, name, default))
for source_tokens, target_hypotheses in zip(tokenized_sentences, outputs):
src_str_with_unk = self.string(source_tokens)
logger.info('S\t{}'.format(src_str_with_unk))
for hypo in target_hypotheses:
hypo_str = self.decode(hypo['tokens'])
logger.info('H\t{}\t{}'.format(hypo['score'], hypo_str))
logger.info('P\t{}'.format(
' '.join(map(lambda x: '{:.4f}'.format(x), hypo['positional_scores'].tolist()))
))
if hypo['alignment'] is not None and getarg('print_alignment', False):
logger.info('A\t{}'.format(
' '.join(['{}-{}'.format(src_idx, tgt_idx) for src_idx, tgt_idx in hypo['alignment']])
))
return outputs
def encode(self, sentence: str) -> torch.LongTensor:
sentence = self.tokenize(sentence)
sentence = self.apply_bpe(sentence)
return self.binarize(sentence)
def decode(self, tokens: torch.LongTensor) -> str:
sentence = self.string(tokens)
sentence = self.remove_bpe(sentence)
return self.detokenize(sentence)
def tokenize(self, sentence: str) -> str:
if self.tokenizer is not None:
sentence = self.tokenizer.encode(sentence)
return sentence
def detokenize(self, sentence: str) -> str:
if self.tokenizer is not None:
sentence = self.tokenizer.decode(sentence)
return sentence
def apply_bpe(self, sentence: str) -> str:
if self.bpe is not None:
sentence = self.bpe.encode(sentence)
return sentence
def remove_bpe(self, sentence: str) -> str:
if self.bpe is not None:
sentence = self.bpe.decode(sentence)
return sentence
def binarize(self, sentence: str) -> torch.LongTensor:
return self.src_dict.encode_line(sentence, add_if_not_exist=False).long()
def string(self, tokens: torch.LongTensor) -> str:
return self.tgt_dict.string(tokens)
def _build_batches(
self, tokens: List[List[int]], skip_invalid_size_inputs: bool
) -> Iterator[Dict[str, Any]]:
lengths = torch.LongTensor([t.numel() for t in tokens])
batch_iterator = self.task.get_batch_iterator(
dataset=self.task.build_dataset_for_inference(tokens, lengths),
max_tokens=self.args.max_tokens,
max_sentences=self.args.max_sentences,
max_positions=self.max_positions,
ignore_invalid_inputs=skip_invalid_size_inputs,
).next_epoch_itr(shuffle=False)
return batch_iterator
class BPEHubInterface(object):
"""PyTorch Hub interface for Byte-Pair Encoding (BPE)."""
def __init__(self, bpe, **kwargs):
super().__init__()
args = argparse.Namespace(bpe=bpe, **kwargs)
self.bpe = encoders.build_bpe(args)
assert self.bpe is not None
def encode(self, sentence: str) -> str:
return self.bpe.encode(sentence)
def decode(self, sentence: str) -> str:
return self.bpe.decode(sentence)
class TokenizerHubInterface(object):
"""PyTorch Hub interface for tokenization."""
def __init__(self, tokenizer, **kwargs):
super().__init__()
args = argparse.Namespace(tokenizer=tokenizer, **kwargs)
self.tokenizer = encoders.build_tokenizer(args)
assert self.tokenizer is not None
def encode(self, sentence: str) -> str:
return self.tokenizer.encode(sentence)
def decode(self, sentence: str) -> str:
return self.tokenizer.decode(sentence)
| bsd-3-clause | -6,086,778,830,432,588,000 | 36.330855 | 114 | 0.60964 | false |
floli/prbf | prbf.py | 1 | 4348 | #!env python3
import ipdb
import sys
import petsc4py
petsc4py.init(sys.argv)
from petsc4py import PETSc
from mpi4py import MPI
import numpy as np
from phelper import *
eMesh = {1: [np.linspace(0, 1, 4) ],
         2: [np.linspace(0, 0.5, 5, False), # False == Do not include endpoint in range
np.linspace(0.5, 1, 8 )],
4: [np.linspace(0, 0.25, 5, False),
np.linspace(0.25, 0.5, 5, False),
np.linspace(0.5, 0.75, 5, False),
np.linspace(0.75, 1, 5, False), ]
}
MPIrank = MPI.COMM_WORLD.Get_rank()
MPIsize = MPI.COMM_WORLD.Get_size()
nSupport = 10 # Number of support points
supportSpace = (-0.1, 1.1) # Range in which the support points are equally distributed
# Dimension of interpolation. Used for adding a polynomial to the matrix. Set to zero to deactivate polynomial
dimension = 1
polyparams = dimension+1 if dimension else 0
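# Added note, not original code: sketch of the augmented system assembled in
# main() below. With radial basis phi and an optional linear polynomial the
# interpolant is
#     s(x) = sum_j c_j * phi(|x - x_j|) + beta_0 + beta_1 * x,
# and the extra polynomial rows/columns (owned by the last rank) turn the
# square system into
#     [ A   P ] [ c    ]   [ b ]
#     [ P^T 0 ] [ beta ] = [ 0 ],  A_ij = phi(|x_i - x_j|),  P_i = (1, x_i).
# 'polyparams' counts the beta unknowns appended to the coefficient vector.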
def main():
# shuffle_mesh(eMesh)
ePoints = eMesh[MPIsize][MPIrank] # np.array of positions to evaluate
supports = np.linspace(supportSpace[0], supportSpace[1], nSupport)
sPoints = partitions(supports)[MPIrank]
A = PETSc.Mat(); A.create()
E = PETSc.Mat(); E.create()
if MPIrank == MPIsize-1 and dimension > 0: # The last rank gets the polynomial rows
A.setSizes( size = ((len(sPoints)+polyparams, PETSc.DETERMINE), (len(sPoints)+polyparams, PETSc.DETERMINE)) )
E.setSizes( size = ((len(ePoints), PETSc.DETERMINE), (len(sPoints)+polyparams, PETSc.DETERMINE)) )
else:
A.setSizes( size = ((len(sPoints), PETSc.DETERMINE), (len(sPoints), PETSc.DETERMINE)) )
E.setSizes( size = ((len(ePoints), PETSc.DETERMINE), (len(sPoints), PETSc.DETERMINE)) )
A.setName("System Matrix"); A.setFromOptions(); A.setUp()
E.setName("Evaluation Matrix"); E.setFromOptions(); E.setUp()
c = A.createVecRight(); c.setName("Coefficients")
b = A.createVecRight(); b.setName("RHS Function Values")
interp = E.createVecLeft(); interp.setName("interp")
for row in range(*A.owner_range): # Rows are partioned
if row >= len(supports): break # We are not setting the rows for the polynomial, this is done when setting each column.
for col in range(nSupport):
v = basisfunction(abs(supports[row]-supports[col]))
if v != 0:
A.setValue(row, col, v)
b.setValue(row, testfunction(supports[row])) # Add the solution to the RHS
# Add the polynomial
if dimension:
            A.setValue(row, nSupport, 1) # Constant part of the polynomial
            A.setValue(nSupport, row, 1) # Ensure symmetry
for d in range(dimension):
A.setValue(row, nSupport + 1 + d, supports[row]) # Value of support point
A.setValue(nSupport + 1 + d, row, supports[row])
A.assemble(PETSc.Mat.AssemblyType.FLUSH_ASSEMBLY)
zeros = A.createVecRight();
A.setDiagonal(zeros, PETSc.InsertMode.ADD_VALUES)
A.assemble()
b.assemble()
# A.view()
A.view(PETSc.Viewer.DRAW().createDraw()) # Use command line -draw_pause <sec>.
# Print("polyparams= ", polyparams)
# Print("A Size =", A.getSize())
# Print("E Global Size = ", E.getSize())
# Print("E Local Size = ", E.getLocalSize())
# Print("E Owner Range", E.owner_range)
offset = E.owner_range[0]
for row in range(*E.owner_range):
for col in range(E.getSize()[1]-polyparams):
E.setValue(row, col, basisfunction(abs(ePoints[row-offset] - supports[col])))
# Add the polynomial
if dimension:
E.setValue(row, nSupport, 1)
for d in range(dimension):
E.setValue(row, nSupport + 1 + d, ePoints[row-offset])
E.assemble()
# E.view()
E.view(PETSc.Viewer.DRAW().createDraw()) # Use command line -draw_pause <sec>.
b.view()
ksp = PETSc.KSP()
ksp.create()
ksp.setOperators(A, A)
ksp.setFromOptions()
ksp.solve(b, c)
E.mult(c, interp);
c.view()
interp.view()
scatter, interp0 = PETSc.Scatter.toZero(interp)
scatter.scatter(interp, interp0)
scatter, c0 = PETSc.Scatter.toZero(c)
scatter.scatter(c, c0)
if MPIrank == 0:
plot(supports, eMesh, interp0.array, c0.array, dimension)
if __name__ == '__main__':
main()
| lgpl-3.0 | -1,361,749,080,802,548,700 | 34.933884 | 127 | 0.618445 | false |
spraints/for-example | mercurial/obsolete.py | 1 | 30427 | # obsolete.py - obsolete markers handling
#
# Copyright 2012 Pierre-Yves David <[email protected]>
# Logilab SA <[email protected]>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
"""Obsolete markers handling
An obsolete marker maps an old changeset to a list of new
changesets. If the list of new changesets is empty, the old changeset
is said to be "killed". Otherwise, the old changeset is being
"replaced" by the new changesets.
Obsolete markers can be used to record and distribute changeset graph
transformations performed by history rewriting operations, and help
build new tools to reconcile conflicting rewriting actions. To
facilitate conflict resolution, markers include various annotations
besides old and new changeset identifiers, such as creation date or
author name.
The old obsoleted changeset is called a "precursor" and possible replacements are
called "successors". Markers that use changeset X as a precursor are called
"successor markers of X" because they hold information about the successors of
X. Markers that use changeset Y as a successor are called "precursor markers of
Y" because they hold information about the precursors of Y.
Examples:
- When changeset A is replaced by a changeset A', one marker is stored:
    (A, (A'))
- When changesets A and B are folded into a new changeset C, two markers are
  stored:
    (A, (C,)) and (B, (C,))
- When changeset A is simply "pruned" from the graph, a marker is created:
    (A, ())
- When changeset A is split into B and C, a single marker is used:
    (A, (B, C))
  We use a single marker to distinguish the "split" case from the "divergence"
  case. If two independent operations rewrite the same changeset A into A' and
  A'' we have an error case: divergent rewriting. We can detect it because
  two markers will be created independently:
  (A, (B,)) and (A, (C,))
Format
------
Markers are stored in an append-only file stored in
'.hg/store/obsstore'.
The file starts with a version header:
- 1 unsigned byte: version number, starting at zero.
The header is followed by the markers. Each marker is made of:
- 1 unsigned byte: number of new changesets "R", could be zero.
- 1 unsigned 32-bits integer: metadata size "M" in bytes.
- 1 byte: a bit field. It is reserved for flags used in obsolete
markers common operations, to avoid repeated decoding of metadata
entries.
- 20 bytes: obsoleted changeset identifier.
- N*20 bytes: new changesets identifiers.
- M bytes: metadata as a sequence of nul-terminated strings. Each
  string contains a key and a value, separated by a colon ':', without
additional encoding. Keys cannot contain '\0' or ':' and values
cannot contain '\0'.
"""
import struct
import util, base85, node
from i18n import _
_pack = struct.pack
_unpack = struct.unpack
_SEEK_END = 2 # os.SEEK_END was introduced in Python 2.5
# the obsolete feature is not mature enough to be enabled by default.
# you have to rely on third party extension extension to enable this.
_enabled = False
# data used for parsing and writing
_fmversion = 0
_fmfixed = '>BIB20s'
_fmnode = '20s'
_fmfsize = struct.calcsize(_fmfixed)
_fnodesize = struct.calcsize(_fmnode)
### obsolescence marker flag
## bumpedfix flag
#
# When a changeset A' succeeds a changeset A which became public, we call A'
# "bumped" because it's a successor of a public changeset
#
# o A' (bumped)
# |`:
# | o A
# |/
# o Z
#
# The way to solve this situation is to create a new changeset Ad as a child
# of A. This changeset has the same content as A'. So the diff from A to A'
# is the same as the diff from A to Ad. Ad is marked as a successor of A'
#
# o Ad
# |`:
# | x A'
# |'|
# o | A
# |/
# o Z
#
# But by transitivity Ad is also a successor of A. To avoid having Ad marked
# as bumped too, we add the `bumpedfix` flag to the marker <A', (Ad,)>.
# This flag means that the successors express the changes between the public and
# bumped version and fix the situation, breaking the transitivity of
# "bumped" here.
bumpedfix = 1
def _readmarkers(data):
"""Read and enumerate markers from raw data"""
off = 0
diskversion = _unpack('>B', data[off:off + 1])[0]
off += 1
if diskversion != _fmversion:
raise util.Abort(_('parsing obsolete marker: unknown version %r')
% diskversion)
# Loop on markers
l = len(data)
while off + _fmfsize <= l:
# read fixed part
cur = data[off:off + _fmfsize]
off += _fmfsize
nbsuc, mdsize, flags, pre = _unpack(_fmfixed, cur)
# read replacement
sucs = ()
if nbsuc:
s = (_fnodesize * nbsuc)
cur = data[off:off + s]
sucs = _unpack(_fmnode * nbsuc, cur)
off += s
# read metadata
# (metadata will be decoded on demand)
metadata = data[off:off + mdsize]
if len(metadata) != mdsize:
raise util.Abort(_('parsing obsolete marker: metadata is too '
'short, %d bytes expected, got %d')
% (mdsize, len(metadata)))
off += mdsize
yield (pre, sucs, flags, metadata)
def encodemeta(meta):
"""Return encoded metadata string to string mapping.
Assume no ':' in key and no '\0' in both key and value."""
for key, value in meta.iteritems():
if ':' in key or '\0' in key:
raise ValueError("':' and '\0' are forbidden in metadata key'")
if '\0' in value:
raise ValueError("':' are forbidden in metadata value'")
return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
def decodemeta(data):
"""Return string to string dictionary from encoded version."""
d = {}
for l in data.split('\0'):
if l:
key, value = l.split(':')
d[key] = value
return d
class marker(object):
"""Wrap obsolete marker raw data"""
def __init__(self, repo, data):
# the repo argument will be used to create changectx in later version
self._repo = repo
self._data = data
self._decodedmeta = None
def __hash__(self):
return hash(self._data)
def __eq__(self, other):
if type(other) != type(self):
return False
return self._data == other._data
def precnode(self):
"""Precursor changeset node identifier"""
return self._data[0]
def succnodes(self):
"""List of successor changesets node identifiers"""
return self._data[1]
def metadata(self):
"""Decoded metadata dictionary"""
if self._decodedmeta is None:
self._decodedmeta = decodemeta(self._data[3])
return self._decodedmeta
def date(self):
"""Creation date as (unixtime, offset)"""
parts = self.metadata()['date'].split(' ')
return (float(parts[0]), int(parts[1]))
class obsstore(object):
"""Store obsolete markers
Markers can be accessed with two mappings:
- precursors[x] -> set(markers on precursors edges of x)
- successors[x] -> set(markers on successors edges of x)
"""
def __init__(self, sopener):
# caches for various obsolescence related cache
self.caches = {}
self._all = []
# new markers to serialize
self.precursors = {}
self.successors = {}
self.sopener = sopener
data = sopener.tryread('obsstore')
if data:
self._load(_readmarkers(data))
def __iter__(self):
return iter(self._all)
def __nonzero__(self):
return bool(self._all)
def create(self, transaction, prec, succs=(), flag=0, metadata=None):
"""obsolete: add a new obsolete marker
* ensuring it is hashable
* check mandatory metadata
* encode metadata
"""
if metadata is None:
metadata = {}
if 'date' not in metadata:
metadata['date'] = "%d %d" % util.makedate()
if len(prec) != 20:
raise ValueError(prec)
for succ in succs:
if len(succ) != 20:
raise ValueError(succ)
marker = (str(prec), tuple(succs), int(flag), encodemeta(metadata))
self.add(transaction, [marker])
def add(self, transaction, markers):
"""Add new markers to the store
        Take care of filtering duplicates.
        Return the number of new markers."""
if not _enabled:
raise util.Abort('obsolete feature is not enabled on this repo')
known = set(self._all)
new = []
for m in markers:
if m not in known:
known.add(m)
new.append(m)
if new:
f = self.sopener('obsstore', 'ab')
try:
# Whether the file's current position is at the begin or at
# the end after opening a file for appending is implementation
# defined. So we must seek to the end before calling tell(),
# or we may get a zero offset for non-zero sized files on
# some platforms (issue3543).
f.seek(0, _SEEK_END)
offset = f.tell()
transaction.add('obsstore', offset)
# offset == 0: new file - add the version header
for bytes in _encodemarkers(new, offset == 0):
f.write(bytes)
finally:
# XXX: f.close() == filecache invalidation == obsstore rebuilt.
# call 'filecacheentry.refresh()' here
f.close()
self._load(new)
# new marker *may* have changed several set. invalidate the cache.
self.caches.clear()
return len(new)
def mergemarkers(self, transaction, data):
markers = _readmarkers(data)
self.add(transaction, markers)
def _load(self, markers):
for mark in markers:
self._all.append(mark)
pre, sucs = mark[:2]
self.successors.setdefault(pre, set()).add(mark)
for suc in sucs:
self.precursors.setdefault(suc, set()).add(mark)
if node.nullid in self.precursors:
raise util.Abort(_('bad obsolescence marker detected: '
'invalid successors nullid'))
def _encodemarkers(markers, addheader=False):
# Kept separate from flushmarkers(), it will be reused for
# markers exchange.
if addheader:
yield _pack('>B', _fmversion)
for marker in markers:
yield _encodeonemarker(marker)
def _encodeonemarker(marker):
pre, sucs, flags, metadata = marker
nbsuc = len(sucs)
format = _fmfixed + (_fmnode * nbsuc)
data = [nbsuc, len(metadata), flags, pre]
data.extend(sucs)
return _pack(format, *data) + metadata
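# Illustrative sketch (not part of the original module): how a single marker
# maps to the on-disk layout documented in the module docstring. The node ids
# below are made-up 20-byte values used purely as an example.
def _examplemarker():
    prec = '\x11' * 20                      # obsoleted changeset id
    succ = '\x22' * 20                      # single successor id
    meta = encodemeta({'date': '0 0', 'user': 'example'})
    marker = (prec, (succ,), 0, meta)
    data = _encodeonemarker(marker)
    # prepending the version byte yields a stream _readmarkers can parse back
    assert list(_readmarkers(_pack('>B', _fmversion) + data)) == [marker]
    return data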
# arbitrary picked to fit into 8K limit from HTTP server
# you have to take in account:
# - the version header
# - the base85 encoding
_maxpayload = 5300
def listmarkers(repo):
"""List markers over pushkey"""
if not repo.obsstore:
return {}
keys = {}
parts = []
currentlen = _maxpayload * 2 # ensure we create a new part
for marker in repo.obsstore:
nextdata = _encodeonemarker(marker)
if (len(nextdata) + currentlen > _maxpayload):
currentpart = []
currentlen = 0
parts.append(currentpart)
currentpart.append(nextdata)
currentlen += len(nextdata)
for idx, part in enumerate(reversed(parts)):
data = ''.join([_pack('>B', _fmversion)] + part)
keys['dump%i' % idx] = base85.b85encode(data)
return keys
def pushmarker(repo, key, old, new):
"""Push markers over pushkey"""
if not key.startswith('dump'):
repo.ui.warn(_('unknown key: %r') % key)
return 0
if old:
repo.ui.warn(_('unexpected old value') % key)
return 0
data = base85.b85decode(new)
lock = repo.lock()
try:
tr = repo.transaction('pushkey: obsolete markers')
try:
repo.obsstore.mergemarkers(tr, data)
tr.close()
return 1
finally:
tr.release()
finally:
lock.release()
def syncpush(repo, remote):
"""utility function to push obsolete markers to a remote
    Exists mostly to allow overriding for experimentation purposes"""
if (_enabled and repo.obsstore and
'obsolete' in remote.listkeys('namespaces')):
rslts = []
remotedata = repo.listkeys('obsolete')
for key in sorted(remotedata, reverse=True):
# reverse sort to ensure we end with dump0
data = remotedata[key]
rslts.append(remote.pushkey('obsolete', key, '', data))
if [r for r in rslts if not r]:
msg = _('failed to push some obsolete markers!\n')
repo.ui.warn(msg)
def syncpull(repo, remote, gettransaction):
"""utility function to pull obsolete markers from a remote
    The `gettransaction` argument is a function that returns the pull
    transaction, creating one if necessary. We return the transaction to inform
    the calling code that a new transaction has been created (when applicable).
    Exists mostly to allow overriding for experimentation purposes"""
tr = None
if _enabled:
repo.ui.debug('fetching remote obsolete markers\n')
remoteobs = remote.listkeys('obsolete')
if 'dump0' in remoteobs:
tr = gettransaction()
for key in sorted(remoteobs, reverse=True):
if key.startswith('dump'):
data = base85.b85decode(remoteobs[key])
repo.obsstore.mergemarkers(tr, data)
repo.invalidatevolatilesets()
return tr
def allmarkers(repo):
"""all obsolete markers known in a repository"""
for markerdata in repo.obsstore:
yield marker(repo, markerdata)
def precursormarkers(ctx):
"""obsolete marker marking this changeset as a successors"""
for data in ctx._repo.obsstore.precursors.get(ctx.node(), ()):
yield marker(ctx._repo, data)
def successormarkers(ctx):
"""obsolete marker making this changeset obsolete"""
for data in ctx._repo.obsstore.successors.get(ctx.node(), ()):
yield marker(ctx._repo, data)
def allsuccessors(obsstore, nodes, ignoreflags=0):
"""Yield node for every successor of <nodes>.
Some successors may be unknown locally.
This is a linear yield unsuited to detecting split changesets."""
remaining = set(nodes)
seen = set(remaining)
while remaining:
current = remaining.pop()
yield current
for mark in obsstore.successors.get(current, ()):
            # ignore markers flagged with the specified flag
if mark[2] & ignoreflags:
continue
for suc in mark[1]:
if suc not in seen:
seen.add(suc)
remaining.add(suc)
def foreground(repo, nodes):
"""return all nodes in the "foreground" of other node
The foreground of a revision is anything reachable using parent -> children
or precursor -> successor relation. It is very similar to "descendant" but
augmented with obsolescence information.
Beware that possible obsolescence cycle may result if complex situation.
"""
repo = repo.unfiltered()
foreground = set(repo.set('%ln::', nodes))
if repo.obsstore:
# We only need this complicated logic if there is obsolescence
# XXX will probably deserve an optimised revset.
nm = repo.changelog.nodemap
plen = -1
# compute the whole set of successors or descendants
while len(foreground) != plen:
plen = len(foreground)
succs = set(c.node() for c in foreground)
mutable = [c.node() for c in foreground if c.mutable()]
succs.update(allsuccessors(repo.obsstore, mutable))
known = (n for n in succs if n in nm)
foreground = set(repo.set('%ln::', known))
return set(c.node() for c in foreground)
def successorssets(repo, initialnode, cache=None):
"""Return all set of successors of initial nodes
Successors set of changeset A are a group of revision that succeed A. It
succeed A as a consistent whole, each revision being only partial
replacement. Successors set contains non-obsolete changeset only.
In most cases a changeset A have zero (changeset pruned) or a single
successors set that contains a single successor (changeset A replaced by
A')
When changeset is split, it results successors set containing more than
a single element. Divergent rewriting will result in multiple successors
sets.
They are returned as a list of tuples containing all valid successors sets.
Final successors unknown locally are considered plain prune (obsoleted
without successors).
The optional `cache` parameter is a dictionary that may contains
precomputed successors sets. It is meant to reuse the computation of
previous call to `successorssets` when multiple calls are made at the same
time. The cache dictionary is updated in place. The caller is responsible
for its live spawn. Code that makes multiple calls to `successorssets`
*must* use this cache mechanism or suffer terrible performances."""
succmarkers = repo.obsstore.successors
# Stack of nodes we search successors sets for
toproceed = [initialnode]
# set version of above list for fast loop detection
# element added to "toproceed" must be added here
stackedset = set(toproceed)
if cache is None:
cache = {}
# This while loop is the flattened version of a recursive search for
# successors sets
#
# def successorssets(x):
# successors = directsuccessors(x)
# ss = [[]]
# for succ in directsuccessors(x):
# # product as in itertools cartesian product
# ss = product(ss, successorssets(succ))
# return ss
#
# But we can not use plain recursive calls here:
# - that would blow the python call stack
# - obsolescence markers may have cycles, we need to handle them.
#
    # The `toproceed` list acts as our call stack. Every node we search
    # successors sets for is stacked there.
#
# The `stackedset` is set version of this stack used to check if a node is
# already stacked. This check is used to detect cycles and prevent infinite
# loop.
#
# successors set of all nodes are stored in the `cache` dictionary.
#
# After this while loop ends we use the cache to return the successors sets
# for the node requested by the caller.
while toproceed:
# Every iteration tries to compute the successors sets of the topmost
# node of the stack: CURRENT.
#
# There are four possible outcomes:
#
# 1) We already know the successors sets of CURRENT:
# -> mission accomplished, pop it from the stack.
# 2) Node is not obsolete:
# -> the node is its own successors sets. Add it to the cache.
# 3) We do not know successors set of direct successors of CURRENT:
# -> We add those successors to the stack.
# 4) We know successors sets of all direct successors of CURRENT:
# -> We can compute CURRENT successors set and add it to the
# cache.
#
current = toproceed[-1]
if current in cache:
# case (1): We already know the successors sets
stackedset.remove(toproceed.pop())
elif current not in succmarkers:
# case (2): The node is not obsolete.
if current in repo:
# We have a valid last successors.
cache[current] = [(current,)]
else:
# Final obsolete version is unknown locally.
# Do not count that as a valid successors
cache[current] = []
else:
# cases (3) and (4)
#
# We proceed in two phases. Phase 1 aims to distinguish case (3)
# from case (4):
#
# For each direct successors of CURRENT, we check whether its
# successors sets are known. If they are not, we stack the
# unknown node and proceed to the next iteration of the while
# loop. (case 3)
#
# During this step, we may detect obsolescence cycles: a node
# with unknown successors sets but already in the call stack.
# In such a situation, we arbitrary set the successors sets of
# the node to nothing (node pruned) to break the cycle.
#
# If no break was encountered we proceed to phase 2.
#
# Phase 2 computes successors sets of CURRENT (case 4); see details
# in phase 2 itself.
#
# Note the two levels of iteration in each phase.
# - The first one handles obsolescence markers using CURRENT as
# precursor (successors markers of CURRENT).
#
            #   Having multiple entries here means divergence.
#
# - The second one handles successors defined in each marker.
#
# Having none means pruned node, multiple successors means split,
# single successors are standard replacement.
#
for mark in sorted(succmarkers[current]):
for suc in mark[1]:
if suc not in cache:
if suc in stackedset:
# cycle breaking
cache[suc] = []
else:
# case (3) If we have not computed successors sets
# of one of those successors we add it to the
# `toproceed` stack and stop all work for this
# iteration.
toproceed.append(suc)
stackedset.add(suc)
break
else:
continue
break
else:
# case (4): we know all successors sets of all direct
# successors
#
# Successors set contributed by each marker depends on the
# successors sets of all its "successors" node.
#
# Each different marker is a divergence in the obsolescence
# history. It contributes successors sets distinct from other
# markers.
#
# Within a marker, a successor may have divergent successors
# sets. In such a case, the marker will contribute multiple
# divergent successors sets. If multiple successors have
# divergent successors sets, a cartesian product is used.
#
# At the end we post-process successors sets to remove
# duplicated entry and successors set that are strict subset of
# another one.
succssets = []
for mark in sorted(succmarkers[current]):
# successors sets contributed by this marker
markss = [[]]
for suc in mark[1]:
# cardinal product with previous successors
productresult = []
for prefix in markss:
for suffix in cache[suc]:
newss = list(prefix)
for part in suffix:
                                    # do not duplicate entries in the
                                    # successors set; first entry wins.
if part not in newss:
newss.append(part)
productresult.append(newss)
markss = productresult
succssets.extend(markss)
# remove duplicated and subset
seen = []
final = []
candidate = sorted(((set(s), s) for s in succssets if s),
key=lambda x: len(x[1]), reverse=True)
for setversion, listversion in candidate:
for seenset in seen:
if setversion.issubset(seenset):
break
else:
final.append(listversion)
seen.append(setversion)
final.reverse() # put small successors set first
cache[current] = final
return cache[initialnode]
def _knownrevs(repo, nodes):
"""yield revision numbers of known nodes passed in parameters
Unknown revisions are silently ignored."""
torev = repo.changelog.nodemap.get
for n in nodes:
rev = torev(n)
if rev is not None:
yield rev
# mapping of 'set-name' -> <function to compute this set>
cachefuncs = {}
def cachefor(name):
"""Decorator to register a function as computing the cache for a set"""
def decorator(func):
assert name not in cachefuncs
cachefuncs[name] = func
return func
return decorator
def getrevs(repo, name):
"""Return the set of revision that belong to the <name> set
Such access may compute the set and cache it for future use"""
repo = repo.unfiltered()
if not repo.obsstore:
return ()
if name not in repo.obsstore.caches:
repo.obsstore.caches[name] = cachefuncs[name](repo)
return repo.obsstore.caches[name]
# To be simple we need to invalidate obsolescence cache when:
#
# - a new changeset is added
# - the public phase is changed
# - obsolescence markers are added
# - strip is used on a repo
def clearobscaches(repo):
"""Remove all obsolescence related cache from a repo
This remove all cache in obsstore is the obsstore already exist on the
repo.
(We could be smarter here given the exact event that trigger the cache
clearing)"""
# only clear cache is there is obsstore data in this repo
if 'obsstore' in repo._filecache:
repo.obsstore.caches.clear()
@cachefor('obsolete')
def _computeobsoleteset(repo):
"""the set of obsolete revisions"""
obs = set()
getrev = repo.changelog.nodemap.get
getphase = repo._phasecache.phase
for node in repo.obsstore.successors:
rev = getrev(node)
if rev is not None and getphase(repo, rev):
obs.add(rev)
return obs
@cachefor('unstable')
def _computeunstableset(repo):
"""the set of non obsolete revisions with obsolete parents"""
# revset is not efficient enough here
# we do (obsolete()::) - obsolete() by hand
obs = getrevs(repo, 'obsolete')
if not obs:
return set()
cl = repo.changelog
return set(r for r in cl.descendants(obs) if r not in obs)
@cachefor('suspended')
def _computesuspendedset(repo):
"""the set of obsolete parents with non obsolete descendants"""
suspended = repo.changelog.ancestors(getrevs(repo, 'unstable'))
return set(r for r in getrevs(repo, 'obsolete') if r in suspended)
@cachefor('extinct')
def _computeextinctset(repo):
"""the set of obsolete parents without non obsolete descendants"""
return getrevs(repo, 'obsolete') - getrevs(repo, 'suspended')
@cachefor('bumped')
def _computebumpedset(repo):
"""the set of revs trying to obsolete public revisions"""
# get all possible bumped changesets
tonode = repo.changelog.node
publicnodes = (tonode(r) for r in repo.revs('public()'))
successors = allsuccessors(repo.obsstore, publicnodes,
ignoreflags=bumpedfix)
    # revisions that are public or already obsolete don't count as bumped
query = '%ld - obsolete() - public()'
return set(repo.revs(query, _knownrevs(repo, successors)))
@cachefor('divergent')
def _computedivergentset(repo):
"""the set of rev that compete to be the final successors of some revision.
"""
divergent = set()
obsstore = repo.obsstore
newermap = {}
for ctx in repo.set('(not public()) - obsolete()'):
mark = obsstore.precursors.get(ctx.node(), ())
toprocess = set(mark)
while toprocess:
prec = toprocess.pop()[0]
if prec not in newermap:
successorssets(repo, prec, newermap)
newer = [n for n in newermap[prec] if n]
if len(newer) > 1:
divergent.add(ctx.rev())
break
toprocess.update(obsstore.precursors.get(prec, ()))
return divergent
def createmarkers(repo, relations, flag=0, metadata=None):
"""Add obsolete markers between changesets in a repo
    <relations> must be an iterable of (<old>, (<new>, ...)) tuples.
    `old` and `news` are changectx objects.
Trying to obsolete a public changeset will raise an exception.
Current user and date are used except if specified otherwise in the
metadata attribute.
This function operates within a transaction of its own, but does
not take any lock on the repo.
"""
# prepare metadata
if metadata is None:
metadata = {}
if 'date' not in metadata:
metadata['date'] = '%i %i' % util.makedate()
if 'user' not in metadata:
metadata['user'] = repo.ui.username()
tr = repo.transaction('add-obsolescence-marker')
try:
for prec, sucs in relations:
if not prec.mutable():
raise util.Abort("cannot obsolete immutable changeset: %s"
% prec)
nprec = prec.node()
nsucs = tuple(s.node() for s in sucs)
if nprec in nsucs:
raise util.Abort("changeset %s cannot obsolete itself" % prec)
repo.obsstore.create(tr, nprec, nsucs, flag, metadata)
repo.filteredrevcache.clear()
tr.close()
finally:
tr.release()
| gpl-2.0 | -6,756,349,244,520,645,000 | 35.570913 | 79 | 0.602656 | false |
dan-passaro/django-recommend | tests/test_models/test_object_similarity.py | 1 | 6649 | # coding: utf-8
"""Tests for ObjectSimilarity."""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import mock
import pytest
from django.contrib.contenttypes import models as ct_models
import django_recommend.tasks
import quotes.models
from django_recommend import models
from tests.utils import make_quote
@pytest.mark.django_db
def test_set_bad_order():
"""ObjectSimilarity.set() always stores in the same order."""
quote_a = make_quote(content='Hello', pk=30)
quote_b = make_quote(content='World', pk=40)
sim_obj = models.ObjectSimilarity.set(quote_a, quote_b, 10)
assert sim_obj.object_1 == quote_a
assert sim_obj.object_2 == quote_b
# Give quotes in other order and make sure the same result happens.
sim_obj.delete()
sim_obj = models.ObjectSimilarity.set(quote_b, quote_a, 20)
assert sim_obj.object_1 == quote_a
assert sim_obj.object_2 == quote_b
@pytest.mark.django_db
def test_set_existing():
"""Setting a similarity for an existing pair just updates the score."""
obj_a = make_quote('Hello')
obj_b = make_quote('World')
sim_obj = models.ObjectSimilarity.set(obj_a, obj_b, 10)
sim_obj_2 = models.ObjectSimilarity.set(obj_a, obj_b, 20)
assert sim_obj.pk == sim_obj_2.pk
assert sim_obj_2.score == 20
@pytest.mark.django_db
def test_set_existing_to_0():
"""A score of 0 causes deletion of an existing ObjectSimilarity."""
obj_a = make_quote('Hello', pk=12)
obj_b = make_quote('World', pk=22)
sim_obj = models.ObjectSimilarity.set(obj_a, obj_b, 10)
ctype = ct_models.ContentType.objects.get_for_model(obj_a)
models.ObjectSimilarity.set(obj_a, obj_b, 0)
with pytest.raises(models.ObjectSimilarity.DoesNotExist):
models.ObjectSimilarity.objects.get(pk=sim_obj.pk)
assert not models.ObjectSimilarity.objects.filter(
object_1_content_type=ctype, object_2_content_type=ctype,
object_1_id=obj_a.id, object_2_id=obj_b.pk).exists()
@pytest.mark.django_db
def test_set_0_doesnt_create():
"""Giving a pair of new objects a score of 0 does nothing."""
obj_a = make_quote('Hello', pk=12)
obj_b = make_quote('World', pk=22)
ctype = ct_models.ContentType.objects.get_for_model(obj_a)
sim_obj = models.ObjectSimilarity.set(obj_a, obj_b, 0)
assert sim_obj is None
assert not models.ObjectSimilarity.objects.filter(
object_1_content_type=ctype, object_2_content_type=ctype,
object_1_id=obj_a.id, object_2_id=obj_b.pk).exists()
@pytest.mark.django_db
def test_instance_list():
"""Querysets/model managers have an instance_list method."""
set_score = models.ObjectSimilarity.set # Just a readability alias
obj_a = make_quote('Hello')
obj_b = make_quote('World')
obj_c = make_quote('Foobar')
set_score(obj_a, obj_b, 1)
set_score(obj_a, obj_c, 2)
instances = models.ObjectSimilarity.objects.all().order_by(
'score').get_instances_for(obj_a)
assert [obj_b, obj_c] == list(instances)
@pytest.mark.django_db
def test_exclude_objects_qset():
"""ObjectSimilarity qset.exclude_objects can take a queryset."""
set_score = models.ObjectSimilarity.set # Just a readability alias
obj_a = make_quote('Hello')
obj_b = make_quote('World')
obj_c = make_quote('Foo')
obj_d = make_quote('Bar')
sim_b = set_score(obj_a, obj_b, 1)
set_score(obj_a, obj_c, 2)
sim_d = set_score(obj_a, obj_d, 3)
sims = models.ObjectSimilarity.objects.all().order_by('score')
sims = sims.exclude_objects(
quotes.models.Quote.objects.filter(pk=obj_c.pk))
assert [sim_b, sim_d] == list(sims)
@pytest.mark.django_db
def test_filter_objects():
"""ObjectSimilarity qset.filter_objects takes a queryset."""
set_score = models.ObjectSimilarity.set # Just a readability alias
obj_a = make_quote('Hello')
obj_b = make_quote('World')
obj_c = make_quote('Foo')
obj_d = make_quote('Bar')
sim_ab = set_score(obj_a, obj_b, 1)
sim_ac = set_score(obj_a, obj_c, 2)
sim_ad = set_score(obj_a, obj_d, 3)
set_score(obj_b, obj_c, 5) # This data that shouldn't be included
set_score(obj_b, obj_d, 6)
quote_a = quotes.models.Quote.objects.filter(pk=obj_a.pk)
sims = models.ObjectSimilarity.objects.filter_objects(quote_a)
sims = sims.order_by('score')
assert [sim_ab, sim_ac, sim_ad] == list(sims)
@pytest.mark.django_db
def test_get_instances_fallback():
"""get_instances_for uses a callback when an instance is missing."""
set_score = models.ObjectSimilarity.set # Just a readability alias
obj_a = make_quote('Hello')
obj_b = make_quote('World')
obj_c = make_quote('Foobar')
ctype = ct_models.ContentType.objects.get_for_model(obj_a)
set_score(obj_a, obj_b, 1)
set_score(obj_a, obj_c, 2)
obj_b_pk = obj_b.pk # .pk gets set to None after delete()
obj_b.delete()
handle_missing = mock.MagicMock()
objs = models.ObjectSimilarity.objects.all().order_by(
'score').get_instances_for(obj_a, handle_missing)
assert 1 == handle_missing.call_count
assert mock.call(ctype.pk, obj_b_pk) == handle_missing.call_args
assert [obj_c] == list(objs)
@pytest.mark.django_db
def test_get_instances_nopurge(settings):
"""get_instances_for propagates ObjectDoesNotExist without a handler."""
settings.RECOMMEND_PURGE_MISSING_DATA = False
set_score = models.ObjectSimilarity.set # Just a readability alias
obj_a = make_quote('Hello')
obj_b = make_quote('World')
obj_c = make_quote('Foobar')
set_score(obj_a, obj_b, 1)
set_score(obj_a, obj_c, 2)
obj_b.delete()
with pytest.raises(quotes.models.Quote.DoesNotExist):
models.ObjectSimilarity.objects.all().order_by(
'score').get_instances_for(obj_a)
@pytest.mark.django_db
def test_get_instances_purge(settings):
"""get_instances_for deletes missing data when purge is set."""
settings.RECOMMEND_PURGE_MISSING_DATA = True
obj_a = make_quote('Hello')
obj_b = make_quote('World')
django_recommend.set_score('foo', obj_a, 1)
django_recommend.set_score('foo', obj_b, 2)
django_recommend.tasks.update_similarity(obj_a)
obj_b.delete()
assert 2 == django_recommend.models.UserScore.objects.count()
assert 1 == django_recommend.models.ObjectSimilarity.objects.count()
models.ObjectSimilarity.objects.all().order_by(
'score').get_instances_for(obj_a)
assert 1 == django_recommend.models.UserScore.objects.count()
assert 0 == django_recommend.models.ObjectSimilarity.objects.count()
| mit | -818,364,985,226,614,300 | 33.630208 | 76 | 0.67183 | false |
NervanaSystems/neon | examples/mnist_mlp.py | 1 | 3202 | #!/usr/bin/env python
# ******************************************************************************
# Copyright 2014-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
"""
Train a small multi-layer perceptron with fully connected layers on MNIST data.
This example has some command line arguments that enable different neon features.
Examples:
python examples/mnist_mlp.py -b gpu -e 10
Run the example for 10 epochs using the NervanaGPU backend
python examples/mnist_mlp.py --eval_freq 1
After each training epoch, process the validation/test data
set through the model and display the cost.
python examples/mnist_mlp.py --serialize 1 -s checkpoint.pkl
After every iteration of training, dump the model to a pickle
file named "checkpoint.pkl". Changing the serialize parameter
changes the frequency at which the model is saved.
python examples/mnist_mlp.py --model_file checkpoint.pkl
Before starting to train the model, set the model state to
the values stored in the checkpoint file named checkpoint.pkl.
"""
from neon.callbacks.callbacks import Callbacks
from neon.data import MNIST
from neon.initializers import Gaussian
from neon.layers import GeneralizedCost, Affine
from neon.models import Model
from neon.optimizers import GradientDescentMomentum
from neon.transforms import Rectlin, Logistic, CrossEntropyBinary, Misclassification
from neon.util.argparser import NeonArgparser
from neon import logger as neon_logger
# parse the command line arguments
parser = NeonArgparser(__doc__)
args = parser.parse_args()
# load up the mnist data set
dataset = MNIST(path=args.data_dir)
train_set = dataset.train_iter
valid_set = dataset.valid_iter
# setup weight initialization function
init_norm = Gaussian(loc=0.0, scale=0.01)
# setup model layers
layers = [Affine(nout=100, init=init_norm, activation=Rectlin()),
Affine(nout=10, init=init_norm, activation=Logistic(shortcut=True))]
# setup cost function as CrossEntropy
cost = GeneralizedCost(costfunc=CrossEntropyBinary())
# setup optimizer
optimizer = GradientDescentMomentum(
0.1, momentum_coef=0.9, stochastic_round=args.rounding)
# initialize model object
mlp = Model(layers=layers)
# configure callbacks
callbacks = Callbacks(mlp, eval_set=valid_set, **args.callback_args)
# run fit
mlp.fit(train_set, optimizer=optimizer,
num_epochs=args.epochs, cost=cost, callbacks=callbacks)
error_rate = mlp.eval(valid_set, metric=Misclassification())
neon_logger.display('Misclassification error = %.1f%%' % (error_rate * 100))
| apache-2.0 | -2,849,390,297,457,863,700 | 34.186813 | 84 | 0.718613 | false |
catsky/rebang | 1/chartnet/view/views.py | 1 | 19356 | #-*- coding:utf-8 -*-
from flask import g, render_template, request, url_for, redirect, session
from chartnet import app
from chartnet import setting
from models import operatorDB
from common import login_required
from werkzeug import secure_filename
import os
import re
import time
from PIL import Image
from StringIO import StringIO
import sae
from sae.storage import Bucket
import logging
import json
import weixin
#jinja filter
from datetime import datetime
@app.template_filter()
def timesince(timestamp, default=u"刚才"):
"""
Returns string representing "time since" e.g.
3 days ago, 5 hours ago etc.
"""
dt = datetime.fromtimestamp(timestamp)
now = datetime.now()
diff = now - dt
periods = (
(diff.days / 365, u"年"),
(diff.days / 30, u"月"),
(diff.days / 7, u"星期"),
(diff.days, u"天"),
(diff.seconds / 3600, u"小时"),
(diff.seconds / 60, u"分钟"),
(diff.seconds, u"秒钟"),
)
for period, singular in periods:
if period:
return u"%d %s之前" % (period, singular)
return default
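# Illustrative use in a Jinja2 template (assuming `obj._add_time` is an epoch
# timestamp, as elsewhere in this module): {{ obj._add_time|timesince }}
# renders e.g. u"3 天之前" for a post added three days ago.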
@app.template_filter()
def tag_separate_link(tags):
"""
return string of seperated tag links
"""
links = ''
if tags:
taglist = tags.split(',')
for tag in taglist:
links += "<a href='/tag/%s' rel='tag'>%s</a> "%(tag, tag)
return links
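# Illustrative example: tag_separate_link(u'python,flask') returns
# "<a href='/tag/python' rel='tag'>python</a> <a href='/tag/flask' rel='tag'>flask</a> ".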
from sae.ext.storage import monkey
monkey.patch_all()
# create db table if first run
from chartnet import db
db.create_all(app=app)
if True:
import sae.mail
from sae.taskqueue import add_task
import sae.storage
######
def put_obj2storage(file_name = '', data = '', expires='365', type=None, encoding= None, domain_name = setting.STORAGE_DOMAIN_NAME):
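    # Flow summary (descriptive comment, not original code): the raw upload is
    # written to the SAE "attachment" storage bucket, a 320x200 PIL thumbnail
    # is generated from the stored file and saved next to it with a
    # "_thumbnail" suffix, and both public URLs are returned to the caller.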
import sae.const
access_key = sae.const.ACCESS_KEY
secret_key = sae.const.SECRET_KEY
appname = sae.const.APP_NAME
domain_name = "attachment"
bucket = Bucket(domain_name)
bucket.put()
bucket.post(metadata={'expires': '2d'})
attrs = bucket.stat()
bucket.put_object(file_name, data)
file_url = bucket.generate_url(file_name)
    # begin creating a thumbnail of the uploaded image
im = Image.open("/s/attachment/" + file_name)
#im = Image.open(bucket.get_object_contents(file_name))
im.thumbnail((320,200))
#im.save("/s/attachment/" + file_name+"_thumbnail.jpg")
imgext = re.search("(\.\w*)$", file_name)
if imgext:
thumbnail_name = file_name + "_thumbnail"+ imgext.group(1)
else:
thumbnail_name = file_name + "_thumbnail.jpg"
pureext = imgext.group(1)[1:]
if pureext == "jpg":
pureext = "jpeg"
#bucket.put_object(thumbnail_name, im.tostring('jpeg', 'RGB'))
bucket.put_object(thumbnail_name, im.tostring("jpeg", 'RGB'))
thumbnail_url = bucket.generate_url(thumbnail_name)
#s = sae.storage.Client()
#ob = sae.storage.Object(data = data, cache_control='access plus %s day' % expires, content_type= type, content_encoding= encoding)
return file_url, thumbnail_url
#return s.put(domain_name, file_name, ob)
#from sae.const import (MYSQL_HOST, MYSQL_HOST_S,MYSQL_PORT, MYSQL_USER, MYSQL_PASS, MYSQL_DB)
@app.before_request
def before_request():
#appinfo = sae.core.Application()
#g.db = MySQLdb.connect(MYSQL_HOST, MYSQL_USER, MYSQL_PASS,MYSQL_DB, port=int(MYSQL_PORT)
pass
@app.teardown_request
def teardown_request(exception):
if hasattr(g,'db') : g.db.close()
@app.route('/start')
def start():
return render_template('start.html')
@app.route('/crossdomain.xml')
def crossdomain():
return render_template('crossdomain.xml')
@app.route('/')
@app.route('/index', methods = ['GET', 'POST'])
@app.route('/index/<int:page>', methods = ['GET', 'POST'])
def _index(page = 1):
posts = None
if request.args.get('_start','') != '' :
_start = int(request.args.get('_start', ''))
if request.args.get('cat','') != '' :
posts = operatorDB.get_post_page_category(request.args.get('cat',''))
elif request.args.get('tags','') != '' :
posts = operatorDB.get_post_page_tags(request.args.get('tags',''))
else:
posts = operatorDB.get_post_page(page)
editor_posts = operatorDB.get_editor_post(0,3)
return render_template('index.html', c=request.args.get('cat',''),
t=request.args.get('tags',''),
tags = operatorDB.get_all_tag_name(),
cats=operatorDB.get_all_cat_name(),
links=operatorDB.get_all_links(),
posts=posts,
BASE_URL=setting.BASE_URL,
editor_posts = editor_posts)
@app.route('/category/<string:category_name>')
def category(category_name):
_start = 0
_end = 0
if request.args.get('_start', '') != '':
_start = int(request.args.get('_start', ''))
_end = _start + setting.EACH_PAGE_POST_NUM
posts = operatorDB.get_post_page_category(
category_name)
return render_template('category.html',
cats = operatorDB.get_all_cat_name(),
posts = posts,
BASE_URL = setting.BASE_URL,
category_name = category_name)
@app.route('/tag/<string:tag_name>')
def tag(tag_name):
_start = 0
_end = 0
if request.args.get('_start', '') != '':
_start = int(request.args.get('_start', ''))
_end = _start + setting.EACH_PAGE_POST_NUM
posts = operatorDB.get_post_page_tags(
tag_name)
return render_template('tag.html',
tags = operatorDB.get_all_tag_name(),
posts = posts,
BASE_URL = setting.BASE_URL,
tag_name = tag_name)
@app.route('/download')
def download():
return render_template('download.html',coms=operatorDB.get_comments_new(),tags = operatorDB.get_all_tag_name(),cats=operatorDB.get_all_cat_name(),links=operatorDB.get_all_links())
@app.route('/detailpost/<int:post_id>', methods=['GET', 'POST'])
def detailpost(post_id):
if request.method == 'POST':
operatorDB.add_new_comment(post_id,request.form.get('author', ''),request.form.get('email', ''),request.form.get('url', ''),1,request.form.get('comment', ''))
_article = operatorDB.detail_post_by_id(post_id)
comments = operatorDB.get_post_comments(post_id)
comLen = len(comments)
_older,_newer = operatorDB.get_post_older_newer(post_id)
if request.args.get('weixin', '') != '':
return render_template('detailpost_weixin.html',post_id=post_id,obj=_article,add_time=time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(_article._add_time)))
else:
return render_template('detailpost.html',_older=_older,_newer=_newer,coms=operatorDB.get_comments_new(),tags = operatorDB.get_all_tag_name(),cats=operatorDB.get_all_cat_name(),links=operatorDB.get_all_links(),post_id=post_id,comLen=comLen,comments=comments,obj=_article,add_time=time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(_article._add_time)))
@app.route('/timeline')
def _timeline():
_data = operatorDB.getTimelineData()
rs = u'{"timeline":{"headline":"c-house的时间线","type":"default","text":"始于2012年7月","date": [%s]}}' % _data.timeline_data
return render_template('timeline.html',rs=rs,coms=operatorDB.get_comments_new(),tags = operatorDB.get_all_tag_name(),cats=operatorDB.get_all_cat_name(),links=operatorDB.get_all_links())
@app.route('/about', methods=['GET', 'POST'])
def _about():
post = operatorDB.getAboutPost()
return render_template('about.html', post=post)
@app.route('/contact-us', methods=['GET', 'POST'])
def _contact():
post = operatorDB.getContactPost()
return render_template('contact.html', post=post)
@app.route('/soGoodorBad')
def _soGoodorBad():
if request.args['action']=='so_good':
operatorDB.addSogood(request.args['id'])
if request.args['action']=='so_bad':
operatorDB.addSobad(request.args['id'])
return redirect(url_for('_about'))
# Administrator section
@app.route('/admin/index')
@login_required
def admin_index():
return render_template('admin/index_admin.html',title=u'后台管理',SITE_TITLE=setting.SITE_TITLE,BASE_URL=setting.BASE_URL)
@app.route('/admin/logout')
def admin_logout():
session.pop('username', None)
return render_template('admin/login_admin.html',has_user=operatorDB.has_user(),title=u'管理员登录',SITE_TITLE=setting.SITE_TITLE,BASE_URL=setting.BASE_URL)
@app.route('/admin/login', methods=['GET', 'POST'])
def admin_login():
if request.method == 'POST':
if not operatorDB.has_user():
operatorDB.add_user(request.form.get('name', ''),request.form.get('password', ''))
session['username'] = request.form.get('name', '')
return redirect(url_for('admin_index'))
if operatorDB.login_user(request.form.get('name', ''),request.form.get('password', '')):
session['username'] = request.form.get('name', '')
return redirect(url_for('admin_index'))
return render_template('admin/login_admin.html',has_user=operatorDB.has_user(),title=u'管理员登录',SITE_TITLE=setting.SITE_TITLE,BASE_URL=setting.BASE_URL)
@app.route('/admin/links', methods=['GET', 'POST'])
@login_required
def admin_links():
obj = None
if request.method == 'POST':
act = request.form['act']
if act == 'add':
operatorDB.add_new_link(request.form.get('name', ''),request.form.get('sort', ''),request.form.get('url', ''))
if act == 'edit':
operatorDB.update_link_edit(request.form.get('id', ''),request.form.get('name', ''),request.form.get('sort', ''),request.form.get('url', ''))
if request.method == 'GET':
act = request.args.get('act', '')
if act == 'del':
operatorDB.del_link_by_id(request.args.get('id', ''))
if act == 'edit':
obj = operatorDB.get_link_by_id(request.args.get('id', ''))
return render_template('admin/link_admin.html',obj=obj,objs=operatorDB.get_all_links(),title=u'友情链接管理',SITE_TITLE=setting.SITE_TITLE,BASE_URL=setting.BASE_URL)
@app.route('/admin/add_post', methods=['GET', 'POST'])
@login_required
def admin_addpost():
if request.method == 'POST':
_post_type = request.form.get('post_type', '')
if _post_type == '':
_post_type = 0
_tags = request.form.get('tags', '').replace(u',',',')
tagslist = set([x.strip() for x in _tags.split(',')])
try:
tagslist.remove('')
except:
pass
if tagslist:
_tags = ','.join(tagslist)
logging.error(request.form.get('content', ''))
imgsrc = re.search("img src=(\S*)", request.form.get('content', ''))
imgthumbnail = ''
if imgsrc:
imgext = re.search("(\.\w*)$", imgsrc.group(1))
if imgext:
logging.error(">>admin_addpost: %s" % imgsrc.group(1)+"_thumbnail"+ imgext.group(1))
imgthumbnail = imgsrc.group(1)+"_thumbnail"+ imgext.group(1)
_article = operatorDB.add_new_article(request.form.get('category', ''),
request.form.get('title', ''),
request.form.get('content', ''),
_tags,request.form.get('password', ''),
shorten_content(request.form.get('content', '')),
imgthumbnail, _post_type,
request.form.get('editor_title', '')
)
postId = _article._id
if _tags!='':
operatorDB.add_postid_to_tags(_tags.split(','), str(postId))
operatorDB.add_postid_to_cat(request.form.get('category', ''),str(postId))
if operatorDB.isHasData():
updateTimelineData(_article)
else:
addTimelineData()
cats = operatorDB.get_all_cat_name()
tags = operatorDB.get_all_tag_name()
return render_template('admin/addpost_admin.html',title=u'添加文章',cats=cats,tags=tags,SITE_TITLE=setting.SITE_TITLE,BASE_URL=setting.BASE_URL)
@app.route('/admin/edit_post',methods=['GET', 'POST'])
@login_required
def admin_editpost():
_article = None
if request.method == 'POST':
post_id = request.form.get('id', '')
if request.form.get('act', '')=='editpost':
_post_type = request.form.get('post_type', '')
if _post_type == '':
_post_type = 0
_tags = request.form.get('tags', '').replace(u',',',')
tagslist = set([x.strip() for x in _tags.split(',')])
try:
tagslist.remove('')
except:
pass
if tagslist:
_tags = ','.join(tagslist)
imgsrc = re.search("img src=(\S*)", request.form.get('content', ''))
imgthumbnail = ''
if imgsrc:
imgext = re.search("(\.\w*)$", imgsrc.group(1))
if imgext:
logging.error(">>admin_addpost: %s" % imgsrc.group(1)+"_thumbnail"+ imgext.group(1))
imgthumbnail = imgsrc.group(1)+"_thumbnail"+ imgext.group(1)
operatorDB.update_article(request.form.get('id', ''),
request.form.get('category', ''),
request.form.get('title', ''),
request.form.get('content', ''),
_tags,request.form.get('password', ''),
shorten_content(request.form.get('content', '')),
imgthumbnail, _post_type,
request.form.get('editor_title', '')
)
if _tags!='':
operatorDB.add_postid_to_tags(_tags.split(','), str(post_id))
operatorDB.add_postid_to_cat(request.form.get('category', ''),str(post_id))
_article = operatorDB.detail_post_by_id(post_id)
cats = operatorDB.get_all_cat_name()
tags = operatorDB.get_all_tag_name()
return render_template('admin/editpost_admin.html',obj=_article,cats=cats,tags=tags,SITE_TITLE=setting.SITE_TITLE,BASE_URL=setting.BASE_URL)
@app.route('/admin/del_post/<int:post_id>')
@login_required
def admin_delpost(post_id):
operatorDB.del_post_by_id(post_id)
return redirect(url_for('admin_editpost'))
@app.route('/admin/comment',methods=['GET', 'POST'])
@login_required
def admin_editcomment():
comments = None
if request.method == 'GET':
print('----------admin_editcomment-----GET--------')
if request.args.get('act', '')=='del':
commentid = request.args.get('commentid', '')
operatorDB.del_comment_by_id(commentid)
post_id = request.args.get('post_id', '')
comments = operatorDB.get_post_comments(post_id)
if request.method == 'POST':
post_id = request.form.get('id', '')
comments = operatorDB.get_post_comments(post_id)
return render_template('admin/editcomment_admin.html',comments=comments,SITE_TITLE=setting.SITE_TITLE,BASE_URL=setting.BASE_URL)
@app.route('/uploadFile',methods=['GET', 'POST'])
def uploadFile():
if request.method == 'POST':
file = request.files['Filedata']
file_url = ''
new_file_name = ''
if file:
filename = secure_filename(file.filename)
try:
file_type = filename.split('.')[-1].lower()
new_file_name = "%d.%s"% (int(time.time()), file_type)
except:
file_type = ''
new_file_name = str(int(time.time()))
if setting.debug:
file.save(os.path.join(app.config['UPLOAD_FOLDER'],new_file_name))
else:
encoding = None
if "*.png;*.jpg;*.jpeg;*.gif;".find(file_type) != -1:#图片
img = Image.open(StringIO(file.stream.read()))
file_url, thumbnail_url = put_obj2storage(file_name = new_file_name, data = img.tostring('jpeg', 'RGB'), expires='365', type= file.content_type, encoding= encoding)
else:
file_url, thumbnail_url = put_obj2storage(file_name = new_file_name, data = file.stream.read(), expires='365', type= file.content_type, encoding= encoding)
logging.error("==>uploadfile: %s, thumbnail file:%s" % (file_url, thumbnail_url))
ret = {}
ret["imgUrl"] = file_url
ret["thumbnailUrl"] = thumbnail_url
file_url_json = json.dumps(ret)
return file_url_json
# WeChat official-account message server URL verification.
# The URL must be submitted manually in the official-account admin console;
# only after verification will WeChat push messages to this endpoint.
@app.route('/weixin', methods=['GET'])
def weixin_access_verify():
echostr = request.args.get('echostr')
if weixin.verification(request) and echostr is not None:
return echostr
return 'access verification fail'
# Message push from the WeChat server
@app.route('/weixin', methods=['POST'])
def weixin_msg():
logging.error("1.weixin: in weixin_msg ")
if weixin.verification(request):
logging.error("2.weixin verify done")
data = request.data
msg = weixin.parse_msg(data)
if weixin.user_subscribe_event(msg):
return weixin.help_info(msg)
elif weixin.is_text_msg(msg):
content = msg['Content']
if content == u'?' or content == u'?':
return weixin.help_info(msg)
elif (content == u'n' or content == u'N'
or content == u'new' or content == u'NEW'
or content == u'New'):
posts = operatorDB.get_weixin_articles()
rmsg = weixin.response_news_msg(msg, posts)
logging.error("3.weixin get rmsg: %s"%rmsg)
return rmsg
else:
return weixin.help_info(msg)
elif weixin.is_location_msg(msg):
Label = msg['Label']
return weixin.help_info(msg)
return 'message processing fail'
def shorten_content(htmlstr='',sublength=80):
result = re.sub(r'<[^>]+>', '', htmlstr)
result = result.replace(" ","")
return result[0:sublength]
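# Illustrative example: shorten_content('<p>Hello <b>world</b></p>', 5)
# strips the tags (and any " " entities) and returns 'Hello'.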
def addTimelineData():
_list = operatorDB.getArticleAllForTimeline()
_data = ''
for _article in _list:
startDate = time.strftime('%Y,%m,%d',time.localtime(_article._add_time))
headline = _article._title
text = '%s --- %sdetailpost/%s' % (_article._category,setting.BASE_URL,_article._id)
_data = '%s,{"startDate":"%s","headline":"%s","text":"%s","asset":{"media":"","credit":"","caption":""}}' % (_data,startDate,headline,text)
operatorDB.saveTimelineData(_data[1:])
def updateTimelineData(_article):
timelineData = operatorDB.getTimelineData()
startDate = time.strftime('%Y,%m,%d',time.localtime(_article._add_time))
headline = _article._title
text = '%s --- %sdetailpost/%s' % (_article._category,setting.BASE_URL,_article._id)
_data = '%s,{"startDate":"%s","headline":"%s","text":"%s","asset":{"media":"","credit":"","caption":""}}' % (timelineData.timeline_data,startDate,headline,text)
operatorDB.saveTimelineData(_data,timelineData.id) | mit | -5,900,452,819,933,207,000 | 39.970021 | 353 | 0.58802 | false |
CivicKnowledge/ambry_sources | tests/test_sources/unit/test_accessors.py | 1 | 8210 | # -*- coding: utf-8 -*-
import unittest
from collections import OrderedDict
from attrdict import AttrDict
try:
# py3
from unittest.mock import Mock, MagicMock, patch, call, PropertyMock
except ImportError:
# py2
from mock import Mock, MagicMock, patch, call, PropertyMock
from six import u
from ambry_sources.sources import SourceSpec, ShapefileSource, DatabaseRelationSource
from ambry_sources.sources.util import DelayedOpen
class TestShapefileSource(unittest.TestCase):
def _get_fake_collection(self):
""" Returns fake collection which could be used as replacement for fiona.open(...) return value. """
class FakeCollection(object):
schema = {
'properties': OrderedDict([('col1', 'int:10')])}
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
pass
def __iter__(self):
return iter([{'properties': OrderedDict([('col1', 1)]), 'geometry': 'LINE', 'id': '0'}])
return FakeCollection()
# _convert_column tests
def test_converts_shapefile_column(self):
spec = Mock()
spec.start_line = 0
spec.header_lines = []
fstor = Mock(DelayedOpen)
source = ShapefileSource(spec, fstor)
expected_column = {'name': 'name1', 'type': 'int'}
self.assertEqual(
source._convert_column((u('name1'), 'int:3')),
expected_column)
# _get_columns tests
def test_converts_given_columns(self):
spec = Mock()
spec.start_line = 0
spec.header_lines = []
fstor = Mock(spec=DelayedOpen)
source = ShapefileSource(spec, fstor)
column1 = ('name1', 'int:10')
column2 = ('name2', 'str:10')
converted_column1 = {'name': 'name1', 'type': 'int'}
converted_column2 = {'name': 'name2', 'type': 'str'}
shapefile_columns = OrderedDict([column1, column2])
ret = source._get_columns(shapefile_columns)
self.assertIn(converted_column1, ret)
self.assertIn(converted_column2, ret)
def test_extends_with_id_and_geometry(self):
spec = Mock()
spec.start_line = 0
spec.header_lines = []
fstor = Mock(spec=DelayedOpen)
source = ShapefileSource(spec, fstor)
shapefile_columns = OrderedDict()
ret = source._get_columns(shapefile_columns)
self.assertEqual(len(ret), 2)
names = [x['name'] for x in ret]
self.assertIn('id', names)
self.assertIn('geometry', names)
types = [x['type'] for x in ret]
self.assertIn('geometry_type', types)
@patch('shapely.wkt.dumps')
@patch('shapely.geometry.shape')
@patch('fiona.open')
def test_reads_first_layer_if_spec_segment_is_empty(self, fake_open, fake_shape, fake_dumps):
fake_collection = self._get_fake_collection()
fake_open.return_value = fake_collection
spec = SourceSpec('http://example.com')
assert spec.segment is None
fstor = Mock(spec=DelayedOpen)
fstor._fs = Mock()
source = ShapefileSource(spec, fstor)
next(source._get_row_gen())
self.assertEqual(len(fake_open.mock_calls), 1)
self.assertEqual(
fake_open.call_args_list[0][1]['layer'],
0,
'open function was called with wrong layer.')
@patch('shapely.wkt.dumps')
@patch('shapely.geometry.shape')
@patch('fiona.open')
def test_reads_layer_specified_by_segment(self, fake_open, fake_shape, fake_dumps):
fake_collection = self._get_fake_collection()
fake_open.return_value = fake_collection
spec = SourceSpec('http://example.com', segment=5)
fstor = Mock(spec=DelayedOpen)
fstor._fs = Mock()
source = ShapefileSource(spec, fstor)
next(source._get_row_gen())
self.assertEqual(len(fake_open.mock_calls), 1)
self.assertEqual(
fake_open.call_args_list[0][1]['layer'],
5,
'open function was called with wrong layer.')
@patch('shapely.wkt.dumps')
@patch('shapely.geometry.shape')
@patch('ambry_sources.sources.accessors.ShapefileSource._get_columns')
@patch('fiona.open')
def test_populates_columns_of_the_spec(self, fake_open, fake_get, fake_shape, fake_dumps):
fake_collection = self._get_fake_collection()
fake_open.return_value = fake_collection
fake_get.return_value = [{'name': 'col1', 'type': 'int'}]
spec = SourceSpec('http://example.com')
fstor = Mock(spec=DelayedOpen)
fstor._fs = Mock()
source = ShapefileSource(spec, fstor)
next(source._get_row_gen())
self.assertEqual(len(source.spec.columns), 1)
self.assertEqual(source.spec.columns[0].name, 'col1')
self.assertEqual(len(fake_open.mock_calls), 1)
self.assertEqual(len(fake_get.mock_calls), 2)
@patch('shapely.wkt.dumps')
@patch('shapely.geometry.shape')
@patch('ambry_sources.sources.accessors.ShapefileSource._get_columns')
@patch('fiona.open')
def test_converts_row_id_to_integer(self, fake_open, fake_get, fake_shape, fake_dumps):
fake_collection = self._get_fake_collection()
fake_open.return_value = fake_collection
fake_shape.expects_call().is_a_stub()
fake_dumps.expects_call().is_a_stub()
fake_get.return_value = [{'name': 'col1', 'type': 'int'}]
spec = SourceSpec('http://example.com')
fstor = Mock(spec=DelayedOpen)
fstor._fs = Mock()
source = ShapefileSource(spec, fstor)
row_gen = source._get_row_gen()
first_row = next(row_gen)
self.assertEqual(first_row[0], 0)
self.assertEqual(len(fake_open.mock_calls), 1)
self.assertEqual(len(fake_get.mock_calls), 2)
@patch('shapely.wkt.dumps')
@patch('shapely.geometry.shape')
@patch('ambry_sources.sources.accessors.ShapefileSource._get_columns')
@patch('fiona.open')
def test_saves_header(self, fake_open, fake_get, fake_shape, fake_dumps):
fake_collection = self._get_fake_collection()
fake_open.return_value = fake_collection
fake_get.return_value = [
{'name': 'id', 'type': 'int'},
{'name': 'col1', 'type': 'int'},
{'name': 'geometry', 'type': 'geometry_type'}]
spec = SourceSpec('http://example.com')
fstor = Mock(spec=DelayedOpen)
fstor._fs = Mock()
source = ShapefileSource(spec, fstor)
next(source._get_row_gen())
self.assertEqual(source._headers, ['id', 'col1', 'geometry'])
self.assertEqual(len(fake_open.mock_calls), 1)
self.assertEqual(len(fake_get.mock_calls), 2)
@patch('shapely.wkt.dumps')
@patch('shapely.geometry.shape')
@patch('ambry_sources.sources.accessors.ShapefileSource._get_columns')
@patch('fiona.open')
def test_last_element_in_the_row_is_wkt(self, fake_open, fake_get, fake_shape, fake_dumps):
fake_collection = self._get_fake_collection()
fake_open.return_value = fake_collection
fake_shape.expects_call().is_a_stub()
fake_dumps.return_value = 'I AM FAKE WKT'
fake_get.return_value = [{'name': 'col1', 'type': 'int'}]
spec = SourceSpec('http://example.com')
fstor = Mock(spec=DelayedOpen)
fstor._fs = Mock()
source = ShapefileSource(spec, fstor)
row_gen = source._get_row_gen()
first_row = next(row_gen)
self.assertEqual(first_row[-1], 'I AM FAKE WKT')
self.assertEqual(len(fake_open.mock_calls), 1)
self.assertEqual(len(fake_get.mock_calls), 2)
class DatabaseRelationSourceTest(unittest.TestCase):
def test_uses_url_as_table(self):
fake_execute = Mock(return_value=iter([[1], [2]]))
connection = AttrDict({'execute': fake_execute})
spec = SourceSpec('table1')
relation_source = DatabaseRelationSource(spec, 'sqlite', connection)
rows = [x for x in relation_source]
self.assertEqual(rows, [[1], [2]])
fake_execute.assert_called_once_with('SELECT * FROM {};'.format('table1'))
| bsd-2-clause | -1,907,212,712,318,703,400 | 38.471154 | 108 | 0.611815 | false |
bob-anderson-ok/py-ote | src/pyoteapp/csvreader.py | 1 | 8060 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 23 07:24:21 2017
@author: Bob Anderson
"""
import os
tangraNeedsBackgroundSubtraction = True
pymovieSignalColumnCount = 0
def readLightCurve(filepath):
"""
Reads the intensities and timestamps from Limovie,
Tangra, PYOTE, or R-OTE csv files. (PYOTE and R-OTE file formats are equal)
"""
if fileCanBeOpened(filepath):
readOk, errMsg, frame, time, value, ref1, ref2, ref3, extra, aperture_names, headers = readAs(filepath)
if readOk:
return frame, time, value, ref1, ref2, ref3, extra, aperture_names, headers
else:
raise Exception(errMsg)
else:
raise Exception('File could not be opened')
def fileCanBeOpened(file):
return os.path.exists(file) and os.path.isfile(file)
def getFileKind(file):
fileobject = open(file)
with fileobject:
line = fileobject.readline()
if 'Tangra' in line:
return 'Tangra'
elif 'Limovie' in line:
return 'Limovie'
elif 'PyMovie' in line:
            return 'PyMovie'
elif 'R-OTE' in line or line[0] == '#': # Matches PyOTE and PyMovie files too!
return 'R-OTE'
elif 'RAW' in line:
return 'raw'
else:
return '???'
# noinspection PyUnusedLocal
def tangraParser(line, frame, time, value, ref1, ref2, ref3, extra):
"""
We only accept Tangra files that have been formatted
according to the AOTA default which is ---
UT time field formatted as [hh:mm:ss.sss]
We detect the state of background subtraction (either done or needed)
An example data line: 11,[16:00:14.183],2837.8,100.0,4097.32,200.0
"""
part = line.split(',')
if len(part) < 2:
raise Exception(line + " :is an invalid Tangra file entry.")
else:
frame.append(part[0])
time.append(part[1])
try:
for item in part:
if item == '':
raise Exception(line + " :cannot be parsed. Are there empty fields in data lines? Fix them all!")
if tangraNeedsBackgroundSubtraction:
value.append(str(float(part[2]) - float(part[3])))
if len(part) >= 6:
if part[4]:
ref1.append(str(float(part[4]) - float(part[5])))
if len(part) >= 8:
if part[6]:
ref2.append(str(float(part[6]) - float(part[7])))
if len(part) >= 10:
if part[8]:
ref3.append(str(float(part[8]) - float(part[9])))
else:
value.append(part[2])
if len(part) >= 4:
if part[3]:
ref1.append(part[3])
if len(part) >= 5:
if part[4]:
ref2.append(part[4])
if len(part) >= 6:
if part[5]:
ref3.append(part[5])
except ValueError:
raise Exception(line + " :cannot be parsed. Are there empty fields?")
# noinspection PyUnusedLocal
def limovieParser(line, frame, time, value, ref1, ref2, ref3, extra):
"""
Limovie sample line ---
3.5,21381195,21381200,22,27,43.0000,,,,,2737.8,3897.32 ...
"""
part = line.split(',')
frame.append(part[0])
time.append('[' + part[3] + ':' + part[4] + ':' + part[5] + ']')
value.append(part[10])
if part[11]:
ref1.append(part[11])
if part[12]:
ref2.append(part[12])
# noinspection PyUnusedLocal
def roteParser(line, frame, time, value, ref1, ref2, ref3, extra):
"""
R-OTE sample line ---
1.00,[17:25:39.3415],2737.8,3897.32,675.3,892.12
"""
part = line.split(',')
frame.append(part[0])
time.append(part[1])
value.append(part[2])
if len(part) >= 4:
if part[3]:
ref1.append(part[3])
if len(part) >= 5:
if part[4]:
ref2.append(part[4])
if len(part) >= 6:
if part[5]:
ref3.append(part[5])
# noinspection PyUnusedLocal
def pymovieParser(line, frame, time, value, ref1, ref2, ref3, extra):
"""
    PyMovie sample line ---
1.00,[17:25:39.3415],2737.8,3897.32,675.3,892.12
"""
part = line.split(',')
frame.append(part[0])
time.append(part[1])
value.append(part[2])
if len(part) >= 4 and pymovieSignalColumnCount >= 2:
if part[3]:
ref1.append(part[3])
if len(part) >= 5 and pymovieSignalColumnCount >= 3:
if part[4]:
ref2.append(part[4])
if len(part) >= 6 and pymovieSignalColumnCount >= 4:
if part[5]:
ref3.append(part[5])
if pymovieSignalColumnCount > 4:
for i in range(6, pymovieSignalColumnCount + 2):
if len(part) > i and part[i]:
extra[i-6].append(part[i])
# noinspection PyUnusedLocal
def rawParser(line, frame, time, value, ref1, ref2, ref3, extra):
value.append(line)
def readAs(file):
global tangraNeedsBackgroundSubtraction
global pymovieSignalColumnCount
kind = getFileKind(file)
fileobject = open(file)
frame = []
time = []
value = []
ref1 = []
ref2 = []
ref3 = []
extra = []
headers = []
aperture_names = []
if kind == 'Tangra':
colHeaderKey = 'FrameNo'
parser = tangraParser
elif kind == 'R-OTE': # PYOTE uses same colHeaderKey
colHeaderKey = 'FrameNum'
parser = roteParser
elif kind == 'PyMovie':
colHeaderKey = 'FrameNum'
pymovieSignalColumnCount = 0
parser = pymovieParser
elif kind == 'Limovie':
colHeaderKey = 'No.'
parser = limovieParser
elif kind == 'raw':
colHeaderKey = 'RAW'
parser = rawParser
else:
return False, 'invalid file "kind"', frame, time, value, ref1, ref2, ref3, extra, aperture_names, headers
with fileobject:
while True:
line = fileobject.readline()
if line:
if colHeaderKey in line:
if kind == 'Tangra':
if line.find('SignalMinusBackground') > 0:
tangraNeedsBackgroundSubtraction = False
else:
tangraNeedsBackgroundSubtraction = True
if kind == 'PyMovie':
# We need to count the number of times 'signal" starts
# a column header
line = line.rstrip() # Get rid of possible trailing new line \n
parts = line.split(',')
for part in parts:
if part.startswith('signal'):
pymovieSignalColumnCount += 1
aperture_names.append(part.split('-')[1])
# If there are more than 4 columns of 'signals', we need to setup
# extra to hold those columns
for i in range(5, pymovieSignalColumnCount+1):
extra.append([])
while True:
line = fileobject.readline()
if line:
# noinspection PyBroadException
try:
parser(line, frame, time, value, ref1, ref2, ref3, extra)
except Exception as e:
return False, str(e), frame, time, value, \
ref1, ref2, ref3, extra, aperture_names, headers
else:
return True, kind, frame, time, value, ref1, ref2, ref3, extra, aperture_names, headers
headers.append(line[:-1])
else:
return (False, colHeaderKey + ' not found as first column header',
[], [], [], [], [], [], [], aperture_names, headers)
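# Minimal usage sketch (illustrative; 'lightcurve.csv' is a hypothetical path):
#
#     frame, time, value, ref1, ref2, ref3, extra, names, headers = \
#         readLightCurve('lightcurve.csv')
#
# readAs() reports the detected file kind (or an error message) as its second
# return value, and readLightCurve() raises an Exception if the file is
# missing or cannot be parsed.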
| mit | 7,009,879,351,355,001,000 | 31.897959 | 115 | 0.518734 | false |
fooelisa/netmiko | netmiko/huawei/huawei_ssh.py | 1 | 3989 | from __future__ import print_function
from __future__ import unicode_literals
import time
import re
from netmiko.cisco_base_connection import CiscoSSHConnection
from netmiko import log
class HuaweiSSH(CiscoSSHConnection):
def session_preparation(self):
"""Prepare the session after the connection has been established."""
self._test_channel_read()
self.set_base_prompt()
self.disable_paging(command="screen-length 0 temporary")
# Clear the read buffer
time.sleep(.3 * self.global_delay_factor)
self.clear_buffer()
def config_mode(self, config_command='system-view'):
"""Enter configuration mode."""
return super(HuaweiSSH, self).config_mode(config_command=config_command)
def exit_config_mode(self, exit_config='return'):
"""Exit configuration mode."""
return super(HuaweiSSH, self).exit_config_mode(exit_config=exit_config)
def check_config_mode(self, check_string=']'):
"""Checks whether in configuration mode. Returns a boolean."""
return super(HuaweiSSH, self).check_config_mode(check_string=check_string)
def check_enable_mode(self, *args, **kwargs):
"""Huawei has no enable mode."""
pass
def enable(self, *args, **kwargs):
"""Huawei has no enable mode."""
return ''
def exit_enable_mode(self, *args, **kwargs):
"""Huawei has no enable mode."""
return ''
def set_base_prompt(self, pri_prompt_terminator='>', alt_prompt_terminator=']',
delay_factor=1):
"""
Sets self.base_prompt
Used as delimiter for stripping of trailing prompt in output.
        Should be set to something that is general and applies in multiple contexts.
        For Huawei VRP this will be the router prompt with < > or [ ] stripped off.
This will be set on logging in, but not when entering system-view
"""
log.debug("In set_base_prompt")
delay_factor = self.select_delay_factor(delay_factor)
self.clear_buffer()
self.write_channel(self.RETURN)
time.sleep(.5 * delay_factor)
prompt = self.read_channel()
prompt = self.normalize_linefeeds(prompt)
# If multiple lines in the output take the last line
prompt = prompt.split(self.RESPONSE_RETURN)[-1]
prompt = prompt.strip()
# Check that ends with a valid terminator character
if not prompt[-1] in (pri_prompt_terminator, alt_prompt_terminator):
raise ValueError("Router prompt not found: {0}".format(prompt))
# Strip off any leading HRP_. characters for USGv5 HA
prompt = re.sub(r"^HRP_.", "", prompt, flags=re.M)
# Strip off leading and trailing terminator
prompt = prompt[1:-1]
prompt = prompt.strip()
self.base_prompt = prompt
log.debug("prompt: {0}".format(self.base_prompt))
return self.base_prompt
class HuaweiVrpv8SSH(HuaweiSSH):
def commit(self, comment='', delay_factor=1):
"""
Commit the candidate configuration.
Commit the entered configuration. Raise an error and return the failure
if the commit fails.
default:
command_string = commit
comment:
command_string = commit comment <comment>
"""
delay_factor = self.select_delay_factor(delay_factor)
error_marker = 'Failed to generate committed config'
command_string = 'commit'
if comment:
command_string += ' comment "{}"'.format(comment)
output = self.config_mode()
output += self.send_command_expect(command_string, strip_prompt=False,
strip_command=False, delay_factor=delay_factor)
output += self.exit_config_mode()
if error_marker in output:
raise ValueError('Commit failed with following errors:\n\n{}'.format(output))
return output
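# Minimal usage sketch (illustrative values; assumes a reachable VRPv8 device
# and that this class is registered as device_type 'huawei_vrpv8'):
#
#     from netmiko import ConnectHandler
#     conn = ConnectHandler(device_type='huawei_vrpv8', host='192.0.2.1',
#                           username='admin', password='secret')
#     conn.send_config_set(['sysname LAB1'])
#     conn.commit(comment='lab rename')
#     conn.disconnect()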
| mit | 2,947,721,045,689,463,300 | 33.991228 | 96 | 0.626723 | false |
USGSDenverPychron/pychron | pychron/core/ui/qt/stage_component_editor.py | 1 | 2697 | # ===============================================================================
# Copyright 2011 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from pyface.qt.QtCore import Qt
from enable.component_editor import ComponentEditor, _ComponentEditor
from enable.window import Window as EWindow
from traits.api import Event, Str
# ============= standard library imports ========================
# ============= local library imports ==========================
class Window(EWindow):
on_key_release = None
def _on_key_released(self, event):
if self.on_key_release:
self.on_key_release(event)
class _LaserComponentEditor(_ComponentEditor):
keyboard_focus = Event
def init(self, parent):
'''
Finishes initializing the editor by creating the underlying toolkit
widget.
'''
size = self._get_initial_size()
self._window = Window(parent,
size=size,
component=self.value)
self.control = self._window.control
self._window.bgcolor = self.factory.bgcolor
self._parent = parent
self.sync_value('keyboard_focus', 'keyboard_focus', mode='both')
self._window.on_key_release = self.onKeyUp
def onKeyUp(self, event):
'''
        Map Qt arrow-key releases to their text names ('left', 'right',
        'up', 'down') and forward them to the component's key_released
        handler.
'''
ekey = event.key()
for sk, n in ((Qt.Key_Left, 'left'),
(Qt.Key_Right, 'right'),
(Qt.Key_Up, 'up'),
(Qt.Key_Down, 'down')):
if ekey == sk:
if hasattr(self.value, 'key_released'):
self.value.key_released(n)
break
def _keyboard_focus_changed(self):
self.control.setFocus()
class LaserComponentEditor(ComponentEditor):
klass = _LaserComponentEditor
keyboard_focus = Str
# ============= EOF =============================================
| apache-2.0 | -3,721,529,640,734,366,700 | 29.647727 | 81 | 0.533556 | false |
google/fedjax | fedjax/legacy/training/logging_test.py | 1 | 1361 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for fedjax.legacy.training.logging."""
import tensorflow as tf
from fedjax.legacy.training import logging
class LoggingTest(tf.test.TestCase):
def test_log_no_root_dir(self):
logger = logging.Logger()
logger.log(
writer_name='train', metric_name='loss', metric_value=4., round_num=0)
self.assertEmpty(logger._summary_writers)
def test_log_root_dir(self):
root_dir = self.create_tempdir()
logger = logging.Logger(root_dir)
logger.log(
writer_name='train', metric_name='loss', metric_value=4.1, round_num=0)
logger.log(
writer_name='eval', metric_name='loss', metric_value=5.3, round_num=0)
self.assertCountEqual(['train', 'eval'], tf.io.gfile.listdir(root_dir))
if __name__ == '__main__':
tf.test.main()
| apache-2.0 | 2,890,281,021,728,383,500 | 29.931818 | 79 | 0.703894 | false |
ryanmiao/libvirt-test-API | repos/libvirtd/upstart.py | 1 | 5758 | #!/usr/bin/env python
# Upstart libvirtd testing
#
# NOTES: Libvirtd will be restarted during test, better run this
# case alone.
import os
import re
import sys
import time
from utils import utils
from shutil import copy
required_params = ()
optional_params = {}
VIRSH_LIST = "virsh list --all"
UPSTART_CONF = "rpm -ql libvirt|grep upstart"
INITCTL_CMD = "/sbin/initctl"
SYSTEMCTL_CMD = "/bin/systemctl"
INITCTL_RELOAD_CMD = "initctl reload-configuration"
SYSTEMCTL_RELOAD_CMD = "systemctl daemon-reload"
INIT_CONF = "/etc/init/libvirtd.conf"
def libvirtd_check(logger):
"""check libvirtd status
"""
cmd = "service libvirtd status"
ret, out = utils.exec_cmd(cmd, shell=True)
if ret != 0:
logger.error("failed to get libvirtd status")
return 1
else:
logger.info(out[0])
logger.info(VIRSH_LIST)
ret, out = utils.exec_cmd(VIRSH_LIST, shell=True)
if ret != 0:
logger.error("failed to get virsh list result")
return 1
else:
for i in range(len(out)):
logger.info(out[i])
return 0
def upstart(params):
"""Set libvirtd upstart"""
logger = params['logger']
logger.info("chkconfig libvirtd off:")
cmd = "chkconfig libvirtd off"
ret, out = utils.exec_cmd(cmd, shell=True)
if ret != 0:
logger.error("failed")
return 1
else:
logger.info("succeed")
cmd = "service libvirtd stop"
logger.info(cmd)
ret, out = utils.exec_cmd(cmd, shell=True)
if ret != 0:
logger.error("failed to stop libvirtd service")
return 1
else:
logger.info(out[0])
logger.info("find libvirtd.upstart file in libvirt package:")
ret, conf = utils.exec_cmd(UPSTART_CONF, shell=True)
if ret != 0:
logger.error("can't find libvirtd.upstart as part of libvirt package")
return 1
elif conf[0]:
logger.info("succeed")
logger.info("copy %s to %s" % (conf[0], INIT_CONF))
copy(conf[0], INIT_CONF)
if os.path.exists(INITCTL_CMD):
logger.info(INITCTL_RELOAD_CMD)
ret, out = utils.exec_cmd(INITCTL_RELOAD_CMD, shell=True)
if ret != 0:
logger.error("failed to reload configuration")
return 1
else:
logger.info("succeed")
cmd = "initctl start libvirtd"
logger.info(cmd)
ret, out = utils.exec_cmd(cmd, shell=True)
if ret != 0:
logger.error("failed to start libvirtd by initctl")
return 1
else:
logger.info(out[0])
cmd = "initctl status libvirtd"
logger.info("get libvirtd status by initctl:")
ret, out = utils.exec_cmd(cmd, shell=True)
if ret != 0:
logger.info("failed to get libvirtd status by initctl")
return 1
else:
logger.info(out[0])
elif os.path.exists(SYSTEMCTL_CMD):
logger.info(SYSTEMCTL_RELOAD_CMD)
ret, out = utils.exec_cmd(SYSTEMCTL_RELOAD_CMD, shell=True)
if ret != 0:
logger.error("failed to reload systemd manager configuration")
return 1
else:
logger.info("succeed")
cmd = "systemctl start libvirtd.service"
logger.info(cmd)
ret, out = utils.exec_cmd(cmd, shell=True)
if ret != 0:
logger.error("failed to start libvirtd.service by systemctl")
return 1
else:
logger.info(out[0])
cmd = "systemctl status libvirtd.service"
logger.info("get libvirtd.service status by systemctl:")
ret, out = utils.exec_cmd(cmd, shell=True)
if ret != 0:
logger.info("failed to get libvirtd.service status by systemctl")
return 1
else:
logger.info(out[0])
else:
return 1
time.sleep(5)
logger.info("check the libvirtd status:")
ret = libvirtd_check(logger)
if ret:
return 1
cmd = "killall -9 libvirtd"
logger.info("kill libvirtd process")
ret, out = utils.exec_cmd(cmd, shell=True)
if ret != 0:
logger.error("failed to kill libvirtd process")
return 1
else:
logger.info("succeed")
time.sleep(5)
logger.info("recheck libvirtd status:")
ret = libvirtd_check(logger)
if ret:
return 1
else:
logger.info("the libvirtd process successfully restarted after kill")
return 0
def upstart_clean(params):
"""clean testing environment"""
logger = params['logger']
if os.path.exists(INITCTL_CMD):
cmd = "initctl stop libvirtd"
ret, out = utils.exec_cmd(cmd, shell=True)
if ret != 0:
logger.error("failed to stop libvirtd by initctl")
if os.path.exists(INIT_CONF):
os.remove(INIT_CONF)
ret, out = utils.exec_cmd(INITCTL_RELOAD_CMD, shell=True)
if ret != 0:
logger.error("failed to reload init confituration")
elif os.path.exists(SYSTEMCTL_CMD):
cmd = "systemctl stop libvirtd.service"
ret, out = utils.exec_cmd(cmd, shell=True)
if ret != 0:
logger.error("failed to stop libvirtd.service by systemctl")
if os.path.exists(INIT_CONF):
os.remove(INIT_CONF)
ret, out = utils.exec_cmd(SYSTEMCTL_RELOAD_CMD, shell=True)
if ret != 0:
logger.error("failed to reload systemd manager confituration")
cmd = "service libvirtd restart"
ret, out = utils.exec_cmd(cmd, shell=True)
if ret != 0:
logger.error("failed to restart libvirtd")
cmd = "chkconfig --level 345 libvirtd on"
ret, out = utils.exec_cmd(cmd, shell=True)
if ret != 0:
logger.error("failed to set chkconfig")
| gpl-2.0 | -985,839,490,520,528,600 | 27.646766 | 78 | 0.597777 | false |
MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-2.1/Lib/string.py | 1 | 10950 | """A collection of string operations (most are no longer used in Python 1.6).
Warning: most of the code you see here isn't normally used nowadays. With
Python 1.6, many of these functions are implemented as methods on the
standard string object. They used to be implemented by a built-in module
called strop, but strop is now obsolete itself.
Public module variables:
whitespace -- a string containing all characters considered whitespace
lowercase -- a string containing all characters considered lowercase letters
uppercase -- a string containing all characters considered uppercase letters
letters -- a string containing all characters considered letters
digits -- a string containing all characters considered decimal digits
hexdigits -- a string containing all characters considered hexadecimal digits
octdigits -- a string containing all characters considered octal digits
punctuation -- a string containing all characters considered punctuation
printable -- a string containing all characters considered printable
"""
# Some strings for ctype-style character classification
whitespace = ' \t\n\r\v\f'
lowercase = 'abcdefghijklmnopqrstuvwxyz'
uppercase = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
letters = lowercase + uppercase
digits = '0123456789'
hexdigits = digits + 'abcdef' + 'ABCDEF'
octdigits = '01234567'
punctuation = """!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~"""
printable = digits + letters + punctuation + whitespace
# Case conversion helpers
_idmap = ''
for i in range(256): _idmap = _idmap + chr(i)
del i
# Backward compatible names for exceptions
index_error = ValueError
atoi_error = ValueError
atof_error = ValueError
atol_error = ValueError
# convert UPPER CASE letters to lower case
def lower(s):
"""lower(s) -> string
Return a copy of the string s converted to lowercase.
"""
return s.lower()
# Convert lower case letters to UPPER CASE
def upper(s):
"""upper(s) -> string
Return a copy of the string s converted to uppercase.
"""
return s.upper()
# Swap lower case letters and UPPER CASE
def swapcase(s):
"""swapcase(s) -> string
Return a copy of the string s with upper case characters
converted to lowercase and vice versa.
"""
return s.swapcase()
# Strip leading and trailing tabs and spaces
def strip(s):
"""strip(s) -> string
Return a copy of the string s with leading and trailing
whitespace removed.
"""
return s.strip()
# Strip leading tabs and spaces
def lstrip(s):
"""lstrip(s) -> string
Return a copy of the string s with leading whitespace removed.
"""
return s.lstrip()
# Strip trailing tabs and spaces
def rstrip(s):
"""rstrip(s) -> string
Return a copy of the string s with trailing whitespace
removed.
"""
return s.rstrip()
# Split a string into a list of space/tab-separated words
def split(s, sep=None, maxsplit=-1):
"""split(s [,sep [,maxsplit]]) -> list of strings
Return a list of the words in the string s, using sep as the
delimiter string. If maxsplit is given, splits into at most
maxsplit words. If sep is not specified, any whitespace string
is a separator.
(split and splitfields are synonymous)
"""
return s.split(sep, maxsplit)
splitfields = split
# Join fields with optional separator
def join(words, sep = ' '):
"""join(list [,sep]) -> string
Return a string composed of the words in list, with
intervening occurrences of sep. The default separator is a
single space.
(joinfields and join are synonymous)
"""
return sep.join(words)
joinfields = join
# Find substring, raise exception if not found
def index(s, *args):
"""index(s, sub [,start [,end]]) -> int
Like find but raises ValueError when the substring is not found.
"""
return s.index(*args)
# Find last substring, raise exception if not found
def rindex(s, *args):
"""rindex(s, sub [,start [,end]]) -> int
Like rfind but raises ValueError when the substring is not found.
"""
return s.rindex(*args)
# Count non-overlapping occurrences of substring
def count(s, *args):
"""count(s, sub[, start[,end]]) -> int
Return the number of occurrences of substring sub in string
s[start:end]. Optional arguments start and end are
interpreted as in slice notation.
"""
return s.count(*args)
# Find substring, return -1 if not found
def find(s, *args):
"""find(s, sub [,start [,end]]) -> in
Return the lowest index in s where substring sub is found,
such that sub is contained within s[start,end]. Optional
arguments start and end are interpreted as in slice notation.
Return -1 on failure.
"""
return s.find(*args)
# Find last substring, return -1 if not found
def rfind(s, *args):
"""rfind(s, sub [,start [,end]]) -> int
Return the highest index in s where substring sub is found,
such that sub is contained within s[start,end]. Optional
arguments start and end are interpreted as in slice notation.
Return -1 on failure.
"""
return s.rfind(*args)
# for a bit of speed
_float = float
_int = int
_long = long
_StringType = type('')
# Convert string to float
def atof(s):
"""atof(s) -> float
Return the floating point number represented by the string s.
"""
return _float(s)
# Convert string to integer
def atoi(s , base=10):
"""atoi(s [,base]) -> int
Return the integer represented by the string s in the given
base, which defaults to 10. The string s must consist of one
or more digits, possibly preceded by a sign. If base is 0, it
is chosen from the leading characters of s, 0 for octal, 0x or
0X for hexadecimal. If base is 16, a preceding 0x or 0X is
accepted.
"""
return _int(s, base)
# Convert string to long integer
def atol(s, base=10):
"""atol(s [,base]) -> long
Return the long integer represented by the string s in the
given base, which defaults to 10. The string s must consist
of one or more digits, possibly preceded by a sign. If base
is 0, it is chosen from the leading characters of s, 0 for
octal, 0x or 0X for hexadecimal. If base is 16, a preceding
0x or 0X is accepted. A trailing L or l is not accepted,
unless base is 0.
"""
return _long(s, base)
# Left-justify a string
def ljust(s, width):
"""ljust(s, width) -> string
Return a left-justified version of s, in a field of the
specified width, padded with spaces as needed. The string is
never truncated.
"""
return s.ljust(width)
# Right-justify a string
def rjust(s, width):
"""rjust(s, width) -> string
Return a right-justified version of s, in a field of the
specified width, padded with spaces as needed. The string is
never truncated.
"""
return s.rjust(width)
# Center a string
def center(s, width):
"""center(s, width) -> string
Return a center version of s, in a field of the specified
width. padded with spaces as needed. The string is never
truncated.
"""
return s.center(width)
# Zero-fill a number, e.g., (12, 3) --> '012' and (-3, 3) --> '-03'
# Decadent feature: the argument may be a string or a number
# (Use of this is deprecated; it should be a string as with ljust c.s.)
def zfill(x, width):
"""zfill(x, width) -> string
Pad a numeric string x with zeros on the left, to fill a field
of the specified width. The string x is never truncated.
"""
if type(x) == type(''): s = x
else: s = `x`
n = len(s)
if n >= width: return s
sign = ''
if s[0] in ('-', '+'):
sign, s = s[0], s[1:]
return sign + '0'*(width-n) + s
# Expand tabs in a string.
# Doesn't take non-printing chars into account, but does understand \n.
def expandtabs(s, tabsize=8):
"""expandtabs(s [,tabsize]) -> string
Return a copy of the string s with all tab characters replaced
by the appropriate number of spaces, depending on the current
column, and the tabsize (default 8).
"""
return s.expandtabs(tabsize)
# Character translation through look-up table.
def translate(s, table, deletions=""):
"""translate(s,table [,deletions]) -> string
Return a copy of the string s, where all characters occurring
in the optional argument deletions are removed, and the
remaining characters have been mapped through the given
translation table, which must be a string of length 256. The
deletions argument is not allowed for Unicode strings.
"""
if deletions:
return s.translate(table, deletions)
else:
# Add s[:0] so that if s is Unicode and table is an 8-bit string,
# table is converted to Unicode. This means that table *cannot*
# be a dictionary -- for that feature, use u.translate() directly.
return s.translate(table + s[:0])
# Capitalize a string, e.g. "aBc dEf" -> "Abc def".
def capitalize(s):
"""capitalize(s) -> string
Return a copy of the string s with only its first character
capitalized.
"""
return s.capitalize()
# Capitalize the words in a string, e.g. " aBc dEf " -> "Abc Def".
# See also regsub.capwords().
def capwords(s, sep=None):
"""capwords(s, [sep]) -> string
Split the argument into words using split, capitalize each
word using capitalize, and join the capitalized words using
join. Note that this replaces runs of whitespace characters by
a single space.
"""
return join(map(capitalize, s.split(sep)), sep or ' ')
# Construct a translation string
_idmapL = None
def maketrans(fromstr, tostr):
"""maketrans(frm, to) -> string
Return a translation table (a string of 256 bytes long)
suitable for use in string.translate. The strings frm and to
must be of the same length.
"""
if len(fromstr) != len(tostr):
raise ValueError, "maketrans arguments must have same length"
global _idmapL
if not _idmapL:
_idmapL = map(None, _idmap)
L = _idmapL[:]
fromstr = map(ord, fromstr)
for i in range(len(fromstr)):
L[fromstr[i]] = tostr[i]
return join(L, "")
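# Illustrative example: translate('abcabc', maketrans('ab', 'xy')) returns 'xycxyc'.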
# Substring replacement (global)
def replace(s, old, new, maxsplit=-1):
"""replace (str, old, new[, maxsplit]) -> string
Return a copy of string str with all occurrences of substring
old replaced by new. If the optional argument maxsplit is
given, only the first maxsplit occurrences are replaced.
"""
return s.replace(old, new, maxsplit)
# Try importing optional built-in module "strop" -- if it exists,
# it redefines some string operations that are 100-1000 times faster.
# It also defines values for whitespace, lowercase and uppercase
# that match <ctype.h>'s definitions.
try:
from strop import maketrans, lowercase, uppercase, whitespace
letters = lowercase + uppercase
except ImportError:
pass # Use the original versions
| mit | 8,775,389,307,668,730,000 | 27.740157 | 77 | 0.67379 | false |
AutorestCI/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2017_06_01/models/application_gateway_backend_address_pool.py | 1 | 2579 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource import SubResource
class ApplicationGatewayBackendAddressPool(SubResource):
"""Backend Address Pool of an application gateway.
:param id: Resource ID.
:type id: str
:param backend_ip_configurations: Collection of references to IPs defined
in network interfaces.
:type backend_ip_configurations:
list[~azure.mgmt.network.v2017_06_01.models.NetworkInterfaceIPConfiguration]
:param backend_addresses: Backend addresses
:type backend_addresses:
list[~azure.mgmt.network.v2017_06_01.models.ApplicationGatewayBackendAddress]
:param provisioning_state: Provisioning state of the backend address pool
resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:type provisioning_state: str
:param name: Resource that is unique within a resource group. This name
can be used to access the resource.
:type name: str
:param etag: A unique read-only string that changes whenever the resource
is updated.
:type etag: str
:param type: Type of the resource.
:type type: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'backend_ip_configurations': {'key': 'properties.backendIPConfigurations', 'type': '[NetworkInterfaceIPConfiguration]'},
'backend_addresses': {'key': 'properties.backendAddresses', 'type': '[ApplicationGatewayBackendAddress]'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(self, id=None, backend_ip_configurations=None, backend_addresses=None, provisioning_state=None, name=None, etag=None, type=None):
super(ApplicationGatewayBackendAddressPool, self).__init__(id=id)
self.backend_ip_configurations = backend_ip_configurations
self.backend_addresses = backend_addresses
self.provisioning_state = provisioning_state
self.name = name
self.etag = etag
self.type = type
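# Illustrative construction (field values are made up; ApplicationGatewayBackendAddress
# is assumed to be the sibling model in this package):
#
#     pool = ApplicationGatewayBackendAddressPool(
#         name='appGatewayBackendPool',
#         backend_addresses=[ApplicationGatewayBackendAddress(ip_address='10.0.0.4')])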
| mit | -1,267,983,922,887,911,400 | 44.245614 | 146 | 0.649089 | false |
grlee77/numpy | numpy/lib/utils.py | 1 | 31655 | import os
import sys
import textwrap
import types
import re
import warnings
from numpy.core.numerictypes import issubclass_, issubsctype, issubdtype
from numpy.core.overrides import set_module
from numpy.core import ndarray, ufunc, asarray
import numpy as np
__all__ = [
'issubclass_', 'issubsctype', 'issubdtype', 'deprecate',
'deprecate_with_doc', 'get_include', 'info', 'source', 'who',
'lookfor', 'byte_bounds', 'safe_eval'
]
def get_include():
"""
Return the directory that contains the NumPy \\*.h header files.
Extension modules that need to compile against NumPy should use this
function to locate the appropriate include directory.
Notes
-----
When using ``distutils``, for example in ``setup.py``.
::
import numpy as np
...
Extension('extension_name', ...
include_dirs=[np.get_include()])
...
"""
import numpy
if numpy.show_config is None:
# running from numpy source directory
d = os.path.join(os.path.dirname(numpy.__file__), 'core', 'include')
else:
# using installed numpy core headers
import numpy.core as core
d = os.path.join(os.path.dirname(core.__file__), 'include')
return d
def _set_function_name(func, name):
func.__name__ = name
return func
class _Deprecate:
"""
Decorator class to deprecate old functions.
Refer to `deprecate` for details.
See Also
--------
deprecate
"""
def __init__(self, old_name=None, new_name=None, message=None):
self.old_name = old_name
self.new_name = new_name
self.message = message
def __call__(self, func, *args, **kwargs):
"""
Decorator call. Refer to ``decorate``.
"""
old_name = self.old_name
new_name = self.new_name
message = self.message
        if old_name is None:
            old_name = func.__name__
if new_name is None:
depdoc = "`%s` is deprecated!" % old_name
else:
depdoc = "`%s` is deprecated, use `%s` instead!" % \
(old_name, new_name)
if message is not None:
depdoc += "\n" + message
def newfunc(*args,**kwds):
"""`arrayrange` is deprecated, use `arange` instead!"""
warnings.warn(depdoc, DeprecationWarning, stacklevel=2)
return func(*args, **kwds)
newfunc = _set_function_name(newfunc, old_name)
doc = func.__doc__
if doc is None:
doc = depdoc
else:
lines = doc.expandtabs().split('\n')
indent = _get_indent(lines[1:])
if lines[0].lstrip():
# Indent the original first line to let inspect.cleandoc()
# dedent the docstring despite the deprecation notice.
doc = indent * ' ' + doc
else:
# Remove the same leading blank lines as cleandoc() would.
skip = len(lines[0]) + 1
for line in lines[1:]:
if len(line) > indent:
break
skip += len(line) + 1
doc = doc[skip:]
depdoc = textwrap.indent(depdoc, ' ' * indent)
doc = '\n\n'.join([depdoc, doc])
newfunc.__doc__ = doc
try:
d = func.__dict__
except AttributeError:
pass
else:
newfunc.__dict__.update(d)
return newfunc
def _get_indent(lines):
"""
Determines the leading whitespace that could be removed from all the lines.
"""
indent = sys.maxsize
for line in lines:
content = len(line.lstrip())
if content:
indent = min(indent, len(line) - content)
if indent == sys.maxsize:
indent = 0
return indent
def deprecate(*args, **kwargs):
"""
Issues a DeprecationWarning, adds warning to `old_name`'s
docstring, rebinds ``old_name.__name__`` and returns the new
function object.
This function may also be used as a decorator.
Parameters
----------
func : function
The function to be deprecated.
old_name : str, optional
The name of the function to be deprecated. Default is None, in
which case the name of `func` is used.
new_name : str, optional
The new name for the function. Default is None, in which case the
deprecation message is that `old_name` is deprecated. If given, the
deprecation message is that `old_name` is deprecated and `new_name`
should be used instead.
message : str, optional
Additional explanation of the deprecation. Displayed in the
docstring after the warning.
Returns
-------
old_func : function
The deprecated function.
Examples
--------
Note that ``olduint`` returns a value after printing Deprecation
Warning:
>>> olduint = np.deprecate(np.uint)
DeprecationWarning: `uint64` is deprecated! # may vary
>>> olduint(6)
6
"""
# Deprecate may be run as a function or as a decorator
# If run as a function, we initialise the decorator class
# and execute its __call__ method.
if args:
fn = args[0]
args = args[1:]
return _Deprecate(*args, **kwargs)(fn)
else:
return _Deprecate(*args, **kwargs)
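# Illustrative sketch (not part of the original module): `deprecate` can also be
# used in decorator form, e.g.
#
#     @deprecate(new_name="arange", message="Use `arange` instead.")
#     def arrayrange(*args, **kwargs):
#         return np.arange(*args, **kwargs)
#
# Calling the wrapped function then emits a DeprecationWarning and its docstring
# is prefixed with the deprecation notice.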
deprecate_with_doc = lambda msg: _Deprecate(message=msg)
#--------------------------------------------
# Determine if two arrays can share memory
#--------------------------------------------
def byte_bounds(a):
"""
Returns pointers to the end-points of an array.
Parameters
----------
a : ndarray
Input array. It must conform to the Python-side of the array
interface.
Returns
-------
(low, high) : tuple of 2 integers
The first integer is the first byte of the array, the second
integer is just past the last byte of the array. If `a` is not
contiguous it will not use every byte between the (`low`, `high`)
values.
Examples
--------
>>> I = np.eye(2, dtype='f'); I.dtype
dtype('float32')
>>> low, high = np.byte_bounds(I)
>>> high - low == I.size*I.itemsize
True
>>> I = np.eye(2); I.dtype
dtype('float64')
>>> low, high = np.byte_bounds(I)
>>> high - low == I.size*I.itemsize
True
"""
ai = a.__array_interface__
a_data = ai['data'][0]
astrides = ai['strides']
ashape = ai['shape']
bytes_a = asarray(a).dtype.itemsize
a_low = a_high = a_data
if astrides is None:
# contiguous case
a_high += a.size * bytes_a
else:
for shape, stride in zip(ashape, astrides):
if stride < 0:
a_low += (shape-1)*stride
else:
a_high += (shape-1)*stride
a_high += bytes_a
return a_low, a_high
#-----------------------------------------------------------------------------
# Function for output and information on the variables used.
#-----------------------------------------------------------------------------
def who(vardict=None):
"""
Print the NumPy arrays in the given dictionary.
If there is no dictionary passed in or `vardict` is None then returns
NumPy arrays in the globals() dictionary (all NumPy arrays in the
namespace).
Parameters
----------
vardict : dict, optional
A dictionary possibly containing ndarrays. Default is globals().
Returns
-------
out : None
Returns 'None'.
Notes
-----
Prints out the name, shape, bytes and type of all of the ndarrays
present in `vardict`.
Examples
--------
>>> a = np.arange(10)
>>> b = np.ones(20)
>>> np.who()
Name Shape Bytes Type
===========================================================
a 10 80 int64
b 20 160 float64
Upper bound on total bytes = 240
>>> d = {'x': np.arange(2.0), 'y': np.arange(3.0), 'txt': 'Some str',
... 'idx':5}
>>> np.who(d)
Name Shape Bytes Type
===========================================================
x 2 16 float64
y 3 24 float64
Upper bound on total bytes = 40
"""
if vardict is None:
frame = sys._getframe().f_back
vardict = frame.f_globals
sta = []
cache = {}
for name in vardict.keys():
if isinstance(vardict[name], ndarray):
var = vardict[name]
idv = id(var)
if idv in cache.keys():
namestr = name + " (%s)" % cache[idv]
original = 0
else:
cache[idv] = name
namestr = name
original = 1
shapestr = " x ".join(map(str, var.shape))
bytestr = str(var.nbytes)
sta.append([namestr, shapestr, bytestr, var.dtype.name,
original])
maxname = 0
maxshape = 0
maxbyte = 0
totalbytes = 0
for k in range(len(sta)):
val = sta[k]
if maxname < len(val[0]):
maxname = len(val[0])
if maxshape < len(val[1]):
maxshape = len(val[1])
if maxbyte < len(val[2]):
maxbyte = len(val[2])
if val[4]:
totalbytes += int(val[2])
if len(sta) > 0:
sp1 = max(10, maxname)
sp2 = max(10, maxshape)
sp3 = max(10, maxbyte)
prval = "Name %s Shape %s Bytes %s Type" % (sp1*' ', sp2*' ', sp3*' ')
print(prval + "\n" + "="*(len(prval)+5) + "\n")
for k in range(len(sta)):
val = sta[k]
print("%s %s %s %s %s %s %s" % (val[0], ' '*(sp1-len(val[0])+4),
val[1], ' '*(sp2-len(val[1])+5),
val[2], ' '*(sp3-len(val[2])+5),
val[3]))
print("\nUpper bound on total bytes = %d" % totalbytes)
return
#-----------------------------------------------------------------------------
# NOTE: pydoc defines a help function which works similarly to this
# except it uses a pager to take over the screen.
# combine name and arguments and split to multiple lines of width
# characters. End lines on a comma and begin argument list indented with
# the rest of the arguments.
def _split_line(name, arguments, width):
firstwidth = len(name)
k = firstwidth
newstr = name
sepstr = ", "
arglist = arguments.split(sepstr)
for argument in arglist:
if k == firstwidth:
addstr = ""
else:
addstr = sepstr
k = k + len(argument) + len(addstr)
if k > width:
k = firstwidth + 1 + len(argument)
newstr = newstr + ",\n" + " "*(firstwidth+2) + argument
else:
newstr = newstr + addstr + argument
return newstr
_namedict = None
_dictlist = None
# Traverse all module directories underneath globals
# to see if something is defined
def _makenamedict(module='numpy'):
module = __import__(module, globals(), locals(), [])
thedict = {module.__name__:module.__dict__}
dictlist = [module.__name__]
totraverse = [module.__dict__]
while True:
if len(totraverse) == 0:
break
thisdict = totraverse.pop(0)
for x in thisdict.keys():
if isinstance(thisdict[x], types.ModuleType):
modname = thisdict[x].__name__
if modname not in dictlist:
moddict = thisdict[x].__dict__
dictlist.append(modname)
totraverse.append(moddict)
thedict[modname] = moddict
return thedict, dictlist
def _info(obj, output=sys.stdout):
"""Provide information about ndarray obj.
Parameters
----------
obj : ndarray
Must be ndarray, not checked.
output
Where printed output goes.
Notes
-----
Copied over from the numarray module prior to its removal.
Adapted somewhat as only numpy is an option now.
Called by info.
"""
extra = ""
tic = ""
bp = lambda x: x
cls = getattr(obj, '__class__', type(obj))
nm = getattr(cls, '__name__', cls)
strides = obj.strides
endian = obj.dtype.byteorder
print("class: ", nm, file=output)
print("shape: ", obj.shape, file=output)
print("strides: ", strides, file=output)
print("itemsize: ", obj.itemsize, file=output)
print("aligned: ", bp(obj.flags.aligned), file=output)
print("contiguous: ", bp(obj.flags.contiguous), file=output)
print("fortran: ", obj.flags.fortran, file=output)
print(
"data pointer: %s%s" % (hex(obj.ctypes._as_parameter_.value), extra),
file=output
)
print("byteorder: ", end=' ', file=output)
if endian in ['|', '=']:
print("%s%s%s" % (tic, sys.byteorder, tic), file=output)
byteswap = False
elif endian == '>':
print("%sbig%s" % (tic, tic), file=output)
byteswap = sys.byteorder != "big"
else:
print("%slittle%s" % (tic, tic), file=output)
byteswap = sys.byteorder != "little"
print("byteswap: ", bp(byteswap), file=output)
print("type: %s" % obj.dtype, file=output)
@set_module('numpy')
def info(object=None, maxwidth=76, output=sys.stdout, toplevel='numpy'):
"""
Get help information for a function, class, or module.
Parameters
----------
object : object or str, optional
Input object or name to get information about. If `object` is a
numpy object, its docstring is given. If it is a string, available
modules are searched for matching objects. If None, information
about `info` itself is returned.
maxwidth : int, optional
Printing width.
output : file like object, optional
File like object that the output is written to, default is
``stdout``. The object has to be opened in 'w' or 'a' mode.
toplevel : str, optional
Start search at this level.
See Also
--------
source, lookfor
Notes
-----
When used interactively with an object, ``np.info(obj)`` is equivalent
to ``help(obj)`` on the Python prompt or ``obj?`` on the IPython
prompt.
Examples
--------
>>> np.info(np.polyval) # doctest: +SKIP
polyval(p, x)
Evaluate the polynomial p at x.
...
When using a string for `object` it is possible to get multiple results.
>>> np.info('fft') # doctest: +SKIP
*** Found in numpy ***
Core FFT routines
...
*** Found in numpy.fft ***
fft(a, n=None, axis=-1)
...
*** Repeat reference found in numpy.fft.fftpack ***
*** Total of 3 references found. ***
"""
global _namedict, _dictlist
# Local import to speed up numpy's import time.
import pydoc
import inspect
if (hasattr(object, '_ppimport_importer') or
hasattr(object, '_ppimport_module')):
object = object._ppimport_module
elif hasattr(object, '_ppimport_attr'):
object = object._ppimport_attr
if object is None:
info(info)
elif isinstance(object, ndarray):
_info(object, output=output)
elif isinstance(object, str):
if _namedict is None:
_namedict, _dictlist = _makenamedict(toplevel)
numfound = 0
objlist = []
for namestr in _dictlist:
try:
obj = _namedict[namestr][object]
if id(obj) in objlist:
print("\n "
"*** Repeat reference found in %s *** " % namestr,
file=output
)
else:
objlist.append(id(obj))
print(" *** Found in %s ***" % namestr, file=output)
info(obj)
print("-"*maxwidth, file=output)
numfound += 1
except KeyError:
pass
if numfound == 0:
print("Help for %s not found." % object, file=output)
else:
print("\n "
"*** Total of %d references found. ***" % numfound,
file=output
)
elif inspect.isfunction(object) or inspect.ismethod(object):
name = object.__name__
try:
arguments = str(inspect.signature(object))
except Exception:
arguments = "()"
if len(name+arguments) > maxwidth:
argstr = _split_line(name, arguments, maxwidth)
else:
argstr = name + arguments
print(" " + argstr + "\n", file=output)
print(inspect.getdoc(object), file=output)
elif inspect.isclass(object):
name = object.__name__
try:
arguments = str(inspect.signature(object))
except Exception:
arguments = "()"
if len(name+arguments) > maxwidth:
argstr = _split_line(name, arguments, maxwidth)
else:
argstr = name + arguments
print(" " + argstr + "\n", file=output)
doc1 = inspect.getdoc(object)
if doc1 is None:
if hasattr(object, '__init__'):
print(inspect.getdoc(object.__init__), file=output)
else:
print(inspect.getdoc(object), file=output)
methods = pydoc.allmethods(object)
public_methods = [meth for meth in methods if meth[0] != '_']
if public_methods:
print("\n\nMethods:\n", file=output)
for meth in public_methods:
thisobj = getattr(object, meth, None)
if thisobj is not None:
methstr, other = pydoc.splitdoc(
inspect.getdoc(thisobj) or "None"
)
print(" %s -- %s" % (meth, methstr), file=output)
elif hasattr(object, '__doc__'):
print(inspect.getdoc(object), file=output)
@set_module('numpy')
def source(object, output=sys.stdout):
"""
Print or write to a file the source code for a NumPy object.
The source code is only returned for objects written in Python. Many
functions and classes are defined in C and will therefore not return
useful information.
Parameters
----------
object : numpy object
Input object. This can be any object (function, class, module,
...).
output : file object, optional
If `output` not supplied then source code is printed to screen
(sys.stdout). File object must be created with either write 'w' or
append 'a' modes.
See Also
--------
lookfor, info
Examples
--------
>>> np.source(np.interp) #doctest: +SKIP
In file: /usr/lib/python2.6/dist-packages/numpy/lib/function_base.py
def interp(x, xp, fp, left=None, right=None):
\"\"\".... (full docstring printed)\"\"\"
if isinstance(x, (float, int, number)):
return compiled_interp([x], xp, fp, left, right).item()
else:
return compiled_interp(x, xp, fp, left, right)
The source code is only returned for objects written in Python.
>>> np.source(np.array) #doctest: +SKIP
Not available for this object.
"""
# Local import to speed up numpy's import time.
import inspect
try:
print("In file: %s\n" % inspect.getsourcefile(object), file=output)
print(inspect.getsource(object), file=output)
except Exception:
print("Not available for this object.", file=output)
# Cache for lookfor: {id(module): {name: (docstring, kind, index), ...}...}
# where kind: "func", "class", "module", "object"
# and index: index in breadth-first namespace traversal
_lookfor_caches = {}
# regexp whose match indicates that the string may contain a function
# signature
_function_signature_re = re.compile(r"[a-z0-9_]+\(.*[,=].*\)", re.I)
@set_module('numpy')
def lookfor(what, module=None, import_modules=True, regenerate=False,
output=None):
"""
Do a keyword search on docstrings.
A list of objects that matched the search is displayed,
sorted by relevance. All given keywords need to be found in the
docstring for it to be returned as a result, but the order does
not matter.
Parameters
----------
what : str
String containing words to look for.
module : str or list, optional
Name of module(s) whose docstrings to go through.
import_modules : bool, optional
Whether to import sub-modules in packages. Default is True.
regenerate : bool, optional
Whether to re-generate the docstring cache. Default is False.
output : file-like, optional
File-like object to write the output to. If omitted, use a pager.
See Also
--------
source, info
Notes
-----
Relevance is determined only roughly, by checking if the keywords occur
in the function name, at the start of a docstring, etc.
Examples
--------
>>> np.lookfor('binary representation') # doctest: +SKIP
Search results for 'binary representation'
------------------------------------------
numpy.binary_repr
Return the binary representation of the input number as a string.
numpy.core.setup_common.long_double_representation
Given a binary dump as given by GNU od -b, look for long double
numpy.base_repr
Return a string representation of a number in the given base system.
...
"""
import pydoc
# Cache
cache = _lookfor_generate_cache(module, import_modules, regenerate)
# Search
# XXX: maybe using a real stemming search engine would be better?
found = []
whats = str(what).lower().split()
if not whats:
return
for name, (docstring, kind, index) in cache.items():
if kind in ('module', 'object'):
# don't show modules or objects
continue
doc = docstring.lower()
if all(w in doc for w in whats):
found.append(name)
# Relevance sort
# XXX: this is full Harrison-Stetson heuristics now,
# XXX: it probably could be improved
kind_relevance = {'func': 1000, 'class': 1000,
'module': -1000, 'object': -1000}
def relevance(name, docstr, kind, index):
r = 0
# do the keywords occur within the start of the docstring?
first_doc = "\n".join(docstr.lower().strip().split("\n")[:3])
r += sum([200 for w in whats if w in first_doc])
# do the keywords occur in the function name?
r += sum([30 for w in whats if w in name])
# is the full name long?
r += -len(name) * 5
# is the object of bad type?
r += kind_relevance.get(kind, -1000)
# is the object deep in namespace hierarchy?
r += -name.count('.') * 10
r += max(-index / 100, -100)
return r
def relevance_value(a):
return relevance(a, *cache[a])
found.sort(key=relevance_value)
# Pretty-print
s = "Search results for '%s'" % (' '.join(whats))
help_text = [s, "-"*len(s)]
for name in found[::-1]:
doc, kind, ix = cache[name]
doclines = [line.strip() for line in doc.strip().split("\n")
if line.strip()]
# find a suitable short description
try:
first_doc = doclines[0].strip()
if _function_signature_re.search(first_doc):
first_doc = doclines[1].strip()
except IndexError:
first_doc = ""
help_text.append("%s\n %s" % (name, first_doc))
if not found:
help_text.append("Nothing found.")
# Output
if output is not None:
output.write("\n".join(help_text))
elif len(help_text) > 10:
pager = pydoc.getpager()
pager("\n".join(help_text))
else:
print("\n".join(help_text))
def _lookfor_generate_cache(module, import_modules, regenerate):
"""
Generate docstring cache for given module.
Parameters
----------
module : str, None, module
Module for which to generate docstring cache
import_modules : bool
Whether to import sub-modules in packages.
regenerate : bool
Re-generate the docstring cache
Returns
-------
cache : dict {obj_full_name: (docstring, kind, index), ...}
Docstring cache for the module, either cached one (regenerate=False)
or newly generated.
"""
# Local import to speed up numpy's import time.
import inspect
from io import StringIO
if module is None:
module = "numpy"
if isinstance(module, str):
try:
__import__(module)
except ImportError:
return {}
module = sys.modules[module]
elif isinstance(module, list) or isinstance(module, tuple):
cache = {}
for mod in module:
cache.update(_lookfor_generate_cache(mod, import_modules,
regenerate))
return cache
if id(module) in _lookfor_caches and not regenerate:
return _lookfor_caches[id(module)]
# walk items and collect docstrings
cache = {}
_lookfor_caches[id(module)] = cache
seen = {}
index = 0
stack = [(module.__name__, module)]
while stack:
name, item = stack.pop(0)
if id(item) in seen:
continue
seen[id(item)] = True
index += 1
kind = "object"
if inspect.ismodule(item):
kind = "module"
try:
_all = item.__all__
except AttributeError:
_all = None
# import sub-packages
if import_modules and hasattr(item, '__path__'):
for pth in item.__path__:
for mod_path in os.listdir(pth):
this_py = os.path.join(pth, mod_path)
init_py = os.path.join(pth, mod_path, '__init__.py')
if (os.path.isfile(this_py) and
mod_path.endswith('.py')):
to_import = mod_path[:-3]
elif os.path.isfile(init_py):
to_import = mod_path
else:
continue
if to_import == '__init__':
continue
try:
old_stdout = sys.stdout
old_stderr = sys.stderr
try:
sys.stdout = StringIO()
sys.stderr = StringIO()
__import__("%s.%s" % (name, to_import))
finally:
sys.stdout = old_stdout
sys.stderr = old_stderr
# Catch SystemExit, too
except BaseException:
continue
for n, v in _getmembers(item):
try:
item_name = getattr(v, '__name__', "%s.%s" % (name, n))
mod_name = getattr(v, '__module__', None)
except NameError:
# ref. SWIG's global cvars
# NameError: Unknown C global variable
item_name = "%s.%s" % (name, n)
mod_name = None
if '.' not in item_name and mod_name:
item_name = "%s.%s" % (mod_name, item_name)
if not item_name.startswith(name + '.'):
# don't crawl "foreign" objects
if isinstance(v, ufunc):
# ... unless they are ufuncs
pass
else:
continue
elif not (inspect.ismodule(v) or _all is None or n in _all):
continue
stack.append(("%s.%s" % (name, n), v))
elif inspect.isclass(item):
kind = "class"
for n, v in _getmembers(item):
stack.append(("%s.%s" % (name, n), v))
elif hasattr(item, "__call__"):
kind = "func"
try:
doc = inspect.getdoc(item)
except NameError:
# ref SWIG's NameError: Unknown C global variable
doc = None
if doc is not None:
cache[name] = (doc, kind, index)
return cache
def _getmembers(item):
import inspect
try:
members = inspect.getmembers(item)
except Exception:
members = [(x, getattr(item, x)) for x in dir(item)
if hasattr(item, x)]
return members
def safe_eval(source):
"""
Protected string evaluation.
Evaluate a string containing a Python literal expression without
allowing the execution of arbitrary non-literal code.
Parameters
----------
source : str
The string to evaluate.
Returns
-------
obj : object
The result of evaluating `source`.
Raises
------
SyntaxError
If the code has invalid Python syntax, or if it contains
non-literal code.
Examples
--------
>>> np.safe_eval('1')
1
>>> np.safe_eval('[1, 2, 3]')
[1, 2, 3]
>>> np.safe_eval('{"foo": ("bar", 10.0)}')
{'foo': ('bar', 10.0)}
>>> np.safe_eval('import os')
Traceback (most recent call last):
...
SyntaxError: invalid syntax
>>> np.safe_eval('open("/home/user/.ssh/id_dsa").read()')
Traceback (most recent call last):
...
ValueError: malformed node or string: <_ast.Call object at 0x...>
"""
# Local import to speed up numpy's import time.
import ast
return ast.literal_eval(source)
def _median_nancheck(data, result, axis, out):
"""
Utility function to check median result from data for NaN values at the end
and return NaN in that case. Input result can also be a MaskedArray.
Parameters
----------
data : array
Input data to median function
result : Array or MaskedArray
Result of median function
axis : {int, sequence of int, None}, optional
Axis or axes along which the median was computed.
out : ndarray, optional
Output array in which to place the result.
Returns
-------
median : scalar or ndarray
Median or NaN in axes which contained NaN in the input.
"""
if data.size == 0:
return result
data = np.moveaxis(data, axis, -1)
n = np.isnan(data[..., -1])
# masked NaN values are ok
if np.ma.isMaskedArray(n):
n = n.filled(False)
if result.ndim == 0:
if n == True:
if out is not None:
out[...] = data.dtype.type(np.nan)
result = out
else:
result = data.dtype.type(np.nan)
elif np.count_nonzero(n.ravel()) > 0:
result[n] = np.nan
return result
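# Note on the check above: the median implementations call this helper with the
# already-partitioned data, and NaN values sort to the end of a partition, so
# inspecting data[..., -1] after np.moveaxis is enough to find every slice whose
# result must be forced to NaN.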
#-----------------------------------------------------------------------------
| bsd-3-clause | 2,800,243,081,563,033,600 | 30.034314 | 79 | 0.527184 | false |
EuropeanSocialInnovationDatabase/ESID-main | ESIDcrawlers/ESIDcrawlers/spiders/MOPACT.py | 1 | 5898 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import scrapy
from HTMLParser import HTMLParser
import MySQLdb
from database_access import *
import re
class MLStripper(HTMLParser):
def __init__(self):
self.reset()
self.fed = []
def handle_data(self, d):
self.fed.append(d)
def get_data(self):
return ''.join(self.fed)
def strip_tags(html):
s = MLStripper()
s.feed(html)
return s.get_data()
class InnovageSpider(scrapy.Spider):
name = "MOPACT"
num = 0
def start_requests(self):
self.db = MySQLdb.connect(host, username, password, database, charset='utf8')
self.cursor = self.db.cursor()
urls = ["http://mopact.group.shef.ac.uk/research-posts/innovations-8/",
"http://mopact.group.shef.ac.uk/research-posts/innovations-6/",
"http://mopact.group.shef.ac.uk/research-posts/innovations/",
"http://mopact.group.shef.ac.uk/research-posts/innovations-3/",
"http://mopact.group.shef.ac.uk/research-posts/innovations-4/",
"http://mopact.group.shef.ac.uk/research-posts/innovations-5/",
"http://mopact.group.shef.ac.uk/research-posts/innovations-7/"]
# for i in range(0,32):
# urls.append("http://www.innovage.group.shef.ac.uk/innovation-database.html?page="+str(i))
for url in urls:
yield scrapy.Request(url=url, callback=self.parse)
def parse(self,response):
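        # The MOPACT "innovations" pages list projects separated by <hr /> tags
        # and "Return to top" anchors. Each project block carries its name and a
        # country heading in <strong>/<b> tags, its AAI domains in a <ul>, a
        # free-text description and one or more websites; the pieces are split
        # apart below and written to the Projects, ProjectLocation and
        # AdditionalProjectData tables.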
source_page = response.url
body = response.body.strip().replace(" "," ")
#regex = '(<strong>|<b>)([A-Za-z0-9&#!;: ()-]*)(</strong>|</b>)</p>[\n ]*<p>([a-zA-Z0-9<> ="/\n]*)(<strong>|<b>)([a-zA-Z0-9<>&;, ="/\n]*)(</strong>|</b>)</p>[\n ]+<p><strong>([A-Za-z :]+)</strong></p>[\n ]*<ul>([A-Za-z0-9<>()\n,. &;/-]*)</ul>[\n ]*<p>([A-Za-z0-9\xA0 ,.\ö€£$(\)%</>\n&#;:-]*)</p>[\n ]<p>([<a-z0-9A-Z%_?&; =":/.>\n-]*)</p>'
#m = re.findall(regex,body)
projects = body.split('<hr />')
projects = projects[1].split("Return to top")
new_projects = []
for project in projects:
if "<br />" in project:
project2 = project.split("<br />")
for pro in project2:
pattern = '(<strong>|<b>)([A-Za-z0-9&#!;: ()-]*)(</strong>|</b>)'
m = re.findall(pattern, pro)
if len(m)>0 and len(m)<4:
project2=[]
project2.append(project)
for pro in project2:
if len(pro)>100:
new_projects.append(pro)
else:
new_projects.append(project)
new_projects.remove(new_projects[len(new_projects)-1])
#print "AAAA"
project_country = ""
for project in new_projects:
regex = '(<strong>|<b>)([A-Za-z0-9&#!;: ()-]*)(</strong>|</b>)'
m = re.findall(regex, project)
if m!=None:
project_name = ""
for item in m:
if project_name == "":
project_name = item[1]
print project_name
                    if(item[1]!="AAI Domains:" and project_name is not item[1] and item[1]!="Websites:" and item[1]!="Website:"):
project_country = item[1]
#print item
regex = '<ul>([A-Za-z0-9<>()\n,. &;/-]*)</ul>'
m = re.findall(regex, project)
if m!=None:
for item in m:
project_areas = strip_tags(item).strip().split('\n')
if "Websites" in project:
websites = strip_tags(project.split("Websites:")[1]).strip().split('\n')
if "Website:" in project:
websites = strip_tags(project.split("Website:")[1]).strip().split('\n')
if "</ul>" in project:
project_description = strip_tags(project.split("</ul>")[1].split("Website")[0])
if project_name=="Poland" or project_name=="Netherlands" or project_name=="USA":
return
project_sql = "INSERT INTO Projects (ProjectName,Type,ProjectWebpage,FirstDataSource,DataSources_idDataSources) VALUES (%s,'Social Innovation',%s,'MOPACT',4)"
self.cursor.execute(project_sql, (project_name, websites[0]))
self.db.commit()
pro_id = self.cursor.lastrowid
location_sql = "Insert into ProjectLocation(Type,Country,Projects_idProjects) VALUES('Main',%s,%s)"
self.cursor.execute(location_sql, (project_country, pro_id))
self.db.commit()
sql_desc = "Insert into AdditionalProjectData (FieldName,Value,Projects_idProjects,DateObtained,SourceURL)" \
"VALUES(%s,%s,%s,NOW(),%s)"
self.cursor.execute(sql_desc, (
"Description", project_description, pro_id, source_page))
self.db.commit()
for area in project_areas:
sql_domain = "Insert into AdditionalProjectData (FieldName,Value,Projects_idProjects,DateObtained,SourceURL)" \
"VALUES(%s,%s,%s,NOW(),%s)"
self.cursor.execute(sql_domain, (
"Domain", area, pro_id, source_page))
self.db.commit()
if len(websites) > 1:
k = 0
for w in websites:
if k == 0:
k = k + 1
continue
sql_web = "Insert into AdditionalProjectData (FieldName,Value,Projects_idProjects,DateObtained,SourceURL)" \
"VALUES(%s,%s,%s,NOW(),%s)"
self.cursor.execute(sql_web, (
"Additional WebSource", w, pro_id, source_page))
self.db.commit()
print source_page | gpl-3.0 | -5,503,429,203,674,280,000 | 46.16 | 346 | 0.511876 | false |
Microsoft/hummingbird | hummingbird/ml/supported.py | 1 | 10457 | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
All operators, backends, and configurations settings supported in Hummingbird are registered here.
**Supported Backends**
PyTorch,
TorchScript,
ONNX
**Supported Operators**
BernoulliNB,
Binarizer,
DecisionTreeClassifier,
DecisionTreeRegressor,
ExtraTreesClassifier,
ExtraTreesRegressor,
FastICA,
GaussianNB,
GradientBoostingClassifier,
GradientBoostingRegressor,
HistGradientBoostingClassifier,
HistGradientBoostingRegressor,
IsolationForest,
KernelPCA,
KBinsDiscretizer,
LinearRegression,
LinearSVC,
LogisticRegression,
LogisticRegressionCV,
MaxAbsScaler,
MinMaxScaler,
MissingIndicator,
MLPClassifier,
MLPRegressor,
MultinomialNB,
Normalizer,
OneHotEncoder,
PCA,
PolynomialFeatures,
RandomForestClassifier,
RandomForestRegressor,
RobustScaler,
SelectKBest,
SelectPercentile,
SimpleImputer,
SGDClassifier,
StandardScaler,
TreeEnsembleClassifier,
TreeEnsembleRegressor,
TruncatedSVD,
VarianceThreshold,
LGBMClassifier,
LGBMRanker,
LGBMRegressor,
XGBClassifier,
XGBRanker,
XGBRegressor
"""
from collections import defaultdict
from .exceptions import MissingConverter
from ._utils import torch_installed, sklearn_installed, lightgbm_installed, xgboost_installed, onnx_runtime_installed
def _build_sklearn_operator_list():
"""
    Put all supported Sklearn operators on a list.
"""
if sklearn_installed():
# Enable experimental to import HistGradientBoosting*
from sklearn.experimental import enable_hist_gradient_boosting
# Tree-based models
from sklearn.ensemble import (
ExtraTreesClassifier,
ExtraTreesRegressor,
GradientBoostingClassifier,
GradientBoostingRegressor,
HistGradientBoostingClassifier,
HistGradientBoostingRegressor,
IsolationForest,
RandomForestClassifier,
RandomForestRegressor,
)
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
# Linear-based models
from sklearn.linear_model import (
LinearRegression,
LogisticRegression,
LogisticRegressionCV,
SGDClassifier,
)
# SVM-based models
from sklearn.svm import LinearSVC, SVC, NuSVC
# Imputers
from sklearn.impute import MissingIndicator, SimpleImputer
# MLP Models
from sklearn.neural_network import MLPClassifier, MLPRegressor
# Naive Bayes Models
from sklearn.naive_bayes import BernoulliNB, GaussianNB, MultinomialNB
# Matrix decomposition transformers
from sklearn.decomposition import PCA, KernelPCA, FastICA, TruncatedSVD
# Preprocessing
from sklearn.preprocessing import (
Binarizer,
KBinsDiscretizer,
MaxAbsScaler,
MinMaxScaler,
Normalizer,
OneHotEncoder,
PolynomialFeatures,
RobustScaler,
StandardScaler,
)
try:
from sklearn.preprocessing import Imputer
except ImportError:
# Imputer was deprecate in sklearn >= 0.22
Imputer = None
# Features
from sklearn.feature_selection import SelectKBest, SelectPercentile, VarianceThreshold
supported_ops = [
# Trees
DecisionTreeClassifier,
DecisionTreeRegressor,
ExtraTreesClassifier,
ExtraTreesRegressor,
GradientBoostingClassifier,
GradientBoostingRegressor,
HistGradientBoostingClassifier,
HistGradientBoostingRegressor,
IsolationForest,
OneHotEncoder,
RandomForestClassifier,
RandomForestRegressor,
# Linear-methods
LinearRegression,
LinearSVC,
LogisticRegression,
LogisticRegressionCV,
SGDClassifier,
# Other models
BernoulliNB,
GaussianNB,
MLPClassifier,
MLPRegressor,
MultinomialNB,
# SVM
NuSVC,
SVC,
# Imputers
Imputer,
MissingIndicator,
SimpleImputer,
# Preprocessing
Binarizer,
KBinsDiscretizer,
MaxAbsScaler,
MinMaxScaler,
Normalizer,
PolynomialFeatures,
RobustScaler,
StandardScaler,
# Matrix Decomposition
FastICA,
KernelPCA,
PCA,
TruncatedSVD,
# Feature selection
SelectKBest,
SelectPercentile,
VarianceThreshold,
]
# Remove all deprecated operators given the sklearn version. E.g., Imputer for sklearn > 0.21.3.
return [x for x in supported_ops if x is not None]
return []
def _build_xgboost_operator_list():
"""
    List all supported XGBoost (Sklearn API) operators.
"""
if xgboost_installed():
from xgboost import XGBClassifier, XGBRanker, XGBRegressor
return [XGBClassifier, XGBRanker, XGBRegressor]
return []
def _build_lightgbm_operator_list():
"""
    List all supported LightGBM (Sklearn API) operators.
"""
if lightgbm_installed():
from lightgbm import LGBMClassifier, LGBMRanker, LGBMRegressor
return [LGBMClassifier, LGBMRanker, LGBMRegressor]
return []
# Associate onnxml types with our operator names.
def _build_onnxml_operator_list():
"""
    List all supported ONNXML operators.
"""
if onnx_runtime_installed():
return [
# Linear-based models
"LinearClassifier",
"LinearRegressor",
# ONNX operators.
"Cast",
"Concat",
"Reshape",
# Preprocessing
"ArrayFeatureExtractor",
"OneHotEncoder",
"Normalizer",
"Scaler",
# Tree-based models
"TreeEnsembleClassifier",
"TreeEnsembleRegressor",
]
return []
def _build_backend_map():
"""
The set of supported backends is defined here.
"""
backends = defaultdict(lambda: None)
if torch_installed():
import torch
backends[torch.__name__] = torch.__name__
backends["py" + torch.__name__] = torch.__name__ # For compatibility with earlier versions.
backends[torch.jit.__name__] = torch.jit.__name__
backends["torchscript"] = torch.jit.__name__ # For reference outside Hummingbird.
if onnx_runtime_installed():
import onnx
backends[onnx.__name__] = onnx.__name__
return backends
def _build_sklearn_api_operator_name_map():
"""
Associate Sklearn with the operator class names.
If two scikit-learn (API) models share a single name, it means they are equivalent in terms of conversion.
"""
# Pipeline ops. These are ops injected by the parser not "real" sklearn operators.
pipeline_operator_list = [
"ArrayFeatureExtractor",
"Concat",
"Multiply",
]
return {
k: "Sklearn" + k.__name__ if hasattr(k, "__name__") else k
for k in sklearn_operator_list + pipeline_operator_list + xgb_operator_list + lgbm_operator_list
}
def _build_onnxml_api_operator_name_map():
"""
Associate ONNXML with the operator class names.
If two ONNXML models share a single name, it means they are equivalent in terms of conversion.
"""
return {k: "ONNXML" + k for k in onnxml_operator_list if k is not None}
def get_sklearn_api_operator_name(model_type):
"""
Get the operator name for the input model type in *scikit-learn API* format.
Args:
model_type: A scikit-learn model object (e.g., RandomForestClassifier)
or an object with scikit-learn API (e.g., LightGBM)
Returns:
A string which stands for the type of the input model in the Hummingbird conversion framework
"""
if model_type not in sklearn_api_operator_name_map:
raise MissingConverter("Unable to find converter for model type {}.".format(model_type))
return sklearn_api_operator_name_map[model_type]
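# Example of the mapping above (not part of the original module): assuming
# scikit-learn is installed, ``get_sklearn_api_operator_name(RandomForestClassifier)``
# returns the string "SklearnRandomForestClassifier", while an unregistered model
# type raises MissingConverter.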
def get_onnxml_api_operator_name(model_type):
"""
Get the operator name for the input model type in *ONNX-ML API* format.
Args:
model_type: A ONNX-ML model object (e.g., TreeEnsembleClassifier)
Returns:
A string which stands for the type of the input model in the Hummingbird conversion framework.
None if the model_type is not supported
"""
if model_type not in onnxml_api_operator_name_map:
return None
return onnxml_api_operator_name_map[model_type]
# Supported operators.
sklearn_operator_list = _build_sklearn_operator_list()
xgb_operator_list = _build_xgboost_operator_list()
lgbm_operator_list = _build_lightgbm_operator_list()
onnxml_operator_list = _build_onnxml_operator_list()
sklearn_api_operator_name_map = _build_sklearn_api_operator_name_map()
onnxml_api_operator_name_map = _build_onnxml_api_operator_name_map()
# Supported backends.
backends = _build_backend_map()
# Supported configurations settings accepted by Hummingbird are defined below.
N_FEATURES = "n_features"
"""Number of features expected in the input data."""
TREE_IMPLEMENTATION = "tree_implementation"
"""Which tree implementation to use. Values can be: gemm, tree-trav, perf_tree_trav."""
ONNX_OUTPUT_MODEL_NAME = "onnx_model_name"
"""For ONNX models we can set the name of the output model."""
ONNX_INITIAL_TYPES = "onnx_initial_types"
"""For ONNX models we can explicitly set the input types and shapes."""
ONNX_TARGET_OPSET = "onnx_target_opset"
"""For ONNX models we can set the target opset to use. 9 by default."""
INPUT_NAMES = "input_names"
"""Set the names of the inputs. Assume that the numbers onf inputs_names is equal to the number of inputs."""
OUTPUT_NAMES = "output_names"
"""Set the names of the outputs."""
CONTAINER = "container"
"""Whether to return the container for Sklearn API or just the model"""
| mit | 5,985,921,669,037,165,000 | 27.493188 | 117 | 0.643588 | false |
sbrodeur/ros-icreate-bbb | src/action/scripts/record/autonomous_control.py | 1 | 2407 | #!/usr/bin/env python
# Copyright (c) 2016, Simon Brodeur
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
# OF SUCH DAMAGE.
import rospy
from action.behaviours import BehaviourController, AvoidObstacle, Wander
if __name__ == '__main__':
try:
rospy.init_node('autonomous_control', log_level=rospy.INFO)
controller = BehaviourController(rate=15)
#controller.addBehaviour(AvoidObstacle(wheelDropEnabled=False, cliffEnabled=False), priority=0)
controller.addBehaviour(AvoidObstacle(), priority=0)
controller.addBehaviour(Wander(), priority=1)
#controller.addBehaviour(Spiral(), priority=1)
#controller.addBehaviour(AutoDock(), priority=1)
#controller.addBehaviour(Charge(), priority=1)
#controller.addBehaviour(Undock(), priority=1)
#controller.addBehaviour(FollowWall(), priority=1)
controller.spin()
except rospy.ROSInterruptException: pass
| bsd-3-clause | 1,156,342,221,365,033,700 | 47.14 | 103 | 0.74865 | false |
joe-eklund/cs460 | bene/examples/transfer.py | 1 | 2999 | import sys
sys.path.append('..')
from src.sim import Sim
from src.node import Node
from src.link import Link
from src.transport import Transport
from src.tcp import TCP
from networks.network import Network
import optparse
import os
import subprocess
class AppHandler(object):
def __init__(self,filename):
self.filename = filename
self.directory = 'received'
if not os.path.exists(self.directory):
os.makedirs(self.directory)
self.f = open("%s/%s" % (self.directory,self.filename),'w')
def receive_data(self,data):
Sim.trace('AppHandler',"application got %d bytes" % (len(data)))
self.f.write(data)
self.f.flush()
class Main(object):
def __init__(self):
self.directory = 'received'
self.parse_options()
self.run()
self.diff()
def parse_options(self):
parser = optparse.OptionParser(usage = "%prog [options]",
version = "%prog 0.1")
parser.add_option("-f","--filename",type="str",dest="filename",
default='test.txt',
help="filename to send")
parser.add_option("-l","--loss",type="float",dest="loss",
default=0.0,
help="random loss rate")
(options,args) = parser.parse_args()
self.filename = options.filename
self.loss = options.loss
def diff(self):
args = ['diff','-u',self.filename,self.directory+'/'+self.filename]
result = subprocess.Popen(args,stdout = subprocess.PIPE).communicate()[0]
print
if not result:
print "File transfer correct!"
else:
print "File transfer failed. Here is the diff:"
print
print result
def run(self):
# parameters
Sim.scheduler.reset()
Sim.set_debug('AppHandler')
Sim.set_debug('TCP')
# setup network
net = Network('../networks/one-hop.txt')
net.loss(self.loss)
# setup routes
n1 = net.get_node('n1')
n2 = net.get_node('n2')
n1.add_forwarding_entry(address=n2.get_address('n1'),link=n1.links[0])
n2.add_forwarding_entry(address=n1.get_address('n2'),link=n2.links[0])
# setup transport
t1 = Transport(n1)
t2 = Transport(n2)
# setup application
a = AppHandler(self.filename)
# setup connection
c1 = TCP(t1,n1.get_address('n2'),1,n2.get_address('n1'),1,a,window=500)
c2 = TCP(t2,n2.get_address('n1'),1,n1.get_address('n2'),1,a,window=500)
# send a file
with open(self.filename,'r') as f:
while True:
data = f.read(10000)
if not data:
break
Sim.scheduler.add(delay=0, event=data, handler=c1.send)
# run the simulation
Sim.scheduler.run()
if __name__ == '__main__':
m = Main()
| gpl-3.0 | 263,981,328,076,515,230 | 28.401961 | 81 | 0.552184 | false |
d-li14/CS231n-Assignments | assignment2/cs231n/classifiers/fc_net.py | 1 | 16265 | # from builtins import range
# from builtins import object
import numpy as np
from cs231n.layers import *
from cs231n.layer_utils import *
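# Note: the batch-normalization path in FullyConnectedNet below also relies on
# affine_bn_relu_forward / affine_bn_relu_backward convenience layers, which are
# assumed to be implemented alongside affine_relu_forward / affine_relu_backward
# in cs231n.layer_utils.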
class TwoLayerNet(object):
"""
A two-layer fully-connected neural network with ReLU nonlinearity and
softmax loss that uses a modular layer design. We assume an input dimension
of D, a hidden dimension of H, and perform classification over C classes.
    The architecture should be affine - relu - affine - softmax.
Note that this class does not implement gradient descent; instead, it
will interact with a separate Solver object that is responsible for running
optimization.
The learnable parameters of the model are stored in the dictionary
self.params that maps parameter names to numpy arrays.
"""
def __init__(self, input_dim=3*32*32, hidden_dim=100, num_classes=10,
weight_scale=1e-3, reg=0.0):
"""
Initialize a new network.
Inputs:
- input_dim: An integer giving the size of the input
- hidden_dim: An integer giving the size of the hidden layer
- num_classes: An integer giving the number of classes to classify
- weight_scale: Scalar giving the standard deviation for random
initialization of the weights.
- reg: Scalar giving L2 regularization strength.
"""
self.params = {}
self.reg = reg
############################################################################
# TODO: Initialize the weights and biases of the two-layer net. Weights #
# should be initialized from a Gaussian with standard deviation equal to #
# weight_scale, and biases should be initialized to zero. All weights and #
# biases should be stored in the dictionary self.params, with first layer #
# weights and biases using the keys 'W1' and 'b1' and second layer weights #
# and biases using the keys 'W2' and 'b2'. #
############################################################################
self.params['W1'] = np.random.normal(0, weight_scale, [input_dim, hidden_dim])
self.params['b1'] = np.zeros([hidden_dim])
self.params['W2'] = np.random.normal(0, weight_scale, [hidden_dim, num_classes])
self.params['b2'] = np.zeros([num_classes])
############################################################################
# END OF YOUR CODE #
############################################################################
def loss(self, X, y=None):
"""
Compute loss and gradient for a minibatch of data.
Inputs:
- X: Array of input data of shape (N, d_1, ..., d_k)
- y: Array of labels, of shape (N,). y[i] gives the label for X[i].
Returns:
If y is None, then run a test-time forward pass of the model and return:
- scores: Array of shape (N, C) giving classification scores, where
scores[i, c] is the classification score for X[i] and class c.
If y is not None, then run a training-time forward and backward pass and
return a tuple of:
- loss: Scalar value giving the loss
- grads: Dictionary with the same keys as self.params, mapping parameter
names to gradients of the loss with respect to those parameters.
"""
scores = None
############################################################################
# TODO: Implement the forward pass for the two-layer net, computing the #
# class scores for X and storing them in the scores variable. #
############################################################################
a, fc_cache = affine_relu_forward(X, self.params['W1'], self.params['b1'])
scores, affine_cache = affine_forward(a, self.params['W2'], self.params['b2'])
############################################################################
# END OF YOUR CODE #
############################################################################
# If y is None then we are in test mode so just return scores
if y is None:
return scores
loss, grads = 0, {}
############################################################################
# TODO: Implement the backward pass for the two-layer net. Store the loss #
# in the loss variable and gradients in the grads dictionary. Compute data #
# loss using softmax, and make sure that grads[k] holds the gradients for #
# self.params[k]. Don't forget to add L2 regularization! #
# #
# NOTE: To ensure that your implementation matches ours and you pass the #
# automated tests, make sure that your L2 regularization includes a factor #
# of 0.5 to simplify the expression for the gradient. #
############################################################################
loss, dout = softmax_loss(scores, y)
dout, grads['W2'], grads['b2'] = affine_backward(dout, affine_cache)
dout, grads['W1'], grads['b1'] = affine_relu_backward(dout, fc_cache)
loss += .5 * self.reg * (np.sum(self.params['W1'] * self.params['W1']) + np.sum(self.params['W2'] * self.params['W2']))
grads['W2'] += self.reg * self.params['W2']
grads['W1'] += self.reg * self.params['W1']
############################################################################
# END OF YOUR CODE #
############################################################################
return loss, grads
class FullyConnectedNet(object):
"""
A fully-connected neural network with an arbitrary number of hidden layers,
ReLU nonlinearities, and a softmax loss function. This will also implement
dropout and batch normalization as options. For a network with L layers,
the architecture will be
{affine - [batch norm] - relu - [dropout]} x (L - 1) - affine - softmax
where batch normalization and dropout are optional, and the {...} block is
repeated L - 1 times.
Similar to the TwoLayerNet above, learnable parameters are stored in the
self.params dictionary and will be learned using the Solver class.
"""
def __init__(self, hidden_dims, input_dim=3*32*32, num_classes=10,
dropout=0, use_batchnorm=False, reg=0.0,
weight_scale=1e-2, dtype=np.float32, seed=None):
"""
Initialize a new FullyConnectedNet.
Inputs:
- hidden_dims: A list of integers giving the size of each hidden layer.
- input_dim: An integer giving the size of the input.
- num_classes: An integer giving the number of classes to classify.
- dropout: Scalar between 0 and 1 giving dropout strength. If dropout=0 then
the network should not use dropout at all.
- use_batchnorm: Whether or not the network should use batch normalization.
- reg: Scalar giving L2 regularization strength.
- weight_scale: Scalar giving the standard deviation for random
initialization of the weights.
- dtype: A numpy datatype object; all computations will be performed using
this datatype. float32 is faster but less accurate, so you should use
float64 for numeric gradient checking.
- seed: If not None, then pass this random seed to the dropout layers. This
      will make the dropout layers deterministic so we can gradient check the
model.
"""
self.use_batchnorm = use_batchnorm
self.use_dropout = dropout > 0
self.reg = reg
self.num_layers = 1 + len(hidden_dims)
self.dtype = dtype
self.params = {}
############################################################################
# TODO: Initialize the parameters of the network, storing all values in #
# the self.params dictionary. Store weights and biases for the first layer #
# in W1 and b1; for the second layer use W2 and b2, etc. Weights should be #
# initialized from a normal distribution with standard deviation equal to #
# weight_scale and biases should be initialized to zero. #
# #
# When using batch normalization, store scale and shift parameters for the #
# first layer in gamma1 and beta1; for the second layer use gamma2 and #
# beta2, etc. Scale parameters should be initialized to one and shift #
# parameters should be initialized to zero. #
############################################################################
self.params['W1'] = np.random.normal(0, weight_scale, [input_dim, hidden_dims[0]])
self.params['b1'] = np.zeros(hidden_dims[0])
if self.use_batchnorm:
self.params['gamma1'] = np.ones(hidden_dims[0])
self.params['beta1'] = np.zeros(hidden_dims[0])
for i in range(1, self.num_layers - 1):
self.params['W%d' % (i + 1)] = np.random.normal(0, weight_scale, [hidden_dims[i - 1], hidden_dims[i]])
self.params['b%d' % (i + 1)] = np.zeros(hidden_dims[i])
if self.use_batchnorm:
self.params['gamma%d' % (i + 1)] = np.ones(hidden_dims[i])
self.params['beta%d' % (i + 1)] = np.zeros(hidden_dims[i])
self.params['W%d' % self.num_layers] = np.random.normal(0, weight_scale, [hidden_dims[self.num_layers - 2], num_classes])
self.params['b%d' % self.num_layers] = np.zeros(num_classes)
############################################################################
# END OF YOUR CODE #
############################################################################
# When using dropout we need to pass a dropout_param dictionary to each
# dropout layer so that the layer knows the dropout probability and the mode
# (train / test). You can pass the same dropout_param to each dropout layer.
self.dropout_param = {}
if self.use_dropout:
self.dropout_param = {'mode': 'train', 'p': dropout}
if seed is not None:
self.dropout_param['seed'] = seed
# With batch normalization we need to keep track of running means and
# variances, so we need to pass a special bn_param object to each batch
# normalization layer. You should pass self.bn_params[0] to the forward pass
# of the first batch normalization layer, self.bn_params[1] to the forward
# pass of the second batch normalization layer, etc.
self.bn_params = []
if self.use_batchnorm:
self.bn_params = [{'mode': 'train'} for i in range(self.num_layers - 1)]
# Cast all parameters to the correct datatype
for k, v in self.params.items():
self.params[k] = v.astype(dtype)
def loss(self, X, y=None):
"""
Compute loss and gradient for the fully-connected net.
Input / output: Same as TwoLayerNet above.
"""
X = X.astype(self.dtype)
mode = 'test' if y is None else 'train'
# Set train/test mode for batchnorm params and dropout param since they
# behave differently during training and testing.
if self.use_dropout:
self.dropout_param['mode'] = mode
if self.use_batchnorm:
for bn_param in self.bn_params:
bn_param['mode'] = mode
scores = None
############################################################################
# TODO: Implement the forward pass for the fully-connected net, computing #
# the class scores for X and storing them in the scores variable. #
# #
# When using dropout, you'll need to pass self.dropout_param to each #
# dropout forward pass. #
# #
# When using batch normalization, you'll need to pass self.bn_params[0] to #
# the forward pass for the first batch normalization layer, pass #
# self.bn_params[1] to the forward pass for the second batch normalization #
# layer, etc. #
############################################################################
h = {}
cache = {}
dropout_param = {}
h[0] = X.reshape([X.shape[0], -1])
for i in range(1, self.num_layers):
if self.use_batchnorm:
h[i], cache[i] = affine_bn_relu_forward(h[i - 1], self.params['W%d' % i], self.params['b%d' % i], self.params['gamma%d' % i], self.params['beta%d' % i], self.bn_params[i - 1])
else:
h[i], cache[i] = affine_relu_forward(h[i - 1], self.params['W%d' % i], self.params['b%d' % i])
if self.use_dropout:
h[i], dropout_param[i] = dropout_forward(h[i], self.dropout_param)
scores, cache[self.num_layers] = affine_forward(h[self.num_layers - 1], self.params['W%d' % self.num_layers], self.params['b%d' % self.num_layers])
############################################################################
# END OF YOUR CODE #
############################################################################
# If test mode return early
if mode == 'test':
return scores
loss, grads = 0.0, {}
############################################################################
# TODO: Implement the backward pass for the fully-connected net. Store the #
# loss in the loss variable and gradients in the grads dictionary. Compute #
# data loss using softmax, and make sure that grads[k] holds the gradients #
# for self.params[k]. Don't forget to add L2 regularization! #
# #
# When using batch normalization, you don't need to regularize the scale #
# and shift parameters. #
# #
# NOTE: To ensure that your implementation matches ours and you pass the #
# automated tests, make sure that your L2 regularization includes a factor #
# of 0.5 to simplify the expression for the gradient. #
############################################################################
loss, dout = softmax_loss(scores, y)
loss += .5 * self.reg * np.sum(self.params['W%d' % self.num_layers] * self.params['W%d' % self.num_layers])
dout, grads['W%d' % self.num_layers], grads['b%d' % self.num_layers] = affine_backward(dout, cache[self.num_layers])
grads['W%d' % self.num_layers] += self.reg * self.params['W%d' % self.num_layers]
for i in range(self.num_layers - 1, 0, -1):
loss += .5 * self.reg * np.sum(self.params['W%d' % i] * self.params['W%d' % i])
if self.use_dropout:
dout = dropout_backward(dout, dropout_param[i])
if self.use_batchnorm:
dout, grads['W%d' % i], grads['b%d' % i], grads['gamma%d' % i], grads['beta%d' % i] = affine_bn_relu_backward(dout, cache[i])
else:
dout, grads['W%d' % i], grads['b%d' % i] = affine_relu_backward(dout, cache[i])
grads['W%d' % i] += self.reg * self.params['W%d' % i]
############################################################################
# END OF YOUR CODE #
############################################################################
return loss, grads
| gpl-3.0 | 4,564,913,642,847,883,300 | 53.397993 | 177 | 0.508269 | false |
Richard-Mathie/xml_xls_loader | setup.py | 1 | 1452 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from pip.req import parse_requirements
from io import open
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
with open('Version', 'r', encoding='utf-8') as f:
version = next(f).strip()
with open('README.rst', encoding='utf-8') as f:
readme = f.read()
requirements = parse_requirements('requirements.txt', session=False)
requirements = [str(ir.req) for ir in requirements]
__NAME__ = 'xml_xls_loader'
__doc__ = readme
__author__ = 'Richard-Mathie'
__license__ = 'GPLv3'
setup(
name=__NAME__,
version=version,
license=__license__,
description='Module to load a MS xml xls into a pandas DataFrame',
long_description=__doc__,
author=__author__,
author_email='[email protected]',
url='/'.join(['https://github.com', __author__, __NAME__]),
py_modules=['xml_xls_loader'],
platforms='any',
install_requires=requirements,
test_suite='tests',
classifiers=[
'Environment :: Console',
'Operating System :: OS Independent',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7'
]
)
| gpl-3.0 | 5,534,715,379,339,304,000 | 28.632653 | 75 | 0.632231 | false |
r0bin-fr/pirok | multithreadPID.py | 1 | 4130 | import os
import glob
import time
import threading
import SSRControl
import readMaxim
#mode BOOST or not?
BOILER_BOOST_MODE = 1
#boiler max temperature for boost mode
BOILER_MAX_TEMP = 124
#boiler threshold to stop boost mode
BOILER_BOOST_GROUP_LIMIT = 84
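# Note: with the gains initialised in TaskControlPID below (pGain=1.0, iGain=0.0,
# dGain=0.0) pid_update() reduces to a pure proportional controller on the boiler
# temperature error; the integral state is clamped to [m_iMin, m_iMax] to limit
# wind-up whenever a non-zero integral gain is configured.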
class TaskControlPID(threading.Thread):
#default target temp is 118C
def __init__(self, taskid = 0, maximGroupe = None, maximBoiler = None, tTarget = 118):
threading.Thread.__init__(self)
self.lok = threading.Lock()
self.taskid = taskid
self._stopevent = threading.Event( )
self.maximBoiler = maximBoiler
self.maximGroupe = maximGroupe
self.currentDrive = 0
#init regulator values
self.m_timeStep = 1.0
self.m_targetTemp = tTarget
self.m_latestTemp = 20.0
self.m_latestPower = 0.0
#init PID values
self.m_dState = 0.0
self.m_iState = 0.0
self.m_iMin = -1.0
self.m_iMax = 1.0
self.m_iGain = 0.0
self.m_pGain = 1.0
self.m_dGain = 0.0
#based on James Ward's PID algorithm
def pid_update(self,error = 0.0, position = 0.0):
# calculate proportional term
pTerm = self.m_pGain * error
# calculate integral state with appropriate limiting
self.m_iState += error
if ( self.m_iState > self.m_iMax ):
self.m_iState = self.m_iMax
if ( self.m_iState < self.m_iMin ):
self.m_iState = self.m_iMin
#calculate integral term
iTerm = self.m_iGain * self.m_iState
#calculate derivative term
dTerm = self.m_dGain * (self.m_dState - position)
self.m_dState = position
return pTerm + dTerm + iTerm
def run(self):
		print "Thread PID no", self.taskid, "is ready!\n > Based on James Ward's PID algorithm"
drive = 0.0
lastdrive = 0.0
#based on James Ward's PID algorithm
while not self._stopevent.isSet():
#PID computation
#timestamp
next = time.time()
#get current boiler temp
latestTemp = self.maximBoiler.getTemp()
			#boiler control
lastdrive = drive
#if temperature read is correct, start algorithm
if ( latestTemp > 0.5 ):
#calculate next time step
next += self.m_timeStep
#get current target temperature
cTargetTemp = self.getTargetTemp()
#calculate PID update
#boost mode only if boiler target temp is higher than 100C (ECO mode is 90)
if((BOILER_BOOST_MODE == 1) and (cTargetTemp > 100)):
tgroupe = self.maximGroupe.getTemp()
#stop the boost mode when group temp is higher than boiler temp - 30C (approximate)
bBoostLimit = cTargetTemp - 30
#boost boiler target temperature if we are under a certain value
if ((tgroupe > 0.5) and (tgroupe < bBoostLimit)):
drive = self.pid_update( BOILER_MAX_TEMP - latestTemp, latestTemp )
else:
drive = self.pid_update( cTargetTemp - latestTemp, latestTemp )
else:
drive = self.pid_update( cTargetTemp - latestTemp, latestTemp )
#drive = self.pid_update( self.getTargetTemp() - latestTemp, latestTemp )
#clamp the output power to sensible range
if ( drive > 1.0 ):
drive = 1.0
if ( drive < 0.0 ):
drive = 0.0
#update the boiler power (with PWM) if last state changed
if ( drive != lastdrive ):
drv = int(drive * 100)
self.setCurrentDrive( drv )
SSRControl.setBoilerPWM( drv )
#wait the remaining time (typically, slot = 1 second)
remain = next - time.time()
if ( remain > 0.0 ):
self._stopevent.wait(remain)
def stop(self):
print "stopping thread no", self.taskid
self._stopevent.set( )
def getTargetTemp(self):
#protect concurrent access with mutex
self.lok.acquire()
tt = self.m_targetTemp
self.lok.release()
return tt
def setTargetTemp(self,ttemp=115):
#protect concurrent access with mutex
self.lok.acquire()
self.m_targetTemp = ttemp
self.lok.release()
def getCurrentDrive(self):
#protect concurrent access with mutex
self.lok.acquire()
tt = self.currentDrive
self.lok.release()
return tt
def setCurrentDrive(self,drive=0):
#protect concurrent access with mutex
self.lok.acquire()
self.currentDrive = drive
self.lok.release()
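# Hypothetical usage sketch (not part of the original module): it exercises only
# the PID arithmetic above with made-up temperatures. No Maxim sensors or SSR
# output are involved, and it still needs the module's own imports (SSRControl,
# readMaxim) to resolve.
if __name__ == '__main__':
  pid = TaskControlPID(taskid=0, maximGroupe=None, maximBoiler=None, tTarget=118)
  for temp in (20.0, 60.0, 100.0, 117.0, 118.0, 121.0):
    drive = pid.pid_update(pid.getTargetTemp() - temp, temp)
    #clamp to the same range used in run()
    if drive > 1.0:
      drive = 1.0
    if drive < 0.0:
      drive = 0.0
    print "temp:", temp, "-> drive:", drive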
| gpl-2.0 | -1,921,692,476,958,163,000 | 26.533333 | 93 | 0.67046 | false |
hugovk/pylast | tests/test_artist.py | 1 | 8172 | #!/usr/bin/env python
"""
Integration (not unit) tests for pylast.py
"""
import unittest
import pylast
from .test_pylast import TestPyLastWithLastFm
class TestPyLastArtist(TestPyLastWithLastFm):
def test_repr(self):
# Arrange
artist = pylast.Artist("Test Artist", self.network)
# Act
representation = repr(artist)
# Assert
self.assertTrue(representation.startswith("pylast.Artist('Test Artist',"))
def test_artist_is_hashable(self):
# Arrange
test_artist = self.network.get_artist("Test Artist")
artist = test_artist.get_similar(limit=2)[0].item
self.assertIsInstance(artist, pylast.Artist)
# Act/Assert
self.helper_is_thing_hashable(artist)
def test_bio_published_date(self):
# Arrange
artist = pylast.Artist("Test Artist", self.network)
# Act
bio = artist.get_bio_published_date()
# Assert
self.assertIsNotNone(bio)
self.assertGreaterEqual(len(bio), 1)
def test_bio_content(self):
# Arrange
artist = pylast.Artist("Test Artist", self.network)
# Act
bio = artist.get_bio_content(language="en")
# Assert
self.assertIsNotNone(bio)
self.assertGreaterEqual(len(bio), 1)
def test_bio_summary(self):
# Arrange
artist = pylast.Artist("Test Artist", self.network)
# Act
bio = artist.get_bio_summary(language="en")
# Assert
self.assertIsNotNone(bio)
self.assertGreaterEqual(len(bio), 1)
def test_artist_top_tracks(self):
# Arrange
# Pick an artist with plenty of plays
artist = self.network.get_top_artists(limit=1)[0].item
# Act
things = artist.get_top_tracks(limit=2)
# Assert
self.helper_two_different_things_in_top_list(things, pylast.Track)
def test_artist_top_albums(self):
# Arrange
# Pick an artist with plenty of plays
artist = self.network.get_top_artists(limit=1)[0].item
# Act
things = artist.get_top_albums(limit=2)
# Assert
self.helper_two_different_things_in_top_list(things, pylast.Album)
def test_artist_top_albums_limit_1(self):
# Arrange
limit = 1
# Pick an artist with plenty of plays
artist = self.network.get_top_artists(limit=1)[0].item
# Act
things = artist.get_top_albums(limit=limit)
# Assert
self.assertEqual(len(things), 1)
def test_artist_top_albums_limit_50(self):
# Arrange
limit = 50
# Pick an artist with plenty of plays
artist = self.network.get_top_artists(limit=1)[0].item
# Act
things = artist.get_top_albums(limit=limit)
# Assert
self.assertEqual(len(things), 50)
def test_artist_top_albums_limit_100(self):
# Arrange
limit = 100
# Pick an artist with plenty of plays
artist = self.network.get_top_artists(limit=1)[0].item
# Act
things = artist.get_top_albums(limit=limit)
# Assert
self.assertEqual(len(things), 100)
def test_artist_listener_count(self):
# Arrange
artist = self.network.get_artist("Test Artist")
# Act
count = artist.get_listener_count()
# Assert
self.assertIsInstance(count, int)
self.assertGreater(count, 0)
def test_tag_artist(self):
# Arrange
artist = self.network.get_artist("Test Artist")
# artist.clear_tags()
# Act
artist.add_tag("testing")
# Assert
tags = artist.get_tags()
self.assertGreater(len(tags), 0)
found = False
for tag in tags:
if tag.name == "testing":
found = True
break
self.assertTrue(found)
def test_remove_tag_of_type_text(self):
# Arrange
tag = "testing" # text
artist = self.network.get_artist("Test Artist")
artist.add_tag(tag)
# Act
artist.remove_tag(tag)
# Assert
tags = artist.get_tags()
found = False
for tag in tags:
if tag.name == "testing":
found = True
break
self.assertFalse(found)
def test_remove_tag_of_type_tag(self):
# Arrange
tag = pylast.Tag("testing", self.network) # Tag
artist = self.network.get_artist("Test Artist")
artist.add_tag(tag)
# Act
artist.remove_tag(tag)
# Assert
tags = artist.get_tags()
found = False
for tag in tags:
if tag.name == "testing":
found = True
break
self.assertFalse(found)
def test_remove_tags(self):
# Arrange
tags = ["removetag1", "removetag2"]
artist = self.network.get_artist("Test Artist")
artist.add_tags(tags)
artist.add_tags("1more")
tags_before = artist.get_tags()
# Act
artist.remove_tags(tags)
# Assert
tags_after = artist.get_tags()
self.assertEqual(len(tags_after), len(tags_before) - 2)
found1, found2 = False, False
for tag in tags_after:
if tag.name == "removetag1":
found1 = True
elif tag.name == "removetag2":
found2 = True
self.assertFalse(found1)
self.assertFalse(found2)
def test_set_tags(self):
# Arrange
tags = ["sometag1", "sometag2"]
artist = self.network.get_artist("Test Artist 2")
artist.add_tags(tags)
tags_before = artist.get_tags()
new_tags = ["settag1", "settag2"]
# Act
artist.set_tags(new_tags)
# Assert
tags_after = artist.get_tags()
self.assertNotEqual(tags_before, tags_after)
self.assertEqual(len(tags_after), 2)
found1, found2 = False, False
for tag in tags_after:
if tag.name == "settag1":
found1 = True
elif tag.name == "settag2":
found2 = True
self.assertTrue(found1)
self.assertTrue(found2)
def test_artists(self):
# Arrange
artist1 = self.network.get_artist("Radiohead")
artist2 = self.network.get_artist("Portishead")
# Act
url = artist1.get_url()
mbid = artist1.get_mbid()
image = artist1.get_cover_image()
playcount = artist1.get_playcount()
streamable = artist1.is_streamable()
name = artist1.get_name(properly_capitalized=False)
name_cap = artist1.get_name(properly_capitalized=True)
# Assert
self.assertIn("https", image)
self.assertGreater(playcount, 1)
self.assertNotEqual(artist1, artist2)
self.assertEqual(name.lower(), name_cap.lower())
self.assertEqual(url, "https://www.last.fm/music/radiohead")
self.assertEqual(mbid, "a74b1b7f-71a5-4011-9441-d0b5e4122711")
self.assertIsInstance(streamable, bool)
def test_artist_eq_none_is_false(self):
# Arrange
artist1 = None
artist2 = pylast.Artist("Test Artist", self.network)
# Act / Assert
self.assertNotEqual(artist1, artist2)
def test_artist_ne_none_is_true(self):
# Arrange
artist1 = None
artist2 = pylast.Artist("Test Artist", self.network)
# Act / Assert
self.assertNotEqual(artist1, artist2)
def test_artist_get_correction(self):
# Arrange
artist = pylast.Artist("guns and roses", self.network)
# Act
corrected_artist_name = artist.get_correction()
# Assert
self.assertEqual(corrected_artist_name, "Guns N' Roses")
def test_get_userplaycount(self):
# Arrange
artist = pylast.Artist("John Lennon", self.network, username=self.username)
# Act
playcount = artist.get_userplaycount()
# Assert
self.assertGreaterEqual(playcount, 0)
if __name__ == "__main__":
unittest.main(failfast=True)
| apache-2.0 | -7,710,875,377,642,233,000 | 26.608108 | 83 | 0.576848 | false |
sanyaade-mediadev/Transana | FormatParagraphPanel.py | 1 | 20869 | # Copyright (C) 2003 - 2015 The Board of Regents of the University of Wisconsin System
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of version 2 of the GNU General Public License as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
""" This dialog implements the Transana Format Paragraph Panel class. """
__author__ = 'David Woods <[email protected]>'
# Enable (True) or Disable (False) debugging messages
DEBUG = False
if DEBUG:
print "FormatParagraphPanel DEBUG is ON"
# import wxPython
import wx
# import the wxPython RichTextCtrl
import wx.richtext as rt
# import the TransanaGlobal variables
import TransanaGlobal
class FormatParagraphPanel(wx.Panel):
""" Transana's custom Paragraph Dialog Box. Pass in a wxFontData object (to maintain compatability with the wxFontDialog) or
a TransanaFontDef object to allow for ambiguity in the font specification. """
def __init__(self, parent, formatData):
""" Initialize the Paragraph Panel. """
self.formatData = formatData.copy()
        # Create the Format Paragraph panel
wx.Panel.__init__(self, parent, -1) # , _('Font'), style=wx.CAPTION | wx.SYSTEM_MENU | wx.THICK_FRAME)
# To look right, the Mac needs the Small Window Variant.
if "__WXMAC__" in wx.PlatformInfo:
self.SetWindowVariant(wx.WINDOW_VARIANT_SMALL)
# Create the main Sizer, which will hold the boxTop, boxMiddle, and boxButton sizers
box = wx.BoxSizer(wx.VERTICAL)
# Paragraph Alignment.
# Create the label
lblAlign = wx.StaticText(self, -1, _('Alignment:'))
box.Add(lblAlign, 0, wx.ALIGN_LEFT | wx.ALIGN_TOP | wx.LEFT | wx.TOP, 15)
box.Add((0, 5)) # Spacer
        # Create a list of alignment options. (Justified is not yet supported by the RichTextCtrl.)
alignList = [_('Left'), _("Center"), _("Right")]
# Now create the Font Sizes list box
self.lbAlign = wx.Choice(self, -1, choices=alignList)
if self.formatData.paragraphAlignment == wx.TEXT_ALIGNMENT_LEFT:
self.lbAlign.SetStringSelection(_("Left"))
elif self.formatData.paragraphAlignment == wx.TEXT_ALIGNMENT_CENTRE:
self.lbAlign.SetStringSelection(_("Center"))
elif self.formatData.paragraphAlignment == wx.TEXT_ALIGNMENT_RIGHT:
self.lbAlign.SetStringSelection(_("Right"))
self.lbAlign.Bind(wx.EVT_CHOICE, self.OnAlignSelect)
        # Add the alignment control to the main box sizer
box.Add(self.lbAlign, 0, wx.ALIGN_LEFT | wx.EXPAND | wx.LEFT | wx.RIGHT, 15)
# Create the label
lblIndent = wx.StaticText(self, -1, _('Indentation:'))
box.Add(lblIndent, 0, wx.ALIGN_LEFT | wx.ALIGN_TOP | wx.LEFT | wx.TOP, 15)
box.Add((0, 5)) # Spacer
indentSizer = wx.BoxSizer(wx.HORIZONTAL)
# Left Indent
leftIndentSizer = wx.BoxSizer(wx.VERTICAL)
lblLeftIndent = wx.StaticText(self, -1, _("Left:"))
leftIndentSizer.Add(lblLeftIndent, 0, wx.BOTTOM, 5)
self.txtLeftIndent = wx.TextCtrl(self, -1, "")
self.txtLeftIndent.Bind(wx.EVT_CHAR, self.OnNumOnly)
self.txtLeftIndent.Bind(wx.EVT_KEY_UP, self.OnKeyUp)
leftIndentSizer.Add(self.txtLeftIndent, 0, wx.EXPAND)
indentSizer.Add(leftIndentSizer, 1, wx.EXPAND | wx.ALIGN_LEFT | wx.LEFT | wx.RIGHT, 15)
# First Line Indent
firstLineIndentSizer = wx.BoxSizer(wx.VERTICAL)
lblFirstLineIndent = wx.StaticText(self, -1, _("First Line:"))
firstLineIndentSizer.Add(lblFirstLineIndent, 0, wx.BOTTOM, 5)
self.txtFirstLineIndent = wx.TextCtrl(self, -1, "")
self.txtFirstLineIndent.Bind(wx.EVT_CHAR, self.OnNumOnly)
self.txtFirstLineIndent.Bind(wx.EVT_KEY_UP, self.OnKeyUp)
firstLineIndentSizer.Add(self.txtFirstLineIndent, 0, wx.EXPAND)
indentSizer.Add(firstLineIndentSizer, 1, wx.EXPAND | wx.ALIGN_LEFT | wx.RIGHT, 15)
# Right Indent
rightIndentSizer = wx.BoxSizer(wx.VERTICAL)
lblRightIndent = wx.StaticText(self, -1, _("Right:"))
rightIndentSizer.Add(lblRightIndent, 0, wx.BOTTOM, 5)
self.txtRightIndent = wx.TextCtrl(self, -1, "")
self.txtRightIndent.Bind(wx.EVT_CHAR, self.OnNumOnly)
self.txtRightIndent.Bind(wx.EVT_KEY_UP, self.OnKeyUp)
rightIndentSizer.Add(self.txtRightIndent, 0, wx.EXPAND)
indentSizer.Add(rightIndentSizer, 1, wx.EXPAND | wx.ALIGN_LEFT | wx.RIGHT, 15)
box.Add(indentSizer, 0, wx.EXPAND)
# Line Spacing
# Create the label
lblSpacing = wx.StaticText(self, -1, _('Line Spacing:'))
box.Add(lblSpacing, 0, wx.ALIGN_LEFT | wx.ALIGN_TOP | wx.LEFT | wx.TOP, 15)
box.Add((0, 5)) # Spacer
# Create the list of line spacing options
spacingList = [_('Single'), _('11 point'), _('12 point'), _("One and a half"), _("Double"), _("Two and a half"), _("Triple")]
# Now create the Line Spacing list box
self.lbSpacing = wx.Choice(self, -1, choices=spacingList)
# if line spacing <= 10 ...
# If line spacing is 0, let's reset it to 11!
if self.formatData.paragraphLineSpacing == 0:
self.formatData.paragraphLineSpacing = 11
if (self.formatData.paragraphLineSpacing <= wx.TEXT_ATTR_LINE_SPACING_NORMAL):
self.lbSpacing.SetStringSelection(_("Single"))
# if line spacing <= 11 ...
elif (self.formatData.paragraphLineSpacing <= 11):
self.lbSpacing.SetStringSelection(_("11 point"))
# if line spacing <= 12 ...
elif (self.formatData.paragraphLineSpacing <= 12):
self.lbSpacing.SetStringSelection(_("12 point"))
# if line spacing <= 15 ...
elif (self.formatData.paragraphLineSpacing <= wx.TEXT_ATTR_LINE_SPACING_HALF):
self.lbSpacing.SetStringSelection(_("One and a half"))
# if line spacing <= 20 ...
elif (self.formatData.paragraphLineSpacing <= wx.TEXT_ATTR_LINE_SPACING_TWICE):
self.lbSpacing.SetStringSelection(_("Double"))
# if line spacing <= 25 ...
elif (self.formatData.paragraphLineSpacing <= 25):
self.lbSpacing.SetStringSelection(_("Two and a half"))
# if line spacing <= 30 ...
elif (self.formatData.paragraphLineSpacing <= 30):
self.lbSpacing.SetStringSelection(_("Triple"))
# if line spacing > 30, something's probably wrong ...
else:
# ... so reset line spacing to single spaced.
self.lbSpacing.SetStringSelection(_("Single"))
self.formatData.paragraphLineSpacing = 12 # wx.TEXT_ATTR_LINE_SPACING_NORMAL
# Bind the event for setting line spacing.
self.lbSpacing.Bind(wx.EVT_CHOICE, self.OnLineSpacingSelect)
        # Add the line spacing control to the main box sizer
box.Add(self.lbSpacing, 0, wx.ALIGN_LEFT | wx.EXPAND | wx.LEFT | wx.RIGHT, 15)
# Space Before
paragraphSpacingSizer = wx.BoxSizer(wx.HORIZONTAL)
spacingBeforeSizer = wx.BoxSizer(wx.VERTICAL)
lblSpacingBefore = wx.StaticText(self, -1, _("Spacing Before:"))
spacingBeforeSizer.Add(lblSpacingBefore, 0, wx.TOP, 15)
spacingBeforeSizer.Add((0, 5)) # Spacer
self.txtSpacingBefore = wx.TextCtrl(self, -1, "")
self.txtSpacingBefore.Bind(wx.EVT_CHAR, self.OnNumOnly)
spacingBeforeSizer.Add(self.txtSpacingBefore, 0, wx.EXPAND)
paragraphSpacingSizer.Add(spacingBeforeSizer, 1, wx.EXPAND | wx.ALIGN_LEFT | wx.LEFT | wx.RIGHT, 15)
# Space After
spacingAfterSizer = wx.BoxSizer(wx.VERTICAL)
lblSpacingAfter = wx.StaticText(self, -1, _("Spacing After:"))
spacingAfterSizer.Add(lblSpacingAfter, 0, wx.TOP, 15)
spacingAfterSizer.Add((0, 5)) # Spacer
self.txtSpacingAfter = wx.TextCtrl(self, -1, "")
self.txtSpacingAfter.Bind(wx.EVT_CHAR, self.OnNumOnly)
spacingAfterSizer.Add(self.txtSpacingAfter, 0, wx.EXPAND)
paragraphSpacingSizer.Add(spacingAfterSizer, 1, wx.EXPAND | wx.ALIGN_LEFT | wx.RIGHT, 15)
box.Add(paragraphSpacingSizer, 0, wx.EXPAND)
box.Add((1, 1), 1, wx.EXPAND) # Expandable Spacer
# Error Message Text
self.errorTxt = wx.StaticText(self, -1, "")
box.Add(self.errorTxt, 0, wx.EXPAND | wx.GROW | wx.ALIGN_BOTTOM | wx.LEFT | wx.RIGHT | wx.BOTTOM, 15)
# Units Header
unitSizer = wx.BoxSizer(wx.HORIZONTAL)
lblUnits = wx.StaticText(self, -1, _("Units:"))
unitSizer.Add(lblUnits, 0, wx.RIGHT, 20)
# Inches
self.rbUnitsInches = wx.RadioButton(self, -1, _("inches"), style=wx.RB_GROUP)
unitSizer.Add(self.rbUnitsInches, 0, wx.RIGHT, 10)
# Centimeters
self.rbUnitsCentimeters = wx.RadioButton(self, -1, _("cm"))
unitSizer.Add(self.rbUnitsCentimeters, 0)
box.Add(unitSizer, 0, wx.EXPAND | wx.ALIGN_LEFT | wx.LEFT | wx.TOP | wx.RIGHT | wx.BOTTOM, 15)
if TransanaGlobal.configData.formatUnits == 'cm':
self.rbUnitsCentimeters.SetValue(True)
else:
self.rbUnitsInches.SetValue(True)
# Bind the event for selecting Units
self.Bind(wx.EVT_RADIOBUTTON, self.OnIndentUnitSelect)
# Call the event based on the initial units
self.OnIndentUnitSelect(None)
# Define box as the form's main sizer
self.SetSizer(box)
# Fit the form to the widgets created
self.Fit()
# Set this as the minimum size for the form.
self.SetSizeHints(minW = self.GetSize()[0], minH = self.GetSize()[1])
        # Tell the form to maintain the layout and have it set the initial Layout
self.SetAutoLayout(True)
self.Layout()
# Under wxPython 2.6.1.0-unicode, this form is throwing a segment fault when the color gets changed.
# The following variable prevents that!
self.closing = False
def ConvertValueToStr(self, value):
""" Get a string representation of a value in the appropriate measurement units """
# if we have an empty value ...
if (value == '') or (value == None):
# ... set the value string to blank
valStr = ''
# if we have a value ...
else:
# ... convert to the appropriate units
if self.rbUnitsInches.GetValue():
value = float(value) / 254.0
else:
value = float(value) / 100.0
            # Now convert the float to a string
valStr = "%4.3f" % value
return valStr
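    # Worked example for the conversion above: internal values are tenths of a
    # millimetre, so 254 becomes "1.000" when inches are selected and 100 becomes
    # "1.000" when centimetres are selected.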
def ConvertStr(self, valStr):
""" Convert a string representation of a number into the approriate value for the RTC, including converting
the units appropriately """
# If Units is INCHES, the string value is in CM, since this gets called as part of conversion!
if self.rbUnitsInches.GetValue():
value = float(valStr) * 100.0
else:
value = float(valStr) * 254.0
return self.ConvertValueToStr(value)
def OnAlignSelect(self, event):
""" Handle the Select event for Paragraph Alignment """
# Convert the control's label into the appropriate wx alignment constant
if self.lbAlign.GetStringSelection() == unicode(_("Left"), 'utf8'):
self.formatData.paragraphAlignment = wx.TEXT_ALIGNMENT_LEFT
elif self.lbAlign.GetStringSelection() == unicode(_("Center"), 'utf8'):
self.formatData.paragraphAlignment = wx.TEXT_ALIGNMENT_CENTRE
elif self.lbAlign.GetStringSelection() == unicode(_("Right"), 'utf8'):
self.formatData.paragraphAlignment = wx.TEXT_ALIGNMENT_RIGHT
def OnLineSpacingSelect(self, event):
""" Handle the Select event for Line Spacing """
# Convert the control's label into the appropriate wx.RTC Line Spacing constant or integer value
if self.lbSpacing.GetStringSelection() == unicode(_("Single"), 'utf8'):
self.formatData.paragraphLineSpacing = wx.TEXT_ATTR_LINE_SPACING_NORMAL
elif self.lbSpacing.GetStringSelection() == unicode(_("11 point"), 'utf8'):
self.formatData.paragraphLineSpacing = 11
elif self.lbSpacing.GetStringSelection() == unicode(_("12 point"), 'utf8'):
self.formatData.paragraphLineSpacing = 12
elif self.lbSpacing.GetStringSelection() == unicode(_("One and a half"), 'utf8'):
self.formatData.paragraphLineSpacing = wx.TEXT_ATTR_LINE_SPACING_HALF
elif self.lbSpacing.GetStringSelection() == unicode(_("Double"), 'utf8'):
self.formatData.paragraphLineSpacing = wx.TEXT_ATTR_LINE_SPACING_TWICE
elif self.lbSpacing.GetStringSelection() == unicode(_("Two and a half"), 'utf8'):
self.formatData.paragraphLineSpacing = 25
elif self.lbSpacing.GetStringSelection() == unicode(_("Triple"), 'utf8'):
self.formatData.paragraphLineSpacing = 30
def OnIndentUnitSelect(self, event):
""" Handle the selection of one of the Units radio buttons """
# The Left Indent from the formatting point of view is the sum of the LeftIndent and LeftSubIndent values!
if (self.formatData.paragraphLeftIndent != None) and (self.formatData.paragraphLeftSubIndent != None):
if self.txtLeftIndent.GetValue() == '':
leftIndentVal = self.formatData.paragraphLeftIndent + self.formatData.paragraphLeftSubIndent
leftIndentValStr = self.ConvertValueToStr(leftIndentVal)
else:
leftIndentValStr = self.ConvertStr(self.txtLeftIndent.GetValue())
else:
leftIndentValStr = ''
self.txtLeftIndent.SetValue(leftIndentValStr)
# The First Line Indent from the formatting point of view is the negative of the LeftSubIndent values!
if (self.formatData.paragraphLeftSubIndent != None):
if self.txtFirstLineIndent.GetValue() == '':
firstLineIndentVal = 0 - self.formatData.paragraphLeftSubIndent
firstLineIndentValStr = self.ConvertValueToStr(firstLineIndentVal)
else:
firstLineIndentValStr = self.ConvertStr(self.txtFirstLineIndent.GetValue())
else:
firstLineIndentValStr = ''
self.txtFirstLineIndent.SetValue(firstLineIndentValStr)
# Right Indent
if (self.formatData.paragraphRightIndent != None):
if self.txtRightIndent.GetValue() == '':
# The Right Indent is just the RightIndent value!
rightIndentVal = self.formatData.paragraphRightIndent
rightIndentValStr = self.ConvertValueToStr(rightIndentVal)
else:
rightIndentValStr = self.ConvertStr(self.txtRightIndent.GetValue())
else:
rightIndentValStr = ''
self.txtRightIndent.SetValue(rightIndentValStr)
# Spacing Before
if (self.formatData.paragraphSpaceBefore != None):
if self.txtSpacingBefore.GetValue() == '':
spaceBeforeVal = self.formatData.paragraphSpaceBefore
spaceBeforeValStr = self.ConvertValueToStr(spaceBeforeVal)
else:
spaceBeforeValStr = self.ConvertStr(self.txtSpacingBefore.GetValue())
else:
spaceBeforeValStr = ''
self.txtSpacingBefore.SetValue(spaceBeforeValStr)
# Spacing After
if (self.formatData.paragraphSpaceAfter != None):
if self.txtSpacingAfter.GetValue() == '':
spaceAfterVal = self.formatData.paragraphSpaceAfter
spaceAfterValStr = self.ConvertValueToStr(spaceAfterVal)
else:
spaceAfterValStr = self.ConvertStr(self.txtSpacingAfter.GetValue())
else:
spaceAfterValStr = ''
self.txtSpacingAfter.SetValue(spaceAfterValStr)
# Update the Configuration data to reflect the selected unit type
if self.rbUnitsInches.GetValue():
TransanaGlobal.configData.formatUnits = 'in'
else:
TransanaGlobal.configData.formatUnits = 'cm'
# Save the configuration change immediately
TransanaGlobal.configData.SaveConfiguration()
def OnNumOnly(self, event):
""" EVT_CHAR handler for controls that MUST be numeric values """
# Determine which control sent the event
ctrl = event.GetEventObject()
# Assume we should NOT skip to the control's parent's event handler unless proven otherwise.
# (The character will NOT be processed unless we call Skip().)
shouldSkip = False
# If the ALT, CMD, CTRL, META, or SHIFT key is down ...
if event.AltDown() or event.CmdDown() or event.ControlDown() or event.MetaDown() or event.ShiftDown():
# ... call event.Skip()
event.Skip()
# If MINUS is pressed ...
elif event.GetKeyCode() == ord('-'):
# ... this key is only valid for the FIRST POSITION of the FIRST LINE INDENT control
if (ctrl.GetId() == self.txtFirstLineIndent.GetId()) and (ctrl.GetInsertionPoint() == 0):
# ... so if those are the conditions, Skip() is okay. It's okay to add the character.
shouldSkip = True
# if DECIMAL is pressed ...
elif event.GetKeyCode() == ord('.'):
# If there is no decimal point already, OR if the decimal is inside the current selection, which will be over-written ...
if (ctrl.GetValue().find('.') == -1) or (ctrl.GetStringSelection().find('.') > -1):
# ... then it's okay to add a decimal point
shouldSkip = True
# if a DIGIT is pressed ...
elif event.GetKeyCode() in [ord('0'), ord('1'), ord('2'), ord('3'), ord('4'), ord('5'), ord('6'), ord('7'), ord('8'), ord('9')]:
# ... then it's okay to add the character
shouldSkip = True
# if cursor left, cursor right, backspace, or Delete are pressed ...
elif event.GetKeyCode() in [wx.WXK_LEFT, wx.WXK_RIGHT, wx.WXK_BACK, wx.WXK_DELETE]:
# ... then it's okay to add the character
shouldSkip = True
# If we should process the character ...
if shouldSkip:
# ... then process the character!
event.Skip()
def OnKeyUp(self, event):
""" Handle the EVT_KEY_UP event for margins -- provides error checking """
# Process the Key Up event at the parent level
event.Skip()
# Convert Left Indent to a float
try:
leftVal = float(self.txtLeftIndent.GetValue())
except:
leftVal = 0.0
# Convert First Line Indent to a float
try:
firstLineVal = float(self.txtFirstLineIndent.GetValue())
except:
firstLineVal = 0.0
# Convert Right Indent to a float
try:
rightVal = float(self.txtRightIndent.GetValue())
except:
rightVal = 0.0
# Convert Form Values to RichTextCtrl values
# ... left subindent is 0 minus first line indent!
leftSubIndent = 0.0 - firstLineVal
# ... left indent is left indent plus first line indent!
leftIndent = leftVal + firstLineVal
# ... right indent is right indent.
rightIndent = rightVal
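        # Worked example of the conversion above: Left = 1.0 and First Line = 0.5
        # give leftIndent 1.5 and leftSubIndent -0.5, which is the form the
        # RichTextCtrl attributes expect.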
# Initialize the error message
errMsg = ''
# if left indent > 4 inches ...
if (self.rbUnitsInches.GetValue() and (leftIndent > 4.0)) or \
(self.rbUnitsCentimeters.GetValue() and (leftIndent > 10.0)):
# ... suggest that the left margin may be too big!
errMsg = _("Left Margin may be too large.\n")
# If left indent < 0 ...
if (leftIndent < 0.0):
# ... report that the first line indent is too big.
errMsg += _("First Line Indent exceeds Left Margin.\n")
# if right indent > 4 inches ...
if (self.rbUnitsInches.GetValue() and (rightIndent > 4.0)) or \
(self.rbUnitsCentimeters.GetValue() and (rightIndent > 10.0)):
# ... suggest that the right margin may be too big!
errMsg += _("Right Margin may be too large.\n")
# Display the Error Message
self.errorTxt.SetLabel(errMsg)
| gpl-2.0 | 4,868,448,822,781,293,000 | 46.321995 | 136 | 0.630169 | false |
michelesr/network-monitor-server | src/netmapping.py | 1 | 2263 | #! /usr/bin/env python
"""
Framework per il monitoraggio della rete
Modulo per la scansione multithread della rete
"""
from threading import Thread
from addresses import get_network_address
from scan import host_discovery
from host import Host
from time import sleep
from sys import argv, exit
# class representing the scanning engine
class NetMapper(Thread):
    # constructor
def __init__(self):
Thread.__init__(self)
self.setDaemon(True)
self.threads = []
def run(self):
"""
Metodo run del thread, viene richiamato tramite start().
Viene eseguito un loop che cerca a intervalli di 30 secondi
nuovi hosts sulla rete e per ogni host che trova inizializza
un thread che ne raccoglie le informazioni.
I vari thread vengono raccolti all'interno di una lista.
L'indirizzo della rete viene preso dalla linea di comando o se
non fornito si cerca di indovinarlo a partire dall'ip della
macchina (assumendo che la netmask sia 255.255.255.0
come spesso si verifica).
"""
self.known_hosts = []
if '-n' in argv:
network_address = argv[argv.index('-n') + 1]
elif '--network' in argv:
network_address = argv[argv.index('--network') + 1]
else:
network_address = get_network_address()
if not network_address:
print("Cannot find network address... program will continue without network scanning!\n" +
"If this trouble persist, try providing the network address in the launch command!\n" +
"Press CTRL-C to terminate!")
exit()
while(True):
hosts = host_discovery(network_address)
for host in hosts:
if not (host in self.known_hosts):
self.known_hosts.append(host)
print("Starting thread for host %s" % host)
thread = Host(host)
self.threads.append(thread)
thread.start()
for thread in self.threads:
                if not thread.is_alive():
self.known_hosts.remove(thread.info['ip'])
sleep(30)
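# Hypothetical standalone entry point (not part of the original module). The
# project normally starts this thread from its main program; the loop below
# merely keeps the process alive so the daemon thread can keep scanning.
if __name__ == '__main__':
    scanner = NetMapper()
    scanner.start()
    while True:
        sleep(60)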
| gpl-3.0 | -3,415,007,167,212,665,300 | 30.430556 | 105 | 0.593902 | false |
kaushik94/sympy | sympy/utilities/tests/test_autowrap.py | 4 | 14713 | # Tests that require installed backends go into
# sympy/test_external/test_autowrap
import os
import tempfile
import shutil
from sympy.core import symbols, Eq
from sympy.core.compatibility import StringIO
from sympy.utilities.autowrap import (autowrap, binary_function,
CythonCodeWrapper, UfuncifyCodeWrapper, CodeWrapper)
from sympy.utilities.codegen import (
CCodeGen, C99CodeGen, CodeGenArgumentListError, make_routine
)
from sympy.utilities.pytest import raises
from sympy.utilities.tmpfiles import TmpFileManager
def get_string(dump_fn, routines, prefix="file", **kwargs):
"""Wrapper for dump_fn. dump_fn writes its results to a stream object and
this wrapper returns the contents of that stream as a string. This
auxiliary function is used by many tests below.
The header and the empty lines are not generator to facilitate the
testing of the output.
"""
output = StringIO()
dump_fn(routines, output, prefix, **kwargs)
source = output.getvalue()
output.close()
return source
def test_cython_wrapper_scalar_function():
x, y, z = symbols('x,y,z')
expr = (x + y)*z
routine = make_routine("test", expr)
code_gen = CythonCodeWrapper(CCodeGen())
source = get_string(code_gen.dump_pyx, [routine])
expected = (
"cdef extern from 'file.h':\n"
" double test(double x, double y, double z)\n"
"\n"
"def test_c(double x, double y, double z):\n"
"\n"
" return test(x, y, z)")
assert source == expected
def test_cython_wrapper_outarg():
from sympy import Equality
x, y, z = symbols('x,y,z')
code_gen = CythonCodeWrapper(C99CodeGen())
routine = make_routine("test", Equality(z, x + y))
source = get_string(code_gen.dump_pyx, [routine])
expected = (
"cdef extern from 'file.h':\n"
" void test(double x, double y, double *z)\n"
"\n"
"def test_c(double x, double y):\n"
"\n"
" cdef double z = 0\n"
" test(x, y, &z)\n"
" return z")
assert source == expected
def test_cython_wrapper_inoutarg():
from sympy import Equality
x, y, z = symbols('x,y,z')
code_gen = CythonCodeWrapper(C99CodeGen())
routine = make_routine("test", Equality(z, x + y + z))
source = get_string(code_gen.dump_pyx, [routine])
expected = (
"cdef extern from 'file.h':\n"
" void test(double x, double y, double *z)\n"
"\n"
"def test_c(double x, double y, double z):\n"
"\n"
" test(x, y, &z)\n"
" return z")
assert source == expected
def test_cython_wrapper_compile_flags():
from sympy import Equality
x, y, z = symbols('x,y,z')
routine = make_routine("test", Equality(z, x + y))
code_gen = CythonCodeWrapper(CCodeGen())
expected = """\
try:
from setuptools import setup
from setuptools import Extension
except ImportError:
from distutils.core import setup
from distutils.extension import Extension
from Cython.Build import cythonize
cy_opts = {}
ext_mods = [Extension(
'wrapper_module_%(num)s', ['wrapper_module_%(num)s.pyx', 'wrapped_code_%(num)s.c'],
include_dirs=[],
library_dirs=[],
libraries=[],
extra_compile_args=['-std=c99'],
extra_link_args=[]
)]
setup(ext_modules=cythonize(ext_mods, **cy_opts))
""" % {'num': CodeWrapper._module_counter}
temp_dir = tempfile.mkdtemp()
TmpFileManager.tmp_folder(temp_dir)
setup_file_path = os.path.join(temp_dir, 'setup.py')
code_gen._prepare_files(routine, build_dir=temp_dir)
with open(setup_file_path) as f:
setup_text = f.read()
assert setup_text == expected
code_gen = CythonCodeWrapper(CCodeGen(),
include_dirs=['/usr/local/include', '/opt/booger/include'],
library_dirs=['/user/local/lib'],
libraries=['thelib', 'nilib'],
extra_compile_args=['-slow-math'],
extra_link_args=['-lswamp', '-ltrident'],
cythonize_options={'compiler_directives': {'boundscheck': False}}
)
expected = """\
try:
from setuptools import setup
from setuptools import Extension
except ImportError:
from distutils.core import setup
from distutils.extension import Extension
from Cython.Build import cythonize
cy_opts = {'compiler_directives': {'boundscheck': False}}
ext_mods = [Extension(
'wrapper_module_%(num)s', ['wrapper_module_%(num)s.pyx', 'wrapped_code_%(num)s.c'],
include_dirs=['/usr/local/include', '/opt/booger/include'],
library_dirs=['/user/local/lib'],
libraries=['thelib', 'nilib'],
extra_compile_args=['-slow-math', '-std=c99'],
extra_link_args=['-lswamp', '-ltrident']
)]
setup(ext_modules=cythonize(ext_mods, **cy_opts))
""" % {'num': CodeWrapper._module_counter}
code_gen._prepare_files(routine, build_dir=temp_dir)
with open(setup_file_path) as f:
setup_text = f.read()
assert setup_text == expected
expected = """\
try:
from setuptools import setup
from setuptools import Extension
except ImportError:
from distutils.core import setup
from distutils.extension import Extension
from Cython.Build import cythonize
cy_opts = {'compiler_directives': {'boundscheck': False}}
import numpy as np
ext_mods = [Extension(
'wrapper_module_%(num)s', ['wrapper_module_%(num)s.pyx', 'wrapped_code_%(num)s.c'],
include_dirs=['/usr/local/include', '/opt/booger/include', np.get_include()],
library_dirs=['/user/local/lib'],
libraries=['thelib', 'nilib'],
extra_compile_args=['-slow-math', '-std=c99'],
extra_link_args=['-lswamp', '-ltrident']
)]
setup(ext_modules=cythonize(ext_mods, **cy_opts))
""" % {'num': CodeWrapper._module_counter}
code_gen._need_numpy = True
code_gen._prepare_files(routine, build_dir=temp_dir)
with open(setup_file_path) as f:
setup_text = f.read()
assert setup_text == expected
TmpFileManager.cleanup()
def test_cython_wrapper_unique_dummyvars():
from sympy import Dummy, Equality
x, y, z = Dummy('x'), Dummy('y'), Dummy('z')
x_id, y_id, z_id = [str(d.dummy_index) for d in [x, y, z]]
expr = Equality(z, x + y)
routine = make_routine("test", expr)
code_gen = CythonCodeWrapper(CCodeGen())
source = get_string(code_gen.dump_pyx, [routine])
expected_template = (
"cdef extern from 'file.h':\n"
" void test(double x_{x_id}, double y_{y_id}, double *z_{z_id})\n"
"\n"
"def test_c(double x_{x_id}, double y_{y_id}):\n"
"\n"
" cdef double z_{z_id} = 0\n"
" test(x_{x_id}, y_{y_id}, &z_{z_id})\n"
" return z_{z_id}")
expected = expected_template.format(x_id=x_id, y_id=y_id, z_id=z_id)
assert source == expected
def test_autowrap_dummy():
x, y, z = symbols('x y z')
# Uses DummyWrapper to test that codegen works as expected
f = autowrap(x + y, backend='dummy')
assert f() == str(x + y)
assert f.args == "x, y"
assert f.returns == "nameless"
f = autowrap(Eq(z, x + y), backend='dummy')
assert f() == str(x + y)
assert f.args == "x, y"
assert f.returns == "z"
f = autowrap(Eq(z, x + y + z), backend='dummy')
assert f() == str(x + y + z)
assert f.args == "x, y, z"
assert f.returns == "z"
def test_autowrap_args():
x, y, z = symbols('x y z')
raises(CodeGenArgumentListError, lambda: autowrap(Eq(z, x + y),
backend='dummy', args=[x]))
f = autowrap(Eq(z, x + y), backend='dummy', args=[y, x])
assert f() == str(x + y)
assert f.args == "y, x"
assert f.returns == "z"
raises(CodeGenArgumentListError, lambda: autowrap(Eq(z, x + y + z),
backend='dummy', args=[x, y]))
f = autowrap(Eq(z, x + y + z), backend='dummy', args=[y, x, z])
assert f() == str(x + y + z)
assert f.args == "y, x, z"
assert f.returns == "z"
f = autowrap(Eq(z, x + y + z), backend='dummy', args=(y, x, z))
assert f() == str(x + y + z)
assert f.args == "y, x, z"
assert f.returns == "z"
def test_autowrap_store_files():
x, y = symbols('x y')
tmp = tempfile.mkdtemp()
TmpFileManager.tmp_folder(tmp)
f = autowrap(x + y, backend='dummy', tempdir=tmp)
assert f() == str(x + y)
assert os.access(tmp, os.F_OK)
TmpFileManager.cleanup()
def test_autowrap_store_files_issue_gh12939():
x, y = symbols('x y')
tmp = './tmp'
try:
f = autowrap(x + y, backend='dummy', tempdir=tmp)
assert f() == str(x + y)
assert os.access(tmp, os.F_OK)
finally:
shutil.rmtree(tmp)
def test_binary_function():
x, y = symbols('x y')
f = binary_function('f', x + y, backend='dummy')
assert f._imp_() == str(x + y)
def test_ufuncify_source():
x, y, z = symbols('x,y,z')
code_wrapper = UfuncifyCodeWrapper(C99CodeGen("ufuncify"))
routine = make_routine("test", x + y + z)
source = get_string(code_wrapper.dump_c, [routine])
expected = """\
#include "Python.h"
#include "math.h"
#include "numpy/ndarraytypes.h"
#include "numpy/ufuncobject.h"
#include "numpy/halffloat.h"
#include "file.h"
static PyMethodDef wrapper_module_%(num)sMethods[] = {
{NULL, NULL, 0, NULL}
};
static void test_ufunc(char **args, npy_intp *dimensions, npy_intp* steps, void* data)
{
npy_intp i;
npy_intp n = dimensions[0];
char *in0 = args[0];
char *in1 = args[1];
char *in2 = args[2];
char *out0 = args[3];
npy_intp in0_step = steps[0];
npy_intp in1_step = steps[1];
npy_intp in2_step = steps[2];
npy_intp out0_step = steps[3];
for (i = 0; i < n; i++) {
*((double *)out0) = test(*(double *)in0, *(double *)in1, *(double *)in2);
in0 += in0_step;
in1 += in1_step;
in2 += in2_step;
out0 += out0_step;
}
}
PyUFuncGenericFunction test_funcs[1] = {&test_ufunc};
static char test_types[4] = {NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE};
static void *test_data[1] = {NULL};
#if PY_VERSION_HEX >= 0x03000000
static struct PyModuleDef moduledef = {
PyModuleDef_HEAD_INIT,
"wrapper_module_%(num)s",
NULL,
-1,
wrapper_module_%(num)sMethods,
NULL,
NULL,
NULL,
NULL
};
PyMODINIT_FUNC PyInit_wrapper_module_%(num)s(void)
{
PyObject *m, *d;
PyObject *ufunc0;
m = PyModule_Create(&moduledef);
if (!m) {
return NULL;
}
import_array();
import_umath();
d = PyModule_GetDict(m);
ufunc0 = PyUFunc_FromFuncAndData(test_funcs, test_data, test_types, 1, 3, 1,
PyUFunc_None, "wrapper_module_%(num)s", "Created in SymPy with Ufuncify", 0);
PyDict_SetItemString(d, "test", ufunc0);
Py_DECREF(ufunc0);
return m;
}
#else
PyMODINIT_FUNC initwrapper_module_%(num)s(void)
{
PyObject *m, *d;
PyObject *ufunc0;
m = Py_InitModule("wrapper_module_%(num)s", wrapper_module_%(num)sMethods);
if (m == NULL) {
return;
}
import_array();
import_umath();
d = PyModule_GetDict(m);
ufunc0 = PyUFunc_FromFuncAndData(test_funcs, test_data, test_types, 1, 3, 1,
PyUFunc_None, "wrapper_module_%(num)s", "Created in SymPy with Ufuncify", 0);
PyDict_SetItemString(d, "test", ufunc0);
Py_DECREF(ufunc0);
}
#endif""" % {'num': CodeWrapper._module_counter}
assert source == expected
def test_ufuncify_source_multioutput():
x, y, z = symbols('x,y,z')
var_symbols = (x, y, z)
expr = x + y**3 + 10*z**2
code_wrapper = UfuncifyCodeWrapper(C99CodeGen("ufuncify"))
routines = [make_routine("func{}".format(i), expr.diff(var_symbols[i]), var_symbols) for i in range(len(var_symbols))]
source = get_string(code_wrapper.dump_c, routines, funcname='multitest')
expected = """\
#include "Python.h"
#include "math.h"
#include "numpy/ndarraytypes.h"
#include "numpy/ufuncobject.h"
#include "numpy/halffloat.h"
#include "file.h"
static PyMethodDef wrapper_module_%(num)sMethods[] = {
{NULL, NULL, 0, NULL}
};
static void multitest_ufunc(char **args, npy_intp *dimensions, npy_intp* steps, void* data)
{
npy_intp i;
npy_intp n = dimensions[0];
char *in0 = args[0];
char *in1 = args[1];
char *in2 = args[2];
char *out0 = args[3];
char *out1 = args[4];
char *out2 = args[5];
npy_intp in0_step = steps[0];
npy_intp in1_step = steps[1];
npy_intp in2_step = steps[2];
npy_intp out0_step = steps[3];
npy_intp out1_step = steps[4];
npy_intp out2_step = steps[5];
for (i = 0; i < n; i++) {
*((double *)out0) = func0(*(double *)in0, *(double *)in1, *(double *)in2);
*((double *)out1) = func1(*(double *)in0, *(double *)in1, *(double *)in2);
*((double *)out2) = func2(*(double *)in0, *(double *)in1, *(double *)in2);
in0 += in0_step;
in1 += in1_step;
in2 += in2_step;
out0 += out0_step;
out1 += out1_step;
out2 += out2_step;
}
}
PyUFuncGenericFunction multitest_funcs[1] = {&multitest_ufunc};
static char multitest_types[6] = {NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE};
static void *multitest_data[1] = {NULL};
#if PY_VERSION_HEX >= 0x03000000
static struct PyModuleDef moduledef = {
PyModuleDef_HEAD_INIT,
"wrapper_module_%(num)s",
NULL,
-1,
wrapper_module_%(num)sMethods,
NULL,
NULL,
NULL,
NULL
};
PyMODINIT_FUNC PyInit_wrapper_module_%(num)s(void)
{
PyObject *m, *d;
PyObject *ufunc0;
m = PyModule_Create(&moduledef);
if (!m) {
return NULL;
}
import_array();
import_umath();
d = PyModule_GetDict(m);
ufunc0 = PyUFunc_FromFuncAndData(multitest_funcs, multitest_data, multitest_types, 1, 3, 3,
PyUFunc_None, "wrapper_module_%(num)s", "Created in SymPy with Ufuncify", 0);
PyDict_SetItemString(d, "multitest", ufunc0);
Py_DECREF(ufunc0);
return m;
}
#else
PyMODINIT_FUNC initwrapper_module_%(num)s(void)
{
PyObject *m, *d;
PyObject *ufunc0;
m = Py_InitModule("wrapper_module_%(num)s", wrapper_module_%(num)sMethods);
if (m == NULL) {
return;
}
import_array();
import_umath();
d = PyModule_GetDict(m);
ufunc0 = PyUFunc_FromFuncAndData(multitest_funcs, multitest_data, multitest_types, 1, 3, 3,
PyUFunc_None, "wrapper_module_%(num)s", "Created in SymPy with Ufuncify", 0);
PyDict_SetItemString(d, "multitest", ufunc0);
Py_DECREF(ufunc0);
}
#endif""" % {'num': CodeWrapper._module_counter}
assert source == expected
| bsd-3-clause | 7,184,166,077,250,997,000 | 30.371002 | 122 | 0.601373 | false |
bsautrey/python-mapreduce | mapper.py | 1 | 6535 | # mapper.py is the map step of mapreduce.
import sys,random,uuid,os,fcntl,gzip,errno
from time import time
from glob import glob
from math import fmod
import ujson
from shuffler import Shuffle
from configs_parser import get_configs
class Map():
def __init__(self,map_function_name,project_name,input_dirs,server_id,job_name,server_names,hold_state=False,downsample=1.0,auxiliary_data_name=None,max_number_dumped_items_shuffler=500000,simultaneous_files_in_redis=10):
# configs
configs = get_configs(self.__module__)
self.base_dir = configs['base_dir']
self.base_projects_dir = configs['base_projects_dir']
self.auxiliary_dir = configs['auxiliary_dir']
self.number_of_servers_per_location = configs['number_of_servers_per_location']
# variables
self.map_function_name = map_function_name
self.project_name = project_name
self.input_dirs = input_dirs
self.server_id = server_id
self.job_name = job_name
self.hold_state = hold_state
self.downsample = downsample
self.max_number_dumped_items_shuffler = max_number_dumped_items_shuffler
self.simultaneous_files_in_redis = simultaneous_files_in_redis
# shared references
self.map_function = None
self.shuffler = Shuffle(job_name,server_names,server_id,max_number_dumped_items_shuffler,simultaneous_files_in_redis)
self.state = self._read_state()
self.file_names = self._get_file_names()
self.auxiliary_data = self._get_auxiliary_data(auxiliary_data_name)
def map(self):
print 'MAPPING...'
project_path = self.base_projects_dir +'/'+ self.project_name
if project_path in sys.path:
sys.path.remove(project_path)
sys.path.insert(0,project_path)
import map_functions
reload(map_functions)
from map_functions import MapFunctions
self.map_function = MapFunctions(self.map_function_name).get_map_function()
for file_name in self.file_names:
print 'READING:',file_name
file = self._get_file_reference(file_name)
for line in file:
items = self.map_function(line,self.auxiliary_data)
for item in items:
key = item[0]
self.shuffler.append(key,item)
file.close()
if self.hold_state:
time_stamp = time()
self.state[file_name] = time_stamp
self._write_state()
# shuffle
print 'SHUFFLING...'
self.shuffler.shuffle()
def _get_file_names(self):
all_file_names = []
for input_dir in self.input_dirs:
file_names = glob(input_dir+'/*')
for file_name in file_names:
if self._file_is_mine(file_name) and self._check_file(file_name):
all_file_names.append(file_name)
elif self._have_seen(file_name):
print 'HAVE SEEN/REJECTED FILE:',file_name
print 'ALL FILES:',all_file_names
return all_file_names
def _get_file_reference(self,file_name):
file_extension = file_name[-3:]
if file_extension == '.gz':
file = gzip.open(file_name)
else:
file = open(file_name)
return file
def _file_is_mine(self,file_name):
bucket_id = fmod(self.server_id,self.number_of_servers_per_location)
h = self._hash(file_name)
if fmod(h,self.number_of_servers_per_location) == bucket_id and self._downsample() and not self._have_seen(file_name):
print 'FILE IS MINE:',file_name
return True
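    # Example of the bucketing above: with number_of_servers_per_location = 4 and
    # server_id = 6, this server owns exactly those files whose name hashes to
    # 2 modulo 4 (and which also pass the downsample and seen-before checks).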
def _downsample(self):
rand = random.uniform(0,1)
if rand <= self.downsample:
return True
else:
return False
def _have_seen(self,file_name):
if file_name in self.state:
return True
else:
return False
def _read_state(self):
global_state = {}
token = '/STATE_'+ self.job_name +'_SERVER_ID_'+str(self.server_id)+'_'
state_file_names = filter(lambda state_file_name: token in state_file_name,glob(self.auxiliary_dir+'/*'))
for state_file_name in state_file_names:
if self.hold_state:
print 'READING STATE:',state_file_name
f = open(state_file_name)
s = f.read()
f.close()
state = ujson.loads(s)
for file_name in state:
print '\tINCLUDING FILE:',file_name
time_stamp = state[file_name]
global_state[file_name] = time_stamp
print 'DELETING STATE:',state_file_name
os.remove(state_file_name)
print 'GLOBAL STATE:',global_state
return global_state
def _write_state(self):
output_file_name = self.auxiliary_dir +'/STATE_'+ self.job_name +'_SERVER_ID_'+str(self.server_id)+'_'+ str(uuid.uuid4()) +'.data'
f = open(output_file_name,'w')
s = ujson.dumps(self.state)
f.write(s)
f.close()
print 'WROTE STATE:',output_file_name
def _hash(self,file_name):
random.seed(file_name)
h = int(random.uniform(0,1)*1000000)
return h
def _get_auxiliary_data(self,auxiliary_data_name):
if auxiliary_data_name:
fn = self.auxiliary_dir +'/'+ auxiliary_data_name + '.data'
f = open(fn)
s = f.read()
f.close()
auxiliary_data = ujson.loads(s)
return auxiliary_data
def _check_file(self,file_name):
file = open(file_name)
if self._lock_file(file):
self._unlock_file(file)
file.close()
return True
else:
file.close()
return False
def _lock_file(self,file):
try:
# attempt lock
fcntl.flock(file, fcntl.LOCK_EX | fcntl.LOCK_NB)
return True
except IOError as e:
if e.errno != errno.EAGAIN:
raise
else:
return False
def _unlock_file(self,file):
fcntl.flock(file, fcntl.LOCK_UN)
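# Hypothetical invocation sketch (not part of the original module). Every
# argument value below is a placeholder; a real run also needs the configs read
# by configs_parser, a map_functions.py inside the project directory, and the
# Redis-backed shuffler to be reachable.
if __name__ == '__main__':
    mapper = Map(map_function_name='example_map_function',
                 project_name='example_project',
                 input_dirs=['/tmp/example_input'],
                 server_id=0,
                 job_name='example_job',
                 server_names=['localhost'])
    mapper.map()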
| mit | -6,503,421,937,321,776,000 | 34.521739 | 225 | 0.55088 | false |
drewcsillag/skunkweb | pylibs/pargen/LALR.py | 1 | 2138 | #
# Copyright (C) 2001 Andrew T. Csillag <[email protected]>
#
# You may distribute under the terms of either the GNU General
# Public License or the SkunkWeb License, as specified in the
# README file.
#
"""LALR(1) operations"""
import LR0
import LR1
import copy
from Common import *
def canonicalLRToLALR(C):
"""converts a canonical LR(1) set of items to an LALR(1) set"""
nC = []
for i in range(len(C)):
I = C[i]
#since we're building nC one bit at a time, there will be at
#most one duplicate of I in nC
#find dup rules (ignoring lookaheads)
try:
dup = nC.index(I)
        except ValueError: #no duplicate, add to nC
nC.append(copy.deepcopy(I))
else: #duplicate found, update lookaheads
for ncItem, CItem in map(None, nC[dup], I):
ncItem.lookaheads.addSet(CItem.lookaheads)
return nC
def compareSet(old, new):
"""returns:
1 if new has lookaheads not in old
0 otherwise
"""
for oldItem, newItem in map(None, old, new):
if not oldItem.lookaheads.contains(newItem.lookaheads):
return 1
return 0
def updateLookaheads(old, new):
"""add the lookaheads from new to old"""
for oldItem, newItem in map(None, old, new):
oldItem.lookaheads.addSet(newItem.lookaheads)
def items(ruleSet, terminals, nonTerminals):
"""compute LALR1 items for ruleset"""
symbols = nonTerminals + terminals
#start with closure of [ [S' -> S, $] ]
C = [LR1.closure([startItem], ruleSet, terminals)]
added = 1
while added:
added = 0
for I in C:
for X in symbols:
g = LR1.goto(I, X, ruleSet, terminals)
if g and not g in C: #if no core already there:
added = 1
C.append(g)
elif g and g in C: #if core is there:
target = C[C.index(g)]
if compareSet(target, g):
added = 1
updateLookaheads(target, g)
return C
| gpl-2.0 | 8,353,669,085,466,037,000 | 28.287671 | 68 | 0.563611 | false |
gchrupala/reimaginet | imaginet/defn/audiovis4.py | 1 | 5361 | from funktional.layer import Layer, Dense, StackedGRU, StackedGRUH0, Convolution1D, \
Embedding, OneHot, clipped_rectify, sigmoid, steeper_sigmoid, tanh, CosineDistance,\
last, softmax3d, params, Attention
import funktional.context as context
from funktional.layer import params
import imaginet.task as task
from funktional.util import autoassign
import funktional.util as util
from funktional.util import orthogonal, xavier, uniform
import theano.tensor as T
import theano
import zipfile
import numpy
import StringIO
import json
import cPickle as pickle
from theano.tensor.shared_randomstreams import RandomStreams
from imaginet.simple_data import vector_padder
class Encoder(Layer):
def __init__(self, size_vocab, _size_embed, size, depth, # TODODODO remove size_embed from this
residual=False, fixed=False, activation=clipped_rectify,
gate_activation=steeper_sigmoid, init_in=orthogonal, init_recur=orthogonal,
filter_length=6, filter_size=1024, stride=3): # FIXME use a more reasonable default
autoassign(locals())
self.Conv = Convolution1D(self.size_vocab, self.filter_length, self.filter_size, stride=self.stride)
self.GRU = StackedGRUH0(self.filter_size, self.size, self.depth,
activation=self.activation, residual=self.residual,
gate_activation=self.gate_activation,
init_in=self.init_in, init_recur=self.init_recur)
def params(self):
return params(self.Conv, self.GRU)
def __call__(self, input):
return self.GRU(self.Conv(input))
class Visual(task.Task):
def __init__(self, config):
autoassign(locals())
self.margin_size = config.get('margin_size', 0.2)
self.updater = util.Adam(max_norm=config['max_norm'], lr=config['lr'])
self.Encode = Encoder(config['size_vocab'],
config['size_embed'], config['size'],
config['depth'],
activation=eval(config.get('activation',
'clipped_rectify')),
gate_activation=eval(config.get('gate_activation', 'steeper_sigmoid')),
filter_length=config.get('filter_length', 6),
filter_size=config.get('filter_size', 1024),
stride=config.get('stride', 3),
residual=config.get('residual',False),
init_in=eval(config.get('init_in', 'orthogonal')),
init_recur=eval(config.get('init_recur', 'orthogonal')))
self.Attn = Attention(config['size'])
self.ImgEncoder = Dense(config['size_target'], config['size'])
self.inputs = [T.ftensor3()]
self.target = T.fmatrix()
def compile(self):
task.Task.compile(self)
self.encode_images = self._make_encode_images()
def params(self):
return params(self.Encode, self.Attn, self.ImgEncoder)
def __call__(self, input):
return util.l2norm(self.Attn(self.Encode(input)))
# FIXME HACK ALERT
def cost(self, i, s_encoded):
if self.config['contrastive']:
i_encoded = util.l2norm(self.ImgEncoder(i))
return util.contrastive(i_encoded, s_encoded, margin=self.margin_size)
else:
raise NotImplementedError
def args(self, item):
return (item['audio'], item['target_v'])
def _make_representation(self):
with context.context(training=False):
rep = self.Encode(*self.inputs)
return theano.function(self.inputs, rep)
def _make_pile(self):
with context.context(training=False):
rep = self.Encode.GRU.intermediate(self.Encode.Conv(*self.inputs))
return theano.function(self.inputs, rep)
def _make_encode_images(self):
images = T.fmatrix()
with context.context(training=False):
rep = util.l2norm(self.ImgEncoder(images))
return theano.function([images], rep)
def encode_sentences(model, audios, batch_size=128):
"""Project audios to the joint space using model.
For each audio returns a vector.
"""
return numpy.vstack([ model.task.predict(vector_padder(batch))
for batch in util.grouper(audios, batch_size) ])
def layer_states(model, audios, batch_size=128):
"""Pass audios through the model and for each audio return the state of each timestep and each layer."""
lens = (numpy.array(map(len, audios)) + model.config['filter_length']) // model.config['stride']
rs = [ r for batch in util.grouper(audios, batch_size) for r in model.task.pile(vector_padder(batch)) ]
return [ r[-l:,:,:] for (r,l) in zip(rs, lens) ]
def encode_images(model, imgs, batch_size=128):
"""Project imgs to the joint space using model.
"""
return numpy.vstack([ model.task.encode_images(batch)
for batch in util.grouper(imgs, batch_size) ])
def symbols(model):
return model.batcher.mapper.ids.decoder
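# Hypothetical configuration sketch (not part of the original module). The keys
# mirror the ones read in Visual.__init__ above; every numeric value is an
# illustrative assumption, and the commented-out call still needs a working
# Theano/funktional installation to build the graphs.
example_config = {'size_vocab': 13, 'size_embed': 64, 'size': 512, 'depth': 2,
                  'size_target': 4096, 'max_norm': 2.0, 'lr': 0.0002,
                  'margin_size': 0.2, 'contrastive': True,
                  'filter_length': 6, 'filter_size': 1024, 'stride': 3}
# visual = Visual(example_config)  # then visual.compile() before training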
| mit | 8,088,016,349,657,589,000 | 43.675 | 114 | 0.598582 | false |
johnwallace123/dx-toolkit | src/python/dxpy/scripts/dx_gff_to_genes.py | 1 | 20403 | #!/usr/bin/env python
#
# Copyright (C) 2013-2016 DNAnexus, Inc.
#
# This file is part of dx-toolkit (DNAnexus platform client libraries).
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import dxpy
import sys
import re
import argparse
import subprocess
import magic
parser = argparse.ArgumentParser(description='Import a local GFF file as a Spans or Genes object.')
parser.add_argument('fileName', help='local fileName to import')
parser.add_argument('reference', help='ID of ContigSet object (reference) that this GFF file annotates')
parser.add_argument('--outputName', dest='outputName', default='', help='What to name the output. If none is provided, the name of the input file will be used, with the .gff file extension removed.')
parser.add_argument('--file_id', default=None, help='the DNAnexus file-id of the original file. If provided, a link to this id will be added in the type details')
parser.add_argument('--additional_type', default=[], action='append', help='This will be added to the list of object types (in addition to the type \"Spans\" or \"Genes\", which is added automatically')
parser.add_argument('--property_key', default=[], action='append', help='The keys in key-value pairs that will be added to the details of the object. The nth property key will be paired with the nth property value. The number of keys must equal the number of values provided')
parser.add_argument('--property_value', default=[], action='append', help='The values in key-value pairs that will be added to the details of the object. The nth property key will be paired with the nth property value. The number of keys must equal the number of values provided')
parser.add_argument('--tag', default=[], action='append', help='A set of tags (string labels) that will be added to the resulting Spans or Genes object. (You can use tags and properties to better describe and organize your data)')
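# Hypothetical command line for this script (the GFF file name and the ContigSet
# record ID below are placeholders, not real objects):
#   python dx_gff_to_genes.py annotations.gff record-B0000000000000000000000000 \
#       --outputName my_genes --tag imported_gff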
def importGFF(**args):
if len(args) == 0:
args = parser.parse_args(sys.argv[1:])
fileName = args.fileName
reference = args.reference
outputName = args.outputName
file_id = args.file_id
property_key = args.property_key
property_value = args.property_value
tag = args.tag
additional_type = args.additional_type
else:
fileName = args['fileName']
reference = args['reference']
outputName = ''
if args.get('outputName') != None:
outputName = args['outputName']
tag = []
if args.get('tag'):
tag = args['tag']
if args.get('property_key') != None:
property_key = args['property_key']
if args.get('property_value') != None:
property_value = args['property_value']
if args.get('additional_type') != None:
additional_type = args['additional_type']
if args.get('file_id') != None:
file_id = args['file_id']
inputFileName = unpack(fileName)
#Rows of this type will not be written to the gtable as their information is fully encompassed by the rest of the data
discardedTypes = {"start_codon": True, "stop_codon": True}
##Isolate the attribute tags from the file and check integrity
spansTable, additionalColumns = constructTable(inputFileName)
details = {'original_contigset': dxpy.dxlink(reference)}
if file_id != None:
details['original_file'] = dxpy.dxlink(file_id)
if len(property_key) != len(property_value):
raise dxpy.AppError("Expected each provided property to have a corresponding value.")
for i in range(len(property_key)):
details[property_key[i]] = property_value[i]
spansTable.set_details(details)
spansTable.add_tags(tag)
if outputName == '':
spansTable.rename(fileName)
else:
spansTable.rename(outputName)
hasGenes = False
#This pass through the file calculates the gene and transcript models
genes = {}
transcripts = {}
spanId = 0
sequenceOntology = {}
for x in ["five_prime_UTR", "5' UTR", "five prime UTR", "five_prime_untranslated_region", "five_prime_coding_exon_noncoding_region", "five_prime_exon_noncoding_region", "five prime coding exon noncoding region"]:
sequenceOntology[x] = "5' UTR"
for x in ["three_prime_UTR", "3' UTR", "three prime UTR", "three_prime_untranslated_region", "three_prime_coding_exon_noncoding_region", "three_prime_exon_noncoding_region", "three prime coding exon noncoding region"]:
sequenceOntology[x] = "3' UTR"
for x in ["mRNA", "rRNA", "tRNA", "snRNA", "snoRNA", "miRNA", "ncRNA", "transcript", "mature_transcript", "rRNA_large_subunit_primary_transcript", "35S rRNA primary transcript", "rRNA large subunit primary transcript", "rRNA_primary_transcript", "enzymatic_RNA", "nc_primary_transcript", "scRNA", "protein_coding_primary_transcript", "antisense_RNA", "antisense_primary_transcript", "primary_transcript", "ribosomal_subunit_rRNA", "small subunit rRNA", "SSU RNA", "SSU rRNA", "large_subunit_rRNA", "LSU RNA", "LSU rRNA"]:
sequenceOntology[x] = "transcript"
for x in ["exon", "interior_coding_exon", "interior coding exon", "coding_exon", "coding exon", "five_prime_coding_exon_region", "five prime exon coding region", "three_prime_coding_exon_region", "three prime coding exon region", "five_prime_coding_exon", "three_prime_coding_exon", "non_coding_exon", "non coding exon"]:
sequenceOntology[x] = "exon"
isCoding = {}
for x in ["CDS", "interior_coding_exon", "interior coding exon", "coding_exon", "five_prime_coding_exon_region", "five prime exon coding region", "three_prime_coding_exon_region", "three prime coding exon region", "five_prime_coding_exon", "three_prime_coding_exon"]:
isCoding[x] = True
codingRegions = {}
spans = {}
inputFile = open(inputFileName, 'r')
for line in inputFile:
if line[0] != "#":
values = parseLine(line.split("#")[0])
if values["attributes"].get("Parent") != None:
for parent in values["attributes"]["Parent"].split(","):
if codingRegions.get(parent) == None:
codingRegions[parent] = {values["chromosome"]: {"codingLo": -1, "codingHi": -1} }
if isCoding.get(values["type"]) != None:
if values["lo"] < codingRegions[parent][values["chromosome"]]["codingLo"] or codingRegions[parent][values["chromosome"]]["codingLo"] == -1:
codingRegions[parent][values["chromosome"]]["codingLo"] = values["lo"]
if values["hi"] > codingRegions[parent][values["chromosome"]]["codingHi"] or codingRegions[parent][values["chromosome"]]["codingLo"] == -1:
codingRegions[parent][values["chromosome"]]["codingHi"] = values["hi"]
if values["attributes"].get("ID") != None:
spans[values["attributes"]["ID"]] = spanId
spanId += 1
inputFile = open(inputFileName, 'r')
overflowSpans = spanId
spanId = 0
for line in inputFile:
if line[0] != "#":
values = parseLine(line)
entryIsCoding = False
if isCoding.get(values["type"]) != None:
entryIsCoding = True
if values["attributes"].get("Name") != None:
name = values["attributes"]["Name"]
elif values["attributes"].get("name") != None:
name = values["attributes"]["name"]
elif values["attributes"].get("NAME") != None:
name = values["attributes"]["NAME"]
elif values["attributes"].get("ID") != None:
name = values["attributes"]["ID"]
else:
name = ''
if sequenceOntology.get(values["type"]) != None:
values["type"] = sequenceOntology[values["type"]]
hasGenes = True
description = ''
if values["attributes"].get("description") != None:
description = values["attributes"]["description"]
if values["attributes"].get("Description") != None:
description = values["attributes"]["description"]
parent = -1
if values["type"] not in discardedTypes:
if values["attributes"].get("Parent") != None:
parentSplit = values["attributes"]["Parent"].split(",")
else:
parentSplit = ["-1"]
for parent in parentSplit:
currentSpan = spanId
parentId = -1
if spans.get(parent) != None:
parentId = spans[parent]
if parentSplit.index(parent) > 0:
currentSpan = overflowSpans
overflowSpans += 1
for x in ["ID", "Parent"]:
if not entryIsCoding and values["attributes"].get(x) != None:
if codingRegions.get(values["attributes"][x]) != None:
if codingRegions[values["attributes"][x]].get("chromosome") != None:
if values["lo"] >= codingRegions[values["attributes"][x]]["chromosome"]["codingLo"] and values["lo"] <= codingRegions[values["attributes"][x]]["chromosome"]["codingHi"] and codingRegions[values["attributes"][x]]["chromosome"]["codingHi"] > -1 and codingRegions[values["attributes"][x]]["chromosome"]["codingHi"] > -1:
entryIsCoding = True
if values["hi"] >= codingRegions[values["attributes"][x]]["chromosome"]["codingLo"] and values["hi"] <= codingRegions[values["attributes"][x]]["chromosome"]["codingHi"] and codingRegions[values["attributes"][x]]["chromosome"]["codingHi"] > -1 and codingRegions[values["attributes"][x]]["chromosome"]["codingHi"] > -1:
entryIsCoding = True
entry = [values["chromosome"], values["lo"], values["hi"], name, currentSpan, values["type"], values["strand"], values["score"], entryIsCoding, parentId, values["frame"], description, values["source"]]
for x in additionalColumns:
if values["attributes"].get(x) != None:
entry.append(values["attributes"][x])
else:
entry.append('')
spansTable.add_rows([entry])
spanId += 1
if hasGenes:
types = ["Genes", "gri"]
else:
types = ["Spans", "gri"]
for x in additional_type:
types.append(x)
spansTable.add_types(types)
spansTable.flush()
spansTable.close()
print(spansTable.get_id())
job_outputs = dxpy.dxlink(spansTable.get_id())
return job_outputs
def writeEntry(spansTable, spanId, exonInfo, additionalColumns, chromosome, lo, hi, attributes, entry):
if [lo, hi] not in exonInfo[chromosome]:
exonInfo[chromosome].append([lo, hi])
spanId += 1
for x in additionalColumns:
if attributes.get(x) != None:
entry.append(attributes[x])
else:
entry.append('')
spansTable.add_rows([entry])
return spanId
def splitExons(transcriptInfo, chromosome, lo, hi):
result = [["CDS", lo, hi]]
if lo < transcriptInfo[chromosome]["codingLo"]:
result[0][1] = transcriptInfo[chromosome]["codingLo"]
result.append(["5' UTR", lo, transcriptInfo[chromosome]["codingLo"]])
if hi > transcriptInfo[chromosome]["codingHi"]:
result[0][2] = transcriptInfo[chromosome]["codingHi"]
result.append(["3' UTR", transcriptInfo[chromosome]["codingHi"], hi])
return result
def parseLine(line):
line = line.strip().split("#")[0]
tabSplit = line.split("\t")
if len(tabSplit) == 1:
tabSplit = line.split(" ")
        if len(tabSplit) < 8:
            raise dxpy.AppError("One row did not have 8 or 9 entries, it had " + str(len(tabSplit)) + " instead. Offending line: " + line)
        if len(tabSplit) > 9:
            tabSplit[8] = " ".join(tabSplit[8:])
        tabSplit = tabSplit[:9]
chromosome = tabSplit[0]
source = tabSplit[1]
typ = tabSplit[2]
try:
lo = int(tabSplit[3])-1
except ValueError:
raise dxpy.AppError("One of the start values was could not be translated to an integer. " + "\nOffending line: " + line + "\nOffending value: " + tabSplit[3])
try:
hi = int(tabSplit[4])
except ValueError:
raise dxpy.AppError("One of the start values was could not be translated to an integer. " + "\nOffending line: " + line + "\nOffending value: " + tabSplit[4])
try:
score = float(tabSplit[5])
except ValueError:
if tabSplit[5] == "." or tabSplit[5] == '':
score = dxpy.NULL
else:
raise dxpy.AppError("The score for one line could not be translated into a number and was not \".\"" + "\nOffending line: " + line + "\nOffending value: " + tabSplit[5])
tabSplit[6] = tabSplit[6].replace("?", ".")
if tabSplit[6] != "+" and tabSplit[6] != "-" and tabSplit[6] != ".":
raise dxpy.AppError("The strand indicated for an element was not \"+\", \"-\", \"?\", or \".\"" + "\nOffending line: " + line + "\nOffending value: " + tabSplit[6])
else:
strand = tabSplit[6]
try:
frame = int(tabSplit[7])
if frame > 2 or frame < 0:
raise dxpy.AppError("The frame indicated for an element was not \".\", \"0\", \"1\", or \"2\"" + "\nOffending line: " + line + "\nOffending value: " + tabSplit[7])
except ValueError:
if tabSplit[7] == ".":
frame = -1
else:
raise dxpy.AppError("The frame indicated for an element was not \".\", \"0\", \"1\", or \"2\"" + "\nOffending line: " + line + "\nOffending value: " + tabSplit[7])
lineAttributes = {}
##Extract the attributes from the file
if len(tabSplit) >= 9:
reg = re.findall("([^=]*)=([^;]*);", tabSplit[8].strip() + ";")
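        # Illustrative example (hypothetical attributes column, not from the original source):
        # a ninth column such as "ID=gene00001;Name=thrL;" yields
        # [("ID", "gene00001"), ("Name", "thrL")], so lineAttributes becomes
        # {"ID": "gene00001", "Name": "thrL"}.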
for x in reg:
if len(x[0]) < 100:
lineAttributes[x[0]] = x[1].strip().strip("\"")
else:
lineAttributes = {}
values = {"chromosome": chromosome, "lo": lo, "hi": hi, "source": source, "type": typ, "strand": strand, "score": score, "frame": frame, "attributes": lineAttributes}
return values
def constructTable(inputFileName):
inputFile = open(inputFileName, 'r')
attributes = {}
for line in inputFile:
if line[0] != "#":
line = line.strip().split("#")[0]
tabSplit = line.split("\t")
if len(tabSplit) == 1:
tabSplit = line.split(" ")
                if len(tabSplit) < 8:
                    raise dxpy.AppError("One row did not have 8 or 9 entries, it had " + str(len(tabSplit)) + " instead. Offending line: " + line)
                if len(tabSplit) > 9:
                    tabSplit[8] = " ".join(tabSplit[8:])
                tabSplit = tabSplit[:9]
if len(tabSplit) != 8 and len(tabSplit) != 9:
raise dxpy.AppError("One row did not have 8 or 9 entries, it had " + str(len(tabSplit)) + " instead. Offending line: " + line)
elif len(tabSplit) == 9:
reg = re.findall("([^=]*)=([^;]*);", tabSplit[8].strip() + ";")
for x in reg:
attributes[x[0]] = True
reservedColumns = ["", "chr", "lo", "hi", "name", "span_id", "type", "score", "is_coding", "parent_id", "frame", "description", "source"]
#Construct table
schema = [
{"name": "chr", "type": "string"},
{"name": "lo", "type": "uint32"},
{"name": "hi", "type": "uint32"},
{"name": "name", "type": "string"},
{"name": "span_id", "type": "int32"},
{"name": "type", "type": "string"},
{"name": "strand", "type": "string"},
{"name": "score", "type": "float"},
{"name": "is_coding", "type": "boolean"},
{"name": "parent_id", "type": "int32"},
{"name": "frame", "type": "int16"},
{"name": "description", "type": "string"},
{"name": "source", "type": "string"}]
additionalColumns = []
for k, v in attributes.iteritems():
if k not in reservedColumns and len(k) < 100:
schema.append({"name": k, "type": "string"})
additionalColumns.append(k)
indices = [dxpy.DXGTable.genomic_range_index("chr","lo","hi", 'gri'),
dxpy.DXGTable.lexicographic_index([
dxpy.DXGTable.lexicographic_index_column("name", True, False),
dxpy.DXGTable.lexicographic_index_column("chr"),
dxpy.DXGTable.lexicographic_index_column("lo"),
dxpy.DXGTable.lexicographic_index_column("hi"),
dxpy.DXGTable.lexicographic_index_column("type")], "search")]
spansTable = dxpy.new_dxgtable(columns=schema, indices=indices)
return spansTable, additionalColumns
def unpack(input):
m = magic.Magic()
# determine compression format
try:
file_type = m.from_file(input)
except:
raise dxpy.AppError("Unable to identify compression format")
# if we find a tar file throw a program error telling the user to unpack it
if file_type == 'application/x-tar':
raise dxpy.AppError("App does not support tar files. Please unpack.")
# since we haven't returned, the file is compressed. Determine what program to use to uncompress
uncomp_util = None
if file_type == 'XZ compressed data':
uncomp_util = 'xzcat'
elif file_type[:21] == 'bzip2 compressed data':
uncomp_util = 'bzcat'
elif file_type[:20] == 'gzip compressed data':
uncomp_util = 'zcat'
elif file_type == 'POSIX tar archive (GNU)' or 'tar' in file_type:
raise dxpy.AppError("Found a tar archive. Please untar your sequences before importing")
else:
# just return input filename since it's already uncompressed
return input
if uncomp_util != None:
# bzcat does not support -t. Use non streaming decompressors for testing input
test_util = None
if uncomp_util == 'xzcat':
test_util = 'xz'
elif uncomp_util == 'bzcat':
test_util = 'bzip2'
elif uncomp_util == 'zcat':
test_util = 'gzip'
try:
subprocess.check_call(" ".join([test_util, "-t", input]), shell=True)
except subprocess.CalledProcessError:
raise dxpy.AppError("File failed integrity check by "+uncomp_util+". Compressed file is corrupted.")
# with that in hand, unzip file. If we find a tar archive then exit with error.
try:
with subprocess.Popen([uncomp_util, input], stdout=subprocess.PIPE).stdout as pipe:
line = pipe.next()
uncomp_type = m.from_buffer(line)
except:
raise dxpy.AppError("Error detecting file format after decompression")
if uncomp_type == 'POSIX tar archive (GNU)' or 'tar' in uncomp_type:
raise dxpy.AppError("Found a tar archive after decompression. Please untar your files before importing")
elif 'ASCII text' not in uncomp_type:
raise dxpy.AppError("After decompression found file type other than plain text")
try:
subprocess.check_call(" ".join([uncomp_util, "--stdout", input, ">", "uncompressed.gff"]), shell=True)
return "uncompressed.gff"
except subprocess.CalledProcessError:
raise dxpy.AppError("Unable to open compressed input for reading")
def main(**args):
return importGFF(**args)
if __name__ == '__main__':
importGFF()
| apache-2.0 | -2,737,472,759,782,336,000 | 47.578571 | 525 | 0.590354 | false |
openatv/enigma2 | lib/python/Components/FileTransfer.py | 2 | 3044 | # -*- coding: utf-8 -*-
from Components.Task import Task, Job, job_manager, AbortedPostcondition, ReturncodePostcondition
from Tools.Directories import fileExists, shellquote
from Components.MovieList import MOVIE_EXTENSIONS
from enigma import eTimer
import os
ALL_MOVIE_EXTENSIONS = MOVIE_EXTENSIONS.union((".ts",))
class FileTransferJob(Job):
def __init__(self, src_file, dst_file, src_isDir, do_copy, title):
Job.__init__(self, title)
FileTransferTask(self, src_file, dst_file, src_isDir, do_copy)
class FileTransferTask(Task):
def __init__(self, job, src_file, dst_file, src_isDir, do_copy):
Task.__init__(self, job, "")
nice = "ionice -c 3"
self.src_isDir = src_isDir
self.src_file = src_file
self.dst_isDir = False
self.dst_file = dst_file + "/" + os.path.basename(src_file)
src_file_append = ""
if not src_isDir:
root, ext = os.path.splitext(src_file)
if ext in ALL_MOVIE_EXTENSIONS:
src_file = root
src_file_append = ".*"
cmd = "mv"
if do_copy:
cmd = "cp -pr"
cmdline = '%s %s %s%s %s' % (nice, cmd, shellquote(src_file), src_file_append, shellquote(dst_file))
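		# Illustrative example (hypothetical paths, not from the original source): copying the
		# recording "/hdd/movie/rec.ts" to "/media/usb" builds roughly
		#   ionice -c 3 cp -pr '/hdd/movie/rec'.* '/media/usb'
		# so every file sharing the recording's stem is transferred together.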
if self.dst_file.endswith("/"):
self.dst_isDir = True
self.setCmdline(cmdline)
self.end = 100
self.progressTimer = eTimer()
self.progressTimer.callback.append(self.progressUpdate)
def progressUpdate(self):
if not fileExists(self.dst_file, 'r'):
return
if self.dst_isDir:
dst_dir_size = self.dst_file
if self.src_isDir and self.src_file.endswith("/"):
mv_dir = self.src_file[:-1].rsplit("/", 1)
if len(mv_dir) == 2:
dst_dir_size = self.dst_file + mv_dir[1]
dst_size = float(self.dirSize(dst_dir_size))
else:
dst_size = float(os.path.getsize(self.dst_file))
progress = dst_size / self.src_size * 100.0
self.setProgress(progress)
self.progressTimer.start(self.updateTime, True)
def prepare(self):
if fileExists(self.src_file, 'r'):
if self.src_isDir:
self.src_size = float(self.dirSize(self.src_file))
else:
self.src_size = float(os.path.getsize(self.src_file))
self.updateTime = max(1000, int(self.src_size * 0.000001 * 0.5)) # based on 20Mb/s transfer rate
self.progressTimer.start(self.updateTime, True)
def afterRun(self):
self.progressTimer.stop()
self.setProgress(100)
def dirSize(self, folder):
total_size = os.path.getsize(folder)
for item in os.listdir(folder):
itempath = os.path.join(folder, item)
if os.path.isfile(itempath):
total_size += os.path.getsize(itempath)
elif os.path.isdir(itempath):
total_size += self.dirSize(itempath)
return total_size
def finish(self, aborted=False):
self.afterRun()
not_met = []
if aborted:
from Tools import Notifications
from Screens.MessageBox import MessageBox
Notifications.AddNotification(MessageBox, _("File transfer was cancelled by user"), type=MessageBox.TYPE_INFO)
else:
for postcondition in self.postconditions:
if not postcondition.check(self):
not_met.append(postcondition)
self.cleanup(not_met)
self.callback(self, not_met)
| gpl-2.0 | 764,602,600,391,465,300 | 32.450549 | 113 | 0.692181 | false |
CityOfPhiladelphia/myphillyrising | website/alexander/models.py | 1 | 7341 | from django.conf import settings
from django.db import models
from django.db.models import query
from django.utils.translation import ugettext as _
from django.utils.timezone import now, timedelta
from geopy import geocoders
from .feed_readers import get_feed_reader
import logging
logger = logging.getLogger(__name__)
class FeedQuerySet (query.QuerySet):
def refresh(self):
for feed in self:
feed.refresh()
class FeedManager (models.Manager):
def get_query_set(self):
return FeedQuerySet(self.model, using=self._db)
def refresh(self):
return self.all().refresh()
class Feed (models.Model):
title = models.CharField(max_length=100)
last_read_at = models.DateTimeField(null=True, blank=True)
is_trusted = models.BooleanField(default=False)
# ----------
# Information about the original feed
source_url = models.URLField()
source_type = models.CharField(max_length=20)
# ----------
# Defaults for the content items retrieved from this feed
default_category = models.CharField(max_length=20)
default_tags = models.ManyToManyField('ContentTag', blank=True)
objects = FeedManager()
def __unicode__(self):
return self.title
def get_items(self, **kwargs):
return self.items.filter(**kwargs)
def make_new_items(self, count, **kwargs):
items = []
for index in xrange(count):
item = ContentItem(feed=self, category=self.default_category, **kwargs)
items.append(item)
return items
def refresh(self):
"""
Update a feed instance from its source URL if there have been any
changes to the feed's items.
"""
feed_source = get_feed_reader(self.source_type, url=self.source_url)
changed_items = []
new_items = []
all_items = []
is_new = lambda item: item.pk is None
seen_source_ids = set()
self.errors = []
# Loop through each item from the source
for item_source in feed_source:
# Get the source id and the expected number of corresponding items
source_id = feed_source.get_item_id(item_source)
occurrence_count, from_dt, until_dt = feed_source.get_occurrence_count(item_source)
# If date filters were returned, add them as filter parameters
extra_params = {}
if from_dt:
extra_params['displayed_from__gte'] = from_dt
if until_dt:
extra_params['displayed_until__lte'] = until_dt
# Get the item model(s) corresponding to the source data
items = list(self.get_items(source_id=source_id, **extra_params))
existing_count = len(items)
if existing_count < occurrence_count:
items.extend(self.make_new_items(occurrence_count - existing_count))
# If it is new or has changed, update the model with the source
# data
new_flags = [is_new(item) for item in items]
changed_flags = [feed_source.is_different(item, item_source) for item in items]
has_new = any(new_flags)
has_changed = any(changed_flags)
if has_new or has_changed:
try:
feed_source.update_items(items, item_source)
except ValueError as exc:
error_str = ('Failed to update the feed %s items %s from '
'the source item with id %s: %s' %
(self, items, source_id, exc))
logger.error(error_str)
self.errors.append(error_str)
continue
# Save all the changed items
for item in items:
item.save()
changed_items.extend(items)
# Apply tags to everything that's new
for item, item_is_new in zip(items, new_flags):
if not item_is_new:
continue
tags = tuple(item.feed.default_tags.all())
item.tags.add(*tags)
# Auto publish if this is a trusted feed
if self.is_trusted:
item.status = 'published'
item.save()
# No matter what, note that we've seen the item
seen_source_ids.add(source_id)
all_items.extend((item, has_new or has_changed) for item in items)
self.last_read_at = now()
self.save()
# ICS feeds return a complete set of events each time, so we might as
# well clean out events that no longer exist.
if self.source_type.lower() in ('ical', 'ics'):
all_source_ids = set(
item.source_id
for item in self.items.all()
)
unseen_source_ids = all_source_ids - seen_source_ids
self.items.filter(source_id__in=unseen_source_ids).delete()
return all_items
class ContentItem (models.Model):
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
title = models.CharField(max_length=160, null=True, blank=True)
tags = models.ManyToManyField('ContentTag', related_name='items')
category = models.CharField(max_length=20)
is_featured = models.BooleanField(default=False)
displayed_from = models.DateTimeField()
displayed_until = models.DateTimeField(null=True, blank=True)
status = models.CharField(max_length=20, default='pending')
# Optional location information
address = models.CharField(max_length=1000, default='', blank=True)
lat = models.FloatField(null=True, blank=True)
lng = models.FloatField(null=True, blank=True)
# Information about the original source content
feed = models.ForeignKey('Feed', related_name='items')
source_id = models.CharField(max_length=1000)
source_url = models.URLField()
source_content = models.TextField()
def __unicode__(self):
return self.title or self.source_url
class Meta:
ordering = ('-displayed_from',)
def geocode(self, commit=True):
if not self.address:
return
geocoder = geocoders.GoogleV3()
# Increase specificity of the address if it's not specific already.
address = self.address
if settings.GEOCODER['CITY'].lower() not in address.lower():
address += ', ' + settings.GEOCODER['CITY']
address += ', ' + settings.GEOCODER['STATE']
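        # Illustrative example (actual values come from settings.GEOCODER): with CITY
        # "Philadelphia" and STATE "PA", a hypothetical "123 Main St" becomes
        # "123 Main St, Philadelphia, PA" before being sent to the geocoder.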
# The geocode method may raise an exception. See
# https://github.com/geopy/geopy/blob/master/geopy/geocoders/googlev3.py#L193
# for possibilities.
results = geocoder.geocode(
address,
bounds=settings.GEOCODER['BOUNDS'],
region=settings.GEOCODER['REGION'],
exactly_one=False
)
if (len(results) > 0):
place, (self.lat, self.lng) = results[0]
if commit:
self.save()
else:
logger.debug('Found no locations for address %r' % (address,))
class ContentTag (models.Model):
label = models.CharField(max_length=100, primary_key=True)
def __unicode__(self):
return self.label
| gpl-3.0 | 2,756,117,078,917,034,500 | 34.124402 | 95 | 0.594197 | false |
tekton/DocuCanvas | nodes/migrations/0001_initial.py | 1 | 3181 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Node'
db.create_table(u'nodes_node', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('project', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['projects.Project'])),
('number', self.gf('django.db.models.fields.IntegerField')(default=0)),
('section', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['projects.Section'], null=True, blank=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=255)),
('description', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('slug_title', self.gf('django.db.models.fields.SlugField')(max_length=50, null=True, blank=True)),
('slug_description', self.gf('django.db.models.fields.SlugField')(max_length=50, null=True, blank=True)),
('description_preview', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
))
db.send_create_signal(u'nodes', ['Node'])
def backwards(self, orm):
# Deleting model 'Node'
db.delete_table(u'nodes_node')
models = {
u'nodes.node': {
'Meta': {'object_name': 'Node'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_preview': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'number': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['projects.Project']"}),
'section': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['projects.Section']", 'null': 'True', 'blank': 'True'}),
'slug_description': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'slug_title': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'projects.project': {
'Meta': {'object_name': 'Project'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'})
},
u'projects.section': {
'Meta': {'object_name': 'Section'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['projects.Project']"})
}
}
complete_apps = ['nodes'] | gpl-3.0 | -5,484,230,721,921,663,000 | 54.824561 | 143 | 0.57309 | false |
endlessm/chromium-browser | third_party/chromite/third_party/infra_libs/buildbucket/proto/config/service_config_pb2.py | 2 | 4583 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: service_config.proto
import sys
_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='service_config.proto',
package='buildbucket',
syntax='proto3',
serialized_pb=_b(
'\n\x14service_config.proto\x12\x0b\x62uildbucket\">\n\x0bSettingsCfg\x12/\n\x08swarming\x18\x01 \x01(\x0b\x32\x1d.buildbucket.SwarmingSettings\"`\n\x10SwarmingSettings\x12\x15\n\rmilo_hostname\x18\x02 \x01(\t\x12/\n\'default_task_template_canary_percentage\x18\x03 \x01(\x05J\x04\x08\x01\x10\x02\x42\x38Z6go.chromium.org/luci/buildbucket/proto/config;configpbb\x06proto3'
)
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_SETTINGSCFG = _descriptor.Descriptor(
name='SettingsCfg',
full_name='buildbucket.SettingsCfg',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='swarming',
full_name='buildbucket.SettingsCfg.swarming',
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[],
serialized_start=37,
serialized_end=99,
)
_SWARMINGSETTINGS = _descriptor.Descriptor(
name='SwarmingSettings',
full_name='buildbucket.SwarmingSettings',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='milo_hostname',
full_name='buildbucket.SwarmingSettings.milo_hostname',
index=0,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode('utf-8'),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None
),
_descriptor.FieldDescriptor(
name='default_task_template_canary_percentage',
full_name=
'buildbucket.SwarmingSettings.default_task_template_canary_percentage',
index=1,
number=3,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[],
serialized_start=101,
serialized_end=197,
)
_SETTINGSCFG.fields_by_name['swarming'].message_type = _SWARMINGSETTINGS
DESCRIPTOR.message_types_by_name['SettingsCfg'] = _SETTINGSCFG
DESCRIPTOR.message_types_by_name['SwarmingSettings'] = _SWARMINGSETTINGS
SettingsCfg = _reflection.GeneratedProtocolMessageType(
'SettingsCfg',
(_message.Message,),
dict(
DESCRIPTOR=_SETTINGSCFG,
__module__='service_config_pb2'
# @@protoc_insertion_point(class_scope:buildbucket.SettingsCfg)
)
)
_sym_db.RegisterMessage(SettingsCfg)
SwarmingSettings = _reflection.GeneratedProtocolMessageType(
'SwarmingSettings',
(_message.Message,),
dict(
DESCRIPTOR=_SWARMINGSETTINGS,
__module__='service_config_pb2'
# @@protoc_insertion_point(class_scope:buildbucket.SwarmingSettings)
)
)
_sym_db.RegisterMessage(SwarmingSettings)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(
descriptor_pb2.FileOptions(),
_b('Z6go.chromium.org/luci/buildbucket/proto/config;configpb')
)
# @@protoc_insertion_point(module_scope)
| bsd-3-clause | -2,954,709,114,080,390,000 | 29.966216 | 380 | 0.632555 | false |
scholer/cadnano2.5 | cadnano/document.py | 2 | 34764 | # -*- coding: utf-8 -*-
from operator import itemgetter
from uuid import uuid4
from typing import (
Set,
List,
Tuple,
Iterable,
Iterator,
Optional
)
from cadnano import (
app,
setBatch,
util
)
from cadnano.addinstancecmd import AddInstanceCommand
from cadnano.proxies.cnenum import (
EnumType,
GridEnum,
ModEnum,
PointEnum,
ViewSendEnum
)
from cadnano.proxies.cnobject import CNObject
from cadnano.objectinstance import ObjectInstance
from cadnano.proxies.cnproxy import (
ProxySignal,
UndoStack
)
from cadnano.docmodscmd import (
AddModCommand,
ModifyModCommand,
RemoveModCommand
)
from cadnano.fileio.decode import decodeFile
from cadnano.fileio.encode import encodeToFile
from cadnano.part import Part
from cadnano.part.nucleicacidpart import NucleicAcidPart
from cadnano.part.refreshsegmentscmd import RefreshSegmentsCommand
from cadnano.oligo import Oligo
from cadnano.strandset import StrandSet
from cadnano.strand import Strand
from cadnano.cntypes import (
DocCtrlT,
DocT,
WindowT
)
# Type Aliases
EndsSelected = Tuple[bool, bool]
class Document(CNObject):
"""
The Document class is the root of the model. It has two main purposes:
1. Serve as the parent all Part objects within the model.
2. Track all sub-model actions on its undoStack.
Args:
parent (CNObject): optional, defaults to None
Attributes:
view_names (list): views the document should support
filter_set (set): filters that should be applied when selecting.
"""
def __init__(self, parent=None):
super(Document, self).__init__(parent)
self._undostack = us = UndoStack() # notice NO parent, what does this mean?
us.setUndoLimit(30)
self._children = set() # for storing a reference to Parts (and Assemblies)
self._instances = set() # for storing instances of Parts (and Assemblies)
self._app_window = None
# the dictionary maintains what is selected
self._selection_dict = {}
self._active_part = None
self._filename = None
# the added list is what was recently selected or deselected
self._strand_selected_changed_dict = {}
self.view_names = []
self.filter_set: Set[str] = set()
self._mods = {} # modifications keyed by mod id
this_app = app()
this_app.documentWasCreatedSignal.emit(self)
# end def
# SIGNALS #
# Signal 1. Connected to the ViewRoots
documentPartAddedSignal = ProxySignal(object, CNObject, name='documentPartAddedSignal')
"""`Document`, `Part`"""
documentAssemblyAddedSignal = ProxySignal(object, CNObject, name='documentAssemblyAddedSignal')
"""`Document`, `Assembly`"""
documentSelectionFilterChangedSignal = ProxySignal(object, name='documentSelectionFilterChangedSignal')
documentPreXoverFilterChangedSignal = ProxySignal(str, name='documentPreXoverFilterChangedSignal')
documentViewResetSignal = ProxySignal(CNObject, name='documentViewResetSignal')
documentClearSelectionsSignal = ProxySignal(CNObject, name='documentClearSelectionsSignal')
documentChangeViewSignalingSignal = ProxySignal(int, name='documentChangeViewSignalingSignal')
# Signal 1. Connected to the ModTool
documentModAddedSignal = ProxySignal(object, object, object, name='documentModAddedSignal')
documentModRemovedSignal = ProxySignal(object, object, name='documentModRemovedSignal')
documentModChangedSignal = ProxySignal(object, object, object, name='documentModChangedSignal')
# SLOTS #
# ACCESSORS #
def undoStack(self) -> UndoStack:
"""This is the actual undoStack to use for all commands. Any children
needing to perform commands should just ask their parent for the
undoStack, and eventually the request will get here.
"""
return self._undostack
def children(self) -> Set[CNObject]:
"""Returns a list of parts associated with the document.
Returns:
list: list of all child objects
"""
return self._children
def addRefObj(self, child: CNObject):
"""For adding Part and Assembly object references
Args:
child (object):
"""
self._children.add(child)
def addInstance(self, instance: ObjectInstance):
"""Add an ObjectInstance to the list of instances
Args:
instance:
"""
self._instances.add(instance)
def removeInstance(self, instance: ObjectInstance):
""" Remove an ObjectInstance from the list of instances
Args:
instance:
"""
self._instances.remove(instance)
self.documentClearSelectionsSignal.emit(self)
def removeAllChildren(self):
"""Used to reset the document. Not undoable."""
self.documentClearSelectionsSignal.emit(self)
for child in list(self._children):
child.remove(use_undostack=True)
self.undoStack().clear()
self.deactivateActivePart()
# end def
def setFilterSet(self, filter_list: List[str]):
""" Set the Document filter list.
Emits `documentSelectionFilterChangedSignal`
Args:
filter_list: list of filter key names
"""
assert isinstance(filter_list, list)
vhkey = 'virtual_helix'
fs = self.filter_set
if vhkey in filter_list and vhkey not in fs:
self.clearAllSelected()
if vhkey in fs and vhkey not in filter_list:
self.clearAllSelected()
self.filter_set = fs = set(filter_list)
self.documentSelectionFilterChangedSignal.emit(fs)
# end def
def removeRefObj(self, child: CNObject):
""" Remove child Part or Assembly
Args:
child:
"""
self._children.remove(child)
# end def
def activePart(self) -> Part:
return self._active_part
# end def
def setActivePart(self, part: Part):
self._active_part = part
# end def
def deactivateActivePart(self):
self._active_part = None
# end def
def changeViewSignaling(self, signal_enum: int = ViewSendEnum.ALL):
        '''Turn on and off view signaling for enabled slots in views.
Signals the root item in each view
Arg:
signal_enum: Default turns on all views signals
'''
self.documentChangeViewSignalingSignal.emit(signal_enum)
# end def
def fileName(self) -> str:
return self._filename
# end def
def setFileName(self, fname: str):
self._filename = fname
# end def
def writeToFile(self, filename: str, legacy: bool = False):
""" Convenience wrapper for `encodeToFile` to set the `document`
argument to `self`
Args:
filename: full path file name
legacy: attempt to export cadnano2 format
"""
encodeToFile(filename, self, legacy)
# end def
def readFile(self, filename: str) -> DocT:
"""Convenience wrapper for ``decodeFile`` to always emit_signals and
set the ``document`` argument to ``self``
Args:
filename: full path file name
Returns:
self ``Document`` object with data decoded from ``filename``
"""
print("reading file", filename)
return decodeFile(filename, document=self, emit_signals=True)
# end def
# def assemblies(self):
# """Returns a list of assemblies associated with the document."""
# return self._assemblies
# PUBLIC METHODS FOR QUERYING THE MODEL #
def addStrandToSelection(self, strand: Strand, value: EndsSelected):
""" Add `Strand` object to Document selection
Args:
strand:
value: of the form::
(is low index selected, is high index selected)
"""
ss = strand.strandSet()
if ss in self._selection_dict:
self._selection_dict[ss][strand] = value
else:
self._selection_dict[ss] = {strand: value}
self._strand_selected_changed_dict[strand] = value
# end def
def removeStrandFromSelection(self, strand: Strand) -> bool:
"""Remove ``Strand`` object from Document selection
Args:
strand:
Returns:
``True`` if successful, ``False`` otherwise
"""
ss = strand.strandSet()
if ss in self._selection_dict:
temp = self._selection_dict[ss]
if strand in temp:
del temp[strand]
if len(temp) == 0:
del self._selection_dict[ss]
self._strand_selected_changed_dict[strand] = (False, False)
return True
else:
return False
else:
return False
# end def
def addVirtualHelicesToSelection(self, part: Part, id_nums: Iterable[int]):
"""If the ``Part`` isn't in the ``_selection_dict`` its not
going to be in the changed_dict either, so go ahead and add
Args:
part: The Part
id_nums: List of virtual helix ID numbers
"""
selection_dict = self._selection_dict
if part not in selection_dict:
selection_dict[part] = s_set = set()
else:
s_set = selection_dict[part]
changed_set = set()
for id_num in id_nums:
if id_num not in s_set:
s_set.add(id_num)
changed_set.add(id_num)
if len(changed_set) > 0:
part.partVirtualHelicesSelectedSignal.emit(part, changed_set, True)
# end def
def removeVirtualHelicesFromSelection(self, part: Part, id_nums: Iterable[int]):
"""Remove from the ``Part`` selection the ``VirtualHelix`` objects
specified by id_nums.
Args:
part:
id_nums:
"""
# print("remove called", id(part), id_nums, self._selection_dict.get(part))
selection_dict = self._selection_dict
if part in selection_dict:
s_set = selection_dict[part]
changed_set = set()
for id_num in id_nums:
if id_num in s_set:
s_set.remove(id_num)
if len(s_set) == 0:
del selection_dict[part]
changed_set.add(id_num)
if len(changed_set) > 0:
part.partVirtualHelicesSelectedSignal.emit(part, changed_set, False)
# end def
def selectedOligos(self) -> Set[Oligo]:
"""As long as one endpoint of a strand is in the selection, then the
oligo is considered selected.
Returns:
Set of zero or more selected :obj:`Oligos`
"""
s_dict = self._selection_dict
selected_oligos = set()
for ss in s_dict.keys():
for strand in ss:
selected_oligos.add(strand.oligo())
# end for
# end for
return selected_oligos
# end def
def clearAllSelected(self):
"""Clear all selections
emits documentClearSelectionsSignal
"""
# print("clearAllSelected")
self._selection_dict = {}
# the added list is what was recently selected or deselected
self._strand_selected_changed_dict = {}
self.documentClearSelectionsSignal.emit(self)
# end def
def isModelStrandSelected(self, strand: Strand) -> bool:
ss = strand.strandSet()
if ss in self._selection_dict:
if strand in self._selection_dict[ss]:
return True
else:
return False
else:
return False
# end def
def isVirtualHelixSelected(self, part: Part, id_num: int) -> bool:
"""For a given ``Part``
Args:
part: ``Part`` in question
id_num: ID number of a virtual helix
Returns:
``True`` if ``id_num`` is selected else ``False``
"""
if part in self._selection_dict:
return id_num in self._selection_dict[part]
else:
return False
# end def
def isOligoSelected(self, oligo: Oligo) -> bool:
"""Determine if given ``Oligo`` is selected
Args:
oligo: ``Oligo`` object
Returns:
``True`` if ``oligo`` is selected otherwise ``False``
"""
strand5p = oligo.strand5p()
for strand in strand5p.generator3pStrand():
if self.isModelStrandSelected(strand):
return True
return False
# end def
def selectOligo(self, oligo: Oligo):
"""Select given ``Oligo``
Args:
oligo: ``Oligo`` object
"""
strand5p = oligo.strand5p()
both_ends = (True, True)
for strand in strand5p.generator3pStrand():
self.addStrandToSelection(strand, both_ends)
self.updateStrandSelection()
# end def
def deselectOligo(self, oligo: Oligo):
"""Deselect given ``Oligo``
Args:
oligo: ``Oligo`` object
"""
strand5p = oligo.strand5p()
for strand in strand5p.generator3pStrand():
self.removeStrandFromSelection(strand)
self.updateStrandSelection()
# end def
def getSelectedStrandValue(self, strand: Strand) -> EndsSelected:
"""Strand is an object to look up
it is pre-vetted to be in the dictionary
Args:
strand: ``Strand`` object in question
Returns:
Tuple of the end point selection
"""
return self._selection_dict[strand.strandSet()][strand]
# end def
def sortedSelectedStrands(self, strandset: StrandSet) -> List[Strand]:
"""Get a list sorted from low to high index of `Strands` in a `StrandSet`
that are selected
Args:
strandset: :obj:`StrandSet` to get selected strands from
Returns:
List of :obj:`Strand`s
"""
out_list = [x for x in self._selection_dict[strandset].items()]
def getLowIdx(x): return Strand.lowIdx(itemgetter(0)(x))
out_list.sort(key=getLowIdx)
return out_list
# end def
def determineStrandSetBounds(self, selected_strand_list: List[Tuple[Strand, EndsSelected]],
strandset: StrandSet) -> Tuple[int, int]:
"""Determine the bounds of a :class:`StrandSet` ``strandset`` among a
a list of selected strands in that same ``strandset``
Args:
selected_strand_list: list of ``( Strands, (is_low, is_high) )`` items
strandset: of interest
Returns:
tuple: min low bound and min high bound index
"""
length = strandset.length()
min_high_delta = min_low_delta = max_ss_idx = length - 1 # init the return values
ss_dict = self._selection_dict[strandset]
for strand, value in selected_strand_list:
idx_low, idx_high = strand.idxs()
low_neighbor, high_neighbor = strandset.getNeighbors(strand)
# print(low_neighbor, high_neighbor)
if value[0]: # the end is selected
if low_neighbor is None:
temp = idx_low - 0
else:
if low_neighbor in ss_dict:
value_N = ss_dict[low_neighbor]
# we only care if the low neighbor is not selected
temp = min_low_delta if value_N[1] else idx_low - low_neighbor.highIdx() - 1
# end if
else: # not selected
temp = idx_low - low_neighbor.highIdx() - 1
# end else
if temp < min_low_delta:
min_low_delta = temp
# end if
# check the other end of the strand
if not value[1]:
temp = idx_high - idx_low - 1
if temp < min_high_delta:
min_high_delta = temp
# end if
if value[1]:
if high_neighbor is None:
temp = max_ss_idx - idx_high
else:
if high_neighbor in ss_dict:
value_N = ss_dict[high_neighbor]
# we only care if the low neighbor is not selected
temp = min_high_delta if value_N[0] else high_neighbor.lowIdx() - idx_high - 1
# end if
else: # not selected
temp = high_neighbor.lowIdx() - idx_high - 1
# end else
# end else
if temp < min_high_delta:
min_high_delta = temp
# end if
# check the other end of the strand
if not value[0]:
temp = idx_high - idx_low - 1
if temp < min_low_delta:
min_low_delta = temp
# end if
# end for
return (min_low_delta, min_high_delta)
# end def
def getSelectionBounds(self) -> Tuple[int, int]:
"""Get the index bounds of a strand selection
Returns:
tuple: of :obj:`int`
"""
min_low_delta = -1
min_high_delta = -1
for strandset in self._selection_dict.keys():
selected_list = self.sortedSelectedStrands(strandset)
temp_low, temp_high = self.determineStrandSetBounds(selected_list, strandset)
if temp_low < min_low_delta or min_low_delta < 0:
min_low_delta = temp_low
if temp_high < min_high_delta or min_high_delta < 0:
min_high_delta = temp_high
return (min_low_delta, min_high_delta)
# end def
def deleteStrandSelection(self, use_undostack: bool = True):
"""Delete selected strands. First iterates through all selected strands
and extracts refs to xovers and strands. Next, calls removeXover
        on xoverlist as part of its own macroed command for isolation
purposes. Finally, calls removeStrand on all strands that were
fully selected (low and high), or had at least one non-xover
endpoint selected.
"""
xoList = []
strand_dict = {}
for strandset_dict in self._selection_dict.values():
for strand, selected in strandset_dict.items():
part = strand.part()
idx_low, idx_high = strand.idxs()
strand5p = strand.connection5p()
strand3p = strand.connection3p()
# both ends are selected
strand_dict[strand] = selected[0] and selected[1]
# only look at 3' ends to handle xover deletion
sel3p = selected[0] if idx_low == strand.idx3Prime() else selected[1]
if sel3p: # is idx3p selected?
if strand3p: # is there an xover
xoList.append((part, strand, strand3p, use_undostack))
else: # idx3p is a selected endpoint
strand_dict[strand] = True
else:
if not strand5p: # idx5p is a selected endpoint
strand_dict[strand] = True
if use_undostack and xoList:
self.undoStack().beginMacro("Delete xovers")
for part, strand, strand3p, useUndo in xoList:
NucleicAcidPart.removeXover(part, strand, strand3p, useUndo)
self.removeStrandFromSelection(strand)
self.removeStrandFromSelection(strand3p)
self._selection_dict = {}
self.documentClearSelectionsSignal.emit(self)
if use_undostack:
if xoList: # end xover macro if it was started
self.undoStack().endMacro()
if True in strand_dict.values():
self.undoStack().beginMacro("Delete selection")
else:
return # nothing left to do
for strand, delete in strand_dict.items():
if delete:
strand.strandSet().removeStrand(strand)
if use_undostack:
self.undoStack().endMacro()
# end def
def resizeSelection(self, delta: int, use_undostack: bool = True):
"""Moves the selected idxs by delta by first iterating over all strands
to calculate new idxs (method will return if snap-to behavior would
create illegal state), then applying a resize command to each strand.
Args:
delta:
use_undostack: optional, default is ``True``
"""
resize_list = []
vh_set = set()
# calculate new idxs
part = None
for strandset_dict in self._selection_dict.values():
for strand, selected in strandset_dict.items():
if part is None:
part = strand.part()
idx_low, idx_high = strand.idxs()
new_low, new_high = strand.idxs()
delta_low = delta_high = delta
# process xovers to get revised delta
if selected[0] and strand.connectionLow():
new_low = part.xoverSnapTo(strand, idx_low, delta)
if new_low is None:
return
delta_high = new_low - idx_low
if selected[1] and strand.connectionHigh():
new_high = part.xoverSnapTo(strand, idx_high, delta)
if new_high is None:
return
delta_low = new_high - idx_high
# process endpoints
if selected[0] and not strand.connectionLow():
new_low = idx_low + delta_low
if selected[1] and not strand.connectionHigh():
new_high = idx_high + delta_high
if new_low > new_high: # check for illegal state
return
vh_set.add(strand.idNum())
resize_list.append((strand, new_low, new_high))
# end for
# end for
# execute the resize commands
us = self.undoStack()
if use_undostack:
us.beginMacro("Resize Selection")
for strand, idx_low, idx_high in resize_list:
Strand.resize(strand,
(idx_low, idx_high),
use_undostack,
update_segments=False)
if resize_list:
cmd = RefreshSegmentsCommand(part, vh_set)
if use_undostack:
us.push(cmd)
else:
cmd.redo()
if use_undostack:
us.endMacro()
# end def
def updateStrandSelection(self):
"""Do it this way in the future when we have
        a better signaling architecture between views.
        For now, individual objects need to emit signals.
"""
oligos_selected_set = set()
oligos_set = set()
for obj, value in self._strand_selected_changed_dict.items():
oligo = obj.oligo()
oligos_set.add(oligo)
if True in value:
oligos_selected_set.add(oligo)
obj.strandSelectedChangedSignal.emit(obj, value)
# end for
for oligo in oligos_selected_set:
oligo.oligoSelectedChangedSignal.emit(oligo, True)
oligos_deselected_set = oligos_set - oligos_selected_set
for oligo in oligos_deselected_set:
oligo.oligoSelectedChangedSignal.emit(oligo, False)
self._strand_selected_changed_dict = {}
# end def
def resetViews(self):
"""This is a fast way to clear selections and the views.
We could manually deselect each item from the Dict, but we'll just
        let them be garbage collected; the dictionary maintains what is
        selected.
"""
# print("reset views")
self._selection_dict = {}
# the added list is what was recently selected or deselected
self._strand_selected_changed_dict = {}
self.documentViewResetSignal.emit(self)
# end def
def makeNew(self, fname: str = "untitled.json"):
"""For use in creating a new ``Document``
Args:
fname: new filename, default is ``untitled.json``
"""
self.clearAllSelected()
self.resetViews()
setBatch(True)
self.removeAllChildren() # clear out old parts
setBatch(False)
self.undoStack().clear() # reset undostack
self.deactivateActivePart()
self._filename = fname
# end def
def setViewNames(self, view_name_list: List[str], do_clear: bool = False):
"""Tell the model what views the document should support
Allows non-visible views to be used.
Intended to be called at application launch only at present.
Args:
view_name_list: List of view names like `slice`, `path`, or `inspector`
            do_clear: optional, clear the names or not? defaults to ``False``
"""
view_names = [] if do_clear else self.view_names
for view_name in view_name_list:
if view_name not in view_names:
view_names.append(view_name)
self.view_names = view_names
# end def
# PUBLIC METHODS FOR EDITING THE MODEL #
def createNucleicAcidPart( self,
use_undostack: bool = True,
grid_type: EnumType = GridEnum.NONE,
is_lattice: bool = True
) -> NucleicAcidPart:
"""Create and store a new DnaPart and instance, and return the instance.
Args:
use_undostack: optional, defaults to True
grid_type: optional default to GridEnum.NONE
Returns
new :obj:`NucleicAcidPart`
"""
dna_part = NucleicAcidPart(document=self, grid_type=grid_type, is_lattice=is_lattice)
self._addPart(dna_part, use_undostack=use_undostack)
return dna_part
# end def
def getParts(self) -> Iterator[Part]:
"""Get all child :obj:`Part` in the document
Yields:
            the next :obj:`Part` in the Set of children
"""
for item in self._children:
if isinstance(item, Part):
yield item
# end def
def getPartByUUID(self, uuid: str) -> Part:
"""Get the part given the uuid string
Args:
uuid: of the part
Returns:
Part
Raises:
KeyError: no part with that UUID
"""
for item in self._children:
if isinstance(item, Part) and item.uuid == uuid:
return item
raise KeyError("Part with uuid {} not found".format(uuid))
# end def
# PUBLIC SUPPORT METHODS #
def appWindow(self) -> WindowT:
return self._app_window
# end def
def setAppWindow(self, app_window: WindowT):
"""Called by :meth:`CNMainWindow.setDocument` method."""
self._app_window = app_window
# end def
# PRIVATE SUPPORT METHODS #
def _addPart(self, part: Part, use_undostack: bool = True):
"""Add part to the document via AddInstanceCommand.
"""
c = AddInstanceCommand(self, part)
util.doCmd(self, c, use_undostack)
# end def
def createMod( self,
params: dict,
mid: str = None,
use_undostack: bool = True) -> Tuple[dict, str]:
"""Create a modification
Args:
params:
mid: optional, modification ID string
use_undostack: optional, default is ``True``
Returns:
tuple of :obj:`dict`, :obj:`str` of form::
                (dictionary of modification parameters, modification ID string)
Raises:
KeyError: Duplicate mod ID
"""
if mid is None:
mid = uuid4().hex
elif mid in self._mods:
raise KeyError("createMod: Duplicate mod id: {}".format(mid))
name = params.get('name', mid)
color = params.get('color', '#00FF00')
seq5p = params.get('seq5p', '')
seq3p = params.get('seq3p', '')
seqInt = params.get('seqInt', '')
note = params.get('note', '')
cmdparams = {
'props': {'name': name,
'color': color,
'note': note,
'seq5p': seq5p,
'seq3p': seq3p,
'seqInt': seqInt,
},
'ext_locations': set(), # external mods, mod belongs to idx outside of strand
'int_locations': set() # internal mods, mod belongs between idx and idx + 1
}
item = {'name': name,
'color': color,
'note': note,
'seq5p': seq5p,
'seq3p': seq3p,
'seqInt': seqInt
}
c = AddModCommand(self, cmdparams, mid)
util.doCmd(self, c, use_undostack=use_undostack)
return item, mid
# end def
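    # Illustrative usage (hypothetical values, not part of the original source):
    #   item, mid = document.createMod({'name': 'biotin', 'color': '#00FF00',
    #                                   'seq5p': '/5Biosg/'})
    #   document.modifyMod({'note': 'updated'}, mid)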
def modifyMod(self, params: dict, mid: str, use_undostack: bool = True):
"""Modify an existing modification
Args:
params:
mid: optional, modification ID string
use_undostack: optional, default is ``True``
"""
if mid in self._mods:
c = ModifyModCommand(self, params, mid)
util.doCmd(self, c, use_undostack=use_undostack)
# end def
def destroyMod(self, mid: str, use_undostack: bool = True):
"""Destroy an existing modification
Args:
mid: optional, modification ID string
use_undostack: optional, default is ``True``
"""
if mid in self._mods:
c = RemoveModCommand(self, mid)
util.doCmd(self, c, use_undostack=use_undostack)
# end def
def getMod(self, mid: str) -> Optional[dict]:
"""Get an existing modification
Args:
mid: modification ID string
Returns:
dict or None
"""
return self._mods.get(mid)
# end def
def getModProperties(self, mid: str) -> Optional[dict]:
"""Get an existing modification properties
Args:
mid: modification ID string
Returns:
dict or None
"""
        mod = self._mods.get(mid)
        return mod['props'] if mod is not None else None
# end def
def getModLocationsSet(self, mid: str, is_internal: bool) -> dict:
"""Get an existing modifications locations in a ``Document``
(``Part``, Virtual Helix ID, ``Strand``)
Args:
mid: modification ID string
is_internal:
Returns:
dict
"""
if is_internal:
return self._mods[mid]['int_locations']
else:
return self._mods[mid]['ext_locations']
# end def
def addModInstance(self, mid: str, is_internal: bool, part: Part, key: str):
"""Add an instance of a modification to the Document
Args:
mid: modification id string
is_internal:
part: associated Part
key: key of the modification at the part level
"""
location_set = self.getModLocationsSet(mid, is_internal)
doc_key = ''.join((part.uuid, ',', key))
location_set.add(doc_key)
# end def
def removeModInstance(self, mid: str, is_internal: bool, part: Part, key: str):
"""Remove an instance of a modification from the Document
Args:
mid: modification id string
is_internal:
part: associated Part
key: key of the modification at the part level
"""
location_set = self.getModLocationsSet(mid, is_internal)
doc_key = ''.join((part.uuid, ',', key))
location_set.remove(doc_key)
# end def
def modifications(self) -> dict:
"""Get a copy of the dictionary of the modifications in this ``Document``
Returns:
dictionary of the modifications
"""
mods = self._mods
res = {}
for mid in list(mods.keys()):
mod_dict = mods[mid]
res[mid] = {'props': mod_dict['props'].copy(),
'int_locations': list(mod_dict['int_locations']),
'ext_locations': list(mod_dict['ext_locations'])
}
return res
# end def
def getModStrandIdx(self, key: str) -> Tuple[Part, Strand, int]:
"""Convert a key of a mod instance relative to a part
to a part, a strand and an index
Args:
key: Mod key
Returns:
tuple of the form::
(Part, Strand, and index)
"""
keylist = key.split(',')
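        # Illustrative example (hypothetical uuid): a key like "<part_uuid>,5,1,42"
        # maps to virtual helix 5, the forward strand, base index 42.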
part_uuid = keylist[0]
id_num = int(keylist[1])
is_fwd = int(keylist[2]) # enumeration of StrandEnum.FWD or StrandEnum.REV
idx = int(keylist[3])
part = self.getPartByUUID(part_uuid)
strand = part.getStrand(is_fwd, id_num, idx)
return part, strand, idx
# end def
def getModSequence(self, mid: str, mod_type: int) -> Tuple[str, str]:
"""Getter for the modification sequence give by the arguments
Args:
mid: mod id or ``None``
mod_type: [ModEnum.END_5PRIME, ModEnum.END_3PRIME]
Returns:
tuple: of :obj:`str` of form::
(sequence, name)
"""
        mod_dict = self._mods.get(mid)
        props = {} if mod_dict is None else mod_dict['props']
        name = '' if mid is None else props['name']
        if mod_type == ModEnum.END_5PRIME:
            seq = '' if mid is None else props['seq5p']
        elif mod_type == ModEnum.END_3PRIME:
            seq = '' if mid is None else props['seq3p']
        else:
            seq = '' if mid is None else props['seqInt']
return seq, name
# end def
def getGridType(self) -> EnumType:
"""Get the current Grid type
Returns:
The current Grid type
"""
if self.activePart():
return self.activePart().getGridType()
# end def
def setGridType(self, grid_type: EnumType):
"""Set the current Grid type
"""
if self.activePart():
self.activePart().setGridType(grid_type)
# end def
# end class
| mit | 2,171,655,359,132,237,600 | 32.491329 | 107 | 0.561529 | false |
digistam/recon-ng | modules/recon/domains-hosts/ssl_san.py | 1 | 1509 | import module
# unique to module
import re
class Module(module.Module):
def __init__(self, params):
module.Module.__init__(self, params, query='SELECT DISTINCT domain FROM domains WHERE domain IS NOT NULL ORDER BY domain')
self.info = {
'Name': 'SSL SAN Lookup',
'Author': 'Zach Grace (@ztgrace) [email protected]',
'Description': 'Uses the ssltools.com site to obtain the Subject Alternative Names for a domain. Updates the \'hosts\' table with the results.',
'Comments': [
'For an alternative version see https://github.com/403labs/recon-ng_modules.'
]
}
def module_run(self, domains):
cnt = 0
new = 0
for domain in domains:
self.heading(domain, level=0)
url = 'http://www.ssltools.com/certificate_lookup/%s' % domain
html = self.request(url).text
match = re.search('<br>Subject Alternative Names :(.*?)<br>', html)
if match is None:
self.output('No Subject Alternative Names found for \'%s\'' % domain)
continue
names = match.group(1)
hosts = [x.strip() for x in names.split(',') if '*' not in x]
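            # Illustrative example: " www.example.com, *.example.com, mail.example.com"
            # yields ['www.example.com', 'mail.example.com']; wildcard names are skipped.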
for host in hosts:
self.output(host)
new += self.add_hosts(host)
cnt += 1
self.summarize(new, cnt)
| gpl-3.0 | -5,221,244,545,110,486,000 | 40.916667 | 165 | 0.520875 | false |
getsentry/zeus | zeus/models/artifact.py | 1 | 1714 | from base64 import b64decode
from io import BytesIO
from zeus.config import db
from zeus.constants import Status
from zeus.db.mixins import RepositoryBoundMixin, StandardAttributes
from zeus.db.types import Enum, File, FileData, GUID
from zeus.utils import timezone
class Artifact(RepositoryBoundMixin, StandardAttributes, db.Model):
job_id = db.Column(
GUID, db.ForeignKey("job.id", ondelete="CASCADE"), nullable=False
)
testcase_id = db.Column(
GUID, db.ForeignKey("testcase.id", ondelete="CASCADE"), nullable=True
)
name = db.Column(db.String(length=256), nullable=False)
type = db.Column(db.String(length=64), nullable=True)
file = db.Column(
File(path="artifacts"),
nullable=False,
# TODO(dcramer): this is super hacky but not sure a better way to
# do it with SQLAlchemy
default=lambda: FileData({}, default_path="artifacts"),
)
status = db.Column(Enum(Status), nullable=False, default=Status.unknown)
date_started = db.Column(db.TIMESTAMP(timezone=True), nullable=True)
date_updated = db.Column(
db.TIMESTAMP(timezone=True), nullable=True, onupdate=timezone.now
)
date_finished = db.Column(db.TIMESTAMP(timezone=True), nullable=True)
job = db.relationship("Job", innerjoin=True, uselist=False)
testcase = db.relationship("TestCase", uselist=False)
__tablename__ = "artifact"
__table_args__ = (db.Index("idx_artifact_job", "repository_id", "job_id"),)
def save_base64_content(self, base64):
content = b64decode(base64)
self.file.save(
BytesIO(content),
"{0}/{1}_{2}".format(self.job_id.hex, self.id.hex, self.name),
)
| apache-2.0 | 2,436,007,010,914,828,300 | 37.088889 | 79 | 0.672112 | false |
vpodzime/pykickstart | tests/commands/autopart.py | 1 | 6845 | #
# Chris Lumens <[email protected]>
#
# Copyright 2009 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
import unittest
from tests.baseclass import CommandTest
from pykickstart.errors import KickstartValueError
class FC3_TestCase(CommandTest):
command = "autopart"
def runTest(self):
# pass
self.assert_parse("autopart", "autopart\n")
# fail - on FC3, autopart took no options so this raises a different
# exception than later releases.
if self.__class__.__name__ == "FC3_TestCase":
self.assert_parse_error("autopart --blah", KickstartValueError)
class F9_TestCase(FC3_TestCase):
def runTest(self):
FC3_TestCase.runTest(self)
# pass
self.assert_parse("autopart --passphrase=whatever", "autopart\n")
self.assert_parse("autopart --encrypted", "autopart --encrypted\n")
self.assert_parse("autopart --encrypted --passphrase=\"whatever\"",
"autopart --encrypted --passphrase=\"whatever\"\n")
self.assert_parse("autopart --encrypted --passphrase=whatever",
"autopart --encrypted --passphrase=\"whatever\"\n")
# fail
self.assert_parse_error("autopart --passphrase")
self.assert_parse_error("autopart --encrypted --passphrase")
self.assert_parse_error("autopart --encrypted=False")
self.assert_parse_error("autopart --encrypted=True")
class F12_TestCase(F9_TestCase):
def runTest(self):
# Run F9 test case
F9_TestCase.runTest(self)
# pass
self.assert_parse("autopart --escrowcert=\"http://x/y\"", "autopart\n")
self.assert_parse("autopart --encrypted --backuppassphrase",
"autopart --encrypted\n")
self.assert_parse("autopart --encrypted --escrowcert=\"http://x/y\"",
"autopart --encrypted --escrowcert=\"http://x/y\"\n")
self.assert_parse("autopart --encrypted --escrowcert=\"http://x/y\" "
"--backuppassphrase",
"autopart --encrypted --escrowcert=\"http://x/y\" "
"--backuppassphrase\n")
self.assert_parse("autopart --encrypted --escrowcert=http://x/y",
"autopart --encrypted --escrowcert=\"http://x/y\"\n")
# fail
self.assert_parse_error("autopart --escrowcert")
self.assert_parse_error("autopart --escrowcert --backuppassphrase")
self.assert_parse_error("autopart --encrypted --escrowcert "
"--backuppassphrase")
self.assert_parse_error("autopart --backuppassphrase=False")
self.assert_parse_error("autopart --backuppassphrase=True")
class RHEL6_TestCase(F12_TestCase):
def runTest(self):
F12_TestCase.runTest(self)
# pass
self.assert_parse("autopart --cipher=foo", "autopart\n")
self.assert_parse("autopart --encrypted --cipher=3-rot13",
"autopart --encrypted --cipher=\"3-rot13\"\n")
# fail
self.assert_parse_error("autopart --cipher")
self.assert_parse_error("autopart --encrypted --cipher")
class F16_TestCase(F12_TestCase):
def runTest(self):
# Run F12 test case
F12_TestCase.runTest(self)
if "--type" not in self.optionList:
# pass
self.assert_parse("autopart --nolvm",
"autopart --nolvm\n")
# fail
self.assert_parse_error("autopart --nolvm=asdf")
self.assert_parse_error("autopart --nolvm True", KickstartValueError)
self.assert_parse_error("autopart --nolvm=1")
self.assert_parse_error("autopart --nolvm 0", KickstartValueError)
class F17_TestCase(F16_TestCase):
def runTest(self):
# Run F16 test case
F16_TestCase.runTest(self)
# pass
self.assert_parse("autopart --type=plain",
"autopart --type=plain\n")
self.assert_parse("autopart --type=partition",
"autopart --type=plain\n")
self.assert_parse("autopart --type=lvm",
"autopart --type=lvm\n")
self.assert_parse("autopart --type=btrfs",
"autopart --type=btrfs\n")
self.assert_parse("autopart --nolvm",
"autopart --type=plain\n")
# don't add --type= if none was specified
self.assert_parse("autopart",
"autopart\n")
# fail
self.assert_parse_error("autopart --type")
class F18_TestCase(F17_TestCase):
def runTest(self):
F17_TestCase.runTest(self)
# pass
self.assert_parse("autopart --cipher=foo", "autopart\n")
self.assert_parse("autopart --encrypted --cipher=3-rot13",
"autopart --encrypted --cipher=\"3-rot13\"\n")
# fail
self.assert_parse_error("autopart --cipher")
self.assert_parse_error("autopart --encrypted --cipher")
class F20_TestCase(F18_TestCase):
def runTest(self):
F18_TestCase.runTest(self)
self.assert_parse("autopart --type=thinp",
"autopart --type=thinp\n")
class F21_TestCase(F20_TestCase):
def runTest(self):
F20_TestCase.runTest(self)
# pass
self.assert_parse("autopart --fstype=ext4",
'autopart --fstype=ext4\n')
self.assert_parse("autopart --encrypted --fstype=ext4",
'autopart --encrypted --fstype=ext4\n')
self.assert_parse("autopart --type=lvm --fstype=xfs",
"autopart --type=lvm --fstype=xfs\n")
# fail
self.assert_parse_error("autopart --fstype")
self.assert_parse_error("autopart --fstype=btrfs")
self.assert_parse_error("autopart --type=btrfs --fstype=xfs")
if __name__ == "__main__":
unittest.main()
| gpl-2.0 | -5,216,144,673,085,012,000 | 38.566474 | 81 | 0.6 | false |
cduvedi/CS229-project | deeplearning/preprocess.py | 1 | 4296 | #!/usr/bin/env python
# preprocess.py - Preprocess the data
# Common imports
import os
import sys
import time
import numpy
import csv
from sklearn.preprocessing import normalize
# Imports from other custom modules
def load_set(file):
X = list()
Y = list()
filehandle = open(file, 'r')
reader = csv.reader(filehandle)
for row in reader:
labelStr, featureStr, tp = row
label = int(labelStr)
features = map(lambda x: float(x), featureStr.split(' '))
X.append(features)
Y.append(label)
xy = [X, Y]
return(xy)
def preProcessImages(datasets):
train_set_x, train_set_y = datasets[0]
valid_set_x, valid_set_y = datasets[1]
test_set_x, test_set_y = datasets[2]
# For each training image, validation image, test image
# Subtract the mean of the image from each pixel
	# Normalize the L2 norm of each image to 100
for idx, image in enumerate(train_set_x):
img_mean = numpy.mean(image)
for idy, pixel in enumerate(image):
train_set_x[idx][idy] = train_set_x[idx][idy] - img_mean
train_set_x = normalize(train_set_x, axis=1) * 100
for idx, image in enumerate(valid_set_x):
img_mean = numpy.mean(image)
for idy, pixel in enumerate(image):
valid_set_x[idx][idy] = valid_set_x[idx][idy] - img_mean
valid_set_x = normalize(valid_set_x, axis=1) * 100
for idx, image in enumerate(test_set_x):
img_mean = numpy.mean(image)
for idy, pixel in enumerate(image):
test_set_x[idx][idy] = test_set_x[idx][idy] - img_mean
test_set_x = normalize(test_set_x, axis=1) * 100
	# For each set separately, find the mean and variance of each pixel
	# Subtract the per-pixel mean from each pixel in every image of that set
	# Divide each pixel value by its per-pixel variance
train_set_x = numpy.transpose(train_set_x)
valid_set_x = numpy.transpose(valid_set_x)
test_set_x = numpy.transpose(test_set_x)
for idx, x in enumerate(train_set_x):
mean = numpy.mean(x)
var = numpy.var(x)
for idy, y in enumerate(x):
train_set_x[idx][idy] = train_set_x[idx][idy] - mean
train_set_x[idx][idy] = train_set_x[idx][idy] / var
for idx, x in enumerate(valid_set_x):
mean = numpy.mean(x)
var = numpy.var(x)
for idy, y in enumerate(x):
valid_set_x[idx][idy] = valid_set_x[idx][idy] - mean
valid_set_x[idx][idy] = valid_set_x[idx][idy] / var
for idx, x in enumerate(test_set_x):
mean = numpy.mean(x)
var = numpy.var(x)
for idy, y in enumerate(x):
test_set_x[idx][idy] = test_set_x[idx][idy] - mean
test_set_x[idx][idy] = test_set_x[idx][idy] / var
# Transpose back before returning
train_set_x = numpy.transpose(train_set_x)
valid_set_x = numpy.transpose(valid_set_x)
test_set_x = numpy.transpose(test_set_x)
ret_val = [ [train_set_x, train_set_y], [valid_set_x, valid_set_y], [test_set_x, test_set_y] ]
return ret_val
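# Worked example of the per-image step above on a toy 1x4 "image"
# (illustrative numbers, assuming sklearn's normalize() with the default L2 norm):
#   image    = [1.0, 2.0, 3.0, 4.0]       mean = 2.5
#   centered = [-1.5, -0.5, 0.5, 1.5]     L2 norm ~ 2.236
#   normalize(centered) * 100 ~ [-67.08, -22.36, 22.36, 67.08]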
def load_data():
train_set_x, train_set_y = load_set('/home/cduvedi/theano/train.csv')
test_set_x, test_set_y = load_set('/home/cduvedi/theano/test.csv')
valid_set_x, valid_set_y = load_set('/home/cduvedi/theano/valid.csv')
print '... loaded data'
print 'train: ', len(train_set_x)
print 'test: ', len(test_set_x)
print 'valid: ', len(valid_set_x)
ret_val = [ [train_set_x, train_set_y], [valid_set_x, valid_set_y], [test_set_x, test_set_y] ]
return ret_val
def write_data(datasets):
train_set_x, train_set_y = datasets[0]
valid_set_x, valid_set_y = datasets[1]
test_set_x, test_set_y = datasets[2]
train_file = open('train_preprocessed.csv', 'w')
for idy, y in enumerate(train_set_y):
train_file.write(str(y) + ',')
for idx, x in enumerate(train_set_x[idy]):
train_file.write(str(train_set_x[idy][idx]) + ' ')
train_file.write('\n')
train_file.close()
valid_file = open('valid_preprocessed.csv', 'w')
for idy, y in enumerate(valid_set_y):
valid_file.write(str(y) + ',')
for idx, x in enumerate(valid_set_x[idy]):
valid_file.write(str(valid_set_x[idy][idx]) + ' ')
valid_file.write('\n')
valid_file.close()
test_file = open('test_preprocessed.csv', 'w')
for idy, y in enumerate(test_set_y):
test_file.write(str(y) + ',')
for idx, x in enumerate(test_set_x[idy]):
test_file.write(str(test_set_x[idy][idx]) + ' ')
test_file.write('\n')
test_file.close()
if __name__ == '__main__':
datasets = load_data()
datasets = preProcessImages(datasets)
write_data(datasets)
| gpl-2.0 | 6,570,296,486,117,475,000 | 28.627586 | 95 | 0.664804 | false |
abinit/abinit | developers/pltdiff.py | 1 | 5645 | #!/usr/bin/env python
"""
This script uses matplotlib to compare reference output file(s) containing data in tabular
form with the corresponding file(s) generated by the automatic test.
Multiple files can be specified via the command line interface.
Example:
pltdiff.py t88o_DS2_PHDOS
Usage: pltdiff.py file1 [file2, ...]
"""
from __future__ import unicode_literals, division, print_function, absolute_import
import sys
import os
import numpy as np
def find_top_build_tree(start_path, with_abinit=True, ntrials=10):
"""
Returns the absolute path of the ABINIT build tree.
Assume start_path is within the build tree.
Raises:
`RuntimeError` if build tree is not found after ntrials attempts.
"""
abs_path = os.path.abspath(start_path)
trial = 0
while trial <= ntrials:
config_h = os.path.join(abs_path, "config.h")
abinit_bin = os.path.join(abs_path, "src", "98_main", "abinit")
# Check if we are in the top of the ABINIT source tree
if with_abinit:
found = os.path.isfile(config_h) and os.path.isfile(abinit_bin)
else:
found = os.path.isfile(config_h)
if found:
return abs_path
else:
abs_path, tail = os.path.split(abs_path)
trial += 1
raise RuntimeError("Cannot find the ABINIT build tree after %s trials" % ntrials)
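# Example (hypothetical paths, not taken from a real build): starting from
#   /home/user/abinit/build/tests/v8
# the loop above looks for config.h (and src/98_main/abinit when
# with_abinit=True) in each successive parent directory and would return
#   /home/user/abinit/build
# assuming that is where the build tree was configured.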
def get_array_from_path(path):
"""Parse text file with data in tabular form, return numpy array."""
data = []
with open(path, "rt") as fh:
for l in fh:
l = l.strip()
if l.startswith("#") or not l: continue
data.append(list(map(float, l.split())))
return np.array(data).T.copy()
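# Small illustration of the parser above (hypothetical file contents):
# a file whose data lines read
#   0.0  1.0  2.0
#   0.5  1.5  2.5
# is returned transposed, so data[0] holds the x column [0.0, 0.5] and
# data[1], data[2] hold the tabulated functions f(x) and g(x).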
def get_axarray_fig_plt(ax_array, nrows=1, ncols=1, sharex=False, sharey=False,
squeeze=True, subplot_kw=None, gridspec_kw=None,
**fig_kw):
"""
Helper function used in plot functions that accept an optional array of Axes
as argument. If ax_array is None, we build the `matplotlib` figure and
create the array of Axes by calling plt.subplots else we return the
current active figure.
Returns:
ax: Array of :class:`Axes` objects
figure: matplotlib figure
plt: matplotlib pyplot module.
"""
import matplotlib.pyplot as plt
if ax_array is None:
fig, ax_array = plt.subplots(nrows=nrows, ncols=ncols, sharex=sharex,
sharey=sharey, squeeze=squeeze,
subplot_kw=subplot_kw,
gridspec_kw=gridspec_kw, **fig_kw)
else:
fig = plt.gcf()
ax_array = np.reshape(np.array(ax_array), (nrows, ncols))
if squeeze:
if ax_array.size == 1:
ax_array = ax_array[0]
elif any(s == 1 for s in ax_array.shape):
ax_array = ax_array.ravel()
return ax_array, fig, plt
def compare_data(ref_data, out_data, title):
print("ref_data.shape:", ref_data.shape, "out_data.shape:", out_data.shape)
#if ref_data.shape == out_data.shape:
# print(ref_data - out_data)
nrows = min(len(ref_data), len(out_data)) - 1
axlist, fig, plt = get_axarray_fig_plt(None, nrows=nrows, ncols=1, sharex=True, sharey=False,
squeeze=True, subplot_kw=None, gridspec_kw=None)
axlist = np.array(axlist).ravel()
# Assume file contains data in the form | x | f(x) | g(x) | ....
ref_xs = ref_data[0]
out_xs = out_data[0]
for i, ax in enumerate(axlist):
ax.plot(ref_xs, ref_data[i+1], "--", label="ref" if i == 0 else None)
ax.plot(out_xs, out_data[i+1], ":", label="out" if i == 0 else None)
ax.grid(True)
if i == 0: ax.legend(loc="best", shadow=True)
fig.suptitle(title)
plt.show()
def main():
if len(sys.argv) == 1 or "-h" in sys.argv or "--help" in sys.argv:
print(__doc__)
return 1
out_paths = sys.argv[1:]
out_paths = [os.path.abspath(p) for p in out_paths]
find_refs = len(out_paths) == 1
if find_refs:
top = find_top_build_tree(".", with_abinit=False)
# Need top level directory for ref files so check if tests/v1 exists else try ..
v1refs = os.path.join(top, "tests", "v1", "Refs")
print(v1refs)
if not os.path.isdir(v1refs):
top = os.path.join(top, "..")
v1refs = os.path.join(top, "tests", "v1", "Refs")
if not os.path.isdir(v1refs):
raise RuntimeError("Cannot find top-level abinit directory containing ~abinit/v1/Refs")
for out_path in out_paths:
            # Get the name of the subsuite from dirname e.g. v7_t85-t86-t87-t88-t89
suite_dir = os.path.basename(os.path.dirname(out_path)).split("_")[0]
ref_path = os.path.join(top, "tests", suite_dir, "Refs", os.path.basename(out_path))
print("Comparing:", os.path.relpath(out_path), "with", ref_path)
if not os.path.isfile(ref_path):
raise RuntimeError("Cannot find reference file: `%s`" % ref_path)
ref_data = get_array_from_path(ref_path)
out_data = get_array_from_path(out_path)
compare_data(ref_data, out_data, os.path.basename(out_path))
else:
assert len(out_paths) == 2
ref_data = get_array_from_path(out_paths[0])
out_data = get_array_from_path(out_paths[1])
compare_data(ref_data, out_data, os.path.basename(out_paths[0]))
return 0
if __name__ == "__main__":
sys.exit(main())
| gpl-3.0 | -4,034,028,275,814,302,700 | 34.727848 | 103 | 0.586891 | false |
melqkiades/yelp | source/python/topicmodeling/context/topic_latex_generator.py | 1 | 19831 |
import colorsys
import time
import nltk
import re
from pylatex.base_classes import CommandBase, Arguments
from pylatex import Document, Section, Subsection, Command, UnsafeCommand, Tabular, LongTabu, Itemize, NoEscape
from pylatex.package import Package
import pattern
from etl import ETLUtils
from nlp import nlp_utils
from topicmodeling.context import topic_model_creator
from topicmodeling.context.topic_model_analyzer import \
split_topic
from utils import utilities
from utils.utilities import context_words
from utils.constants import Constants
import sys
reload(sys)
sys.setdefaultencoding('utf8')
class ColorBoxCommand(CommandBase):
"""
A class representing a custom LaTeX command.
This class represents a custom LaTeX command named
``exampleCommand``.
"""
_latex_name = 'exampleCommand'
packages = [Package('color')]
def extract_words(text):
"""
    Tokenizes the given text into lowercase words, sentence by sentence
    :param text: the text to tokenize
    :return: a flat list with the lowercased words of the text
"""
# Remove double whitespaces
paragraph = re.sub("\s\s+", " ", text)
# Split the paragraph in sentences
sentence_tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
sentences = sentence_tokenizer.tokenize(paragraph)
review_words = []
for sentence in sentences:
words = [word.lower() for word in nltk.word_tokenize(sentence)]
review_words.extend(words)
return review_words
def extract_topic_words(topic_model, topic_ids):
num_terms = Constants.TOPIC_MODEL_STABILITY_NUM_TERMS
topic_words_map = {}
for topic_id in topic_ids:
probability_words =\
topic_model.print_topic(topic_id, num_terms).split(' + ')
words = set([word.split('*')[1] for word in probability_words])
topic_words_map[topic_id] = words
return topic_words_map
def bold_mapper(text):
return '\\textbf{%s}' % text
def background_color_mapper(text_color):
text_color_split = text_color.split('|||')
text = text_color_split[0]
red = float(text_color_split[1])
green = float(text_color_split[2])
blue = float(text_color_split[3])
return '\\colorbox[rgb]{%f,%f,%f}{%s}' % (red, green, blue, text)
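# Illustrative call (made-up colour values):
#   background_color_mapper('pizza|||0.9|||0.5|||0.4')
# returns
#   '\\colorbox[rgb]{0.900000,0.500000,0.400000}{pizza}'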
class TopicLatexGenerator:
def __init__(self, lda_based_context):
self.lda_based_context = lda_based_context
self.doc =\
Document(Constants.ITEM_TYPE + '-topic-models-nouns-complete-3')
self.num_cols = Constants.TOPIC_MODEL_STABILITY_NUM_TERMS
self.num_topics = Constants.TOPIC_MODEL_NUM_TOPICS
self.rgb_tuples = None
self.automatic_context_topic_colors = None
self.keyword_context_topic_colors = None
self.manual_context_topic_colors = None
self.automatic_context_topic_ids = None
self.keyword_context_topic_ids = None
self.manual_context_topic_ids = None
self.automatic_context_topic_words = None
self.keyword_context_topic_words = None
self.manual_context_topic_words = None
self.headers = None
self.topic_words_map = None
self.table_format = '|c|' + 'c|' * (self.num_cols + 1)
self.tagger = nltk.PerceptronTagger()
self.tag_count_map = {'NN': 0, 'JJ': 0, 'VB': 0}
self.init_colors()
self.init_headers()
self.init_topic_words()
self.init_topic_ids()
self.doc.packages.append(Package('color'))
new_comm = UnsafeCommand(
'newcommand', '\exampleCommand', options=4,
extra_arguments=r'\colorbox[rgb]{#1,#2,#3}{#4} \color{black}')
self.doc.append(new_comm)
new_comm2 = UnsafeCommand('tiny')
self.doc.append(new_comm2)
def init_colors(self):
golden_ratio = 0.618033988749895
hsv_tuples = [((x * golden_ratio) % 1.0, 0.5, 0.95)
for x in range(self.num_topics)]
self.rgb_tuples = map(lambda hsv: colorsys.hsv_to_rgb(*hsv), hsv_tuples)
color_index = 0
self.automatic_context_topic_colors = {}
for topic in self.lda_based_context.context_rich_topics:
topic_id = topic[0]
self.automatic_context_topic_colors[topic_id] = color_index
# self.rgb_tuples[color_index]
color_index += 1
#
# color_index = 0
# self.keyword_context_topic_colors = {}
# for topic_id in range(self.num_topics):
# topic_score = split_topic(
# self.lda_based_context.topic_model.print_topic(
# topic_id, topn=self.num_cols))
# if topic_score['score'] > 0:
# self.keyword_context_topic_colors[topic_id] = color_index
# color_index += 1
color_index = 0
self.manual_context_topic_colors = {}
for topic in context_words[Constants.ITEM_TYPE]:
self.manual_context_topic_colors[topic] = color_index
color_index += 1
def init_headers(self):
self.headers = ['ID', 'Ratio']
for column_index in range(self.num_cols):
self.headers.append('Word ' + str(column_index + 1))
def init_topic_words(self):
# pass
self.topic_words_map = \
extract_topic_words(
self.lda_based_context, range(self.num_topics))
def init_topic_ids(self):
self.automatic_context_topic_ids = [
topic[0] for topic in self.lda_based_context.context_rich_topics]
#
# self.keyword_context_topic_ids = []
# for topic_id in range(self.num_topics):
# topic_score = split_topic(
# self.lda_based_context.topic_model.print_topic(
# topic_id, topn=self.num_cols))
# if topic_score['score'] > 0:
# self.keyword_context_topic_ids.append(topic_id)
self.manual_context_topic_ids = range(len(context_words[Constants.ITEM_TYPE]))
def create_automatic_context_topics(self):
with self.doc.create(Section(
Constants.ITEM_TYPE.title() +
' context-rich topic models (automatic)')):
num_context_topics = len(self.lda_based_context.context_rich_topics)
with self.doc.create(LongTabu(self.table_format)) as table:
table.add_hline()
table.add_row(self.headers, mapper=bold_mapper)
table.add_hline()
for topic in self.lda_based_context.context_rich_topics:
topic_id = topic[0]
ratio = topic[1]
color_id = self.automatic_context_topic_colors[topic_id]
color = self.rgb_tuples[color_id]
id_cell = str(topic_id) + str('|||') + str(color[0]) + \
'|||' + str(color[1]) + '|||' + str(color[2])
ratio_cell = str(ratio) + str('|||') + str(color[0]) + \
'|||' + str(color[1]) + '|||' + str(color[2])
row = [id_cell, ratio_cell]
# row = [str(topic_id + 1)]
topic_words =\
self.lda_based_context.print_topic(
topic_id, self.num_cols).split(' + ')
for word in topic_words:
word_color = word + str('|||') + str(color[0]) +\
'|||' + str(color[1]) + '|||' +\
str(color[2])
row.append(word_color)
# row.extend(topic_words)
table.add_row(row, mapper=background_color_mapper)
table.add_hline()
self.doc.append(UnsafeCommand('par'))
self.doc.append(
'Number of context-rich topics: %d' % num_context_topics)
def create_keyword_context_topics(self):
with self.doc.create(Section(
Constants.ITEM_TYPE.title() +
' context-rich topic models (keyword)')):
num_context_topics = 0
with self.doc.create(Tabular(self.table_format)) as table:
table.add_hline()
table.add_row(self.headers, mapper=bold_mapper)
table.add_hline()
for topic_id in range(self.num_topics):
topic_score = split_topic(
self.lda_based_context.topic_model.print_topic(
topic_id, topn=self.num_cols))
if topic_score['score'] > 0:
color_id = self.keyword_context_topic_colors[topic_id]
color = self.rgb_tuples[color_id]
id_cell = str(topic_id)+str('|||')+str(color[0]) + \
'|||'+str(color[1])+'|||'+str(color[2])
row = [id_cell]
for column_index in range(self.num_cols):
word = topic_score['word' + str(column_index)]
word_color = word+str('|||')+str(color[0])+'|||' + \
str(color[1])+'|||'+str(color[2])
row.append(word_color)
table.add_row(row, mapper=background_color_mapper)
num_context_topics += 1
table.add_hline()
self.doc.append(UnsafeCommand('par'))
self.doc.append(
'Number of context-rich topics: %d' % num_context_topics)
def create_manual_context_topics(self):
with self.doc.create(Section(
Constants.ITEM_TYPE.title() +
' context-rich topic models (manual)')):
with self.doc.create(Tabular(self.table_format)) as table:
table.add_hline()
table.add_row(self.headers, mapper=bold_mapper)
table.add_hline()
for topic in context_words[Constants.ITEM_TYPE]:
color_id = self.manual_context_topic_colors[topic]
color = self.rgb_tuples[color_id]
id_cell = str(topic)+str('|||')+str(color[0]) + \
'|||'+str(color[1])+'|||'+str(color[2])
row = [id_cell]
for word in list(context_words[Constants.ITEM_TYPE][topic])[:self.num_cols + 1]:
word_color = word+str('|||')+str(color[0])+'|||' + \
str(color[1])+'|||'+str(color[2])
row.append(word_color)
table.add_row(row, mapper=background_color_mapper)
table.add_hline()
self.doc.append(UnsafeCommand('par'))
# self.doc.append(
# 'Number of context-rich topics: %d' %
# len(context_words[Constants.ITEM_TYPE]))
self.doc.append(ColorBoxCommand(
arguments=Arguments(
1, 1, 1, 'Number of context-rich topics: ' +
str(len(context_words[Constants.ITEM_TYPE])))))
def create_reviews(self):
with self.doc.create(Section('Reviews')):
with self.doc.create(Subsection('A subsection')):
sg_map = {'yes': 'specific', 'no': 'generic'}
review_index = 0
# full_records = ETLUtils.load_json_file(
# Constants.FULL_PROCESSED_RECORDS_FILE)
records_file = Constants.DATASET_FOLDER +\
'classified_' + Constants.ITEM_TYPE + '_reviews.json'
full_records = ETLUtils.load_json_file(records_file)
for record in full_records:
with self.doc.create(Subsection(
'Review %d (%s)' % (
(review_index + 1), sg_map[record['specific']]))):
# self.build_text(record[Constants.TEXT_FIELD])
# for doc_part in self.build_text(
# record[Constants.TEXT_FIELD]):
for doc_part in self.build_text_automatic(record):
self.doc.append(doc_part)
review_index += 1
def generate_pdf(self):
self.create_automatic_context_topics()
# self.create_keyword_context_topics()
# self.create_manual_context_topics()
self.create_reviews()
self.doc.generate_pdf()
self.doc.generate_tex()
def build_text(self, review):
words = extract_words(review)
doc_parts = []
new_words = []
for word in words:
word_found = False
for topic_id in self.automatic_context_topic_ids:
if word in self.topic_words_map[topic_id]:
self.tag_word(word)
doc_parts.append(' '.join(new_words))
# doc_parts.append('topic: %d word: %s' % (topic_id, word))
color_id = self.automatic_context_topic_colors[topic_id]
color = self.rgb_tuples[color_id]
# doc_parts.append(ColorBoxCommand(
# arguments=Arguments(
# color[0], color[1], color[2], word)))
new_words.append(
'\\colorbox[rgb]{' +
str(color[0]) + ',' + str(color[1]) + ',' +
str(color[2]) + '}{' + word + '}')
# new_words = []
word_found = True
break
if not word_found:
new_words.append(word)
print('new_words', new_words)
self.doc.append(NoEscape(' '.join(new_words)))
# return doc_parts
def build_text_automatic(self, record):
text = record[Constants.TEXT_FIELD]
sentences = nlp_utils.get_sentences(text)
lemmatized_words = []
for sentence in sentences:
lemmatized_words.append(nlp_utils.lemmatize_sentence(
sentence, nltk.re.compile(''),
min_length=1, max_length=100))
doc_parts = []
itemize = Itemize()
for sentence in lemmatized_words:
new_words = []
itemize.add_item('')
for tagged_word in sentence:
tag = tagged_word[1]
word = tagged_word[0]
singular = pattern.text.en.singularize(word)
word_found = False
# if tag == 'VBD':
# new_words.append(
# '\\colorbox[rgb]{0.5,0.5,0.5}{' + word + '}')
# word_found = True
#
# if tag.startswith('PRP'):
# new_words.append(
# '\\colorbox[rgb]{0.85,0.85,0.85}{' + word + '}')
# word_found = True
for topic_id in self.automatic_context_topic_ids:
if word in self.topic_words_map[topic_id]:
# if singular in context_words[Constants.ITEM_TYPE][topic]:
self.tag_word(word)
color_id = self.automatic_context_topic_colors[topic_id]
color = self.rgb_tuples[color_id]
new_words.append(
'\\colorbox[rgb]{' +
str(color[0]) + ',' + str(color[1]) + ',' +
str(color[2]) + '}{' + word + '}')
word_found = True
break
if not word_found:
new_words.append(word)
itemize.append(NoEscape(' '.join(new_words)))
doc_parts.append(itemize)
return doc_parts
def build_text_manual(self, record):
text = record[Constants.TEXT_FIELD]
sentences = nlp_utils.get_sentences(text)
lemmatized_words = []
for sentence in sentences:
lemmatized_words.append(nlp_utils.lemmatize_sentence(
sentence, nltk.re.compile(''),
min_length=1, max_length=100))
doc_parts = []
itemize = Itemize()
for sentence in lemmatized_words:
new_words = []
itemize.add_item('')
for tagged_word in sentence:
tag = tagged_word[1]
word = tagged_word[0]
singular = pattern.text.en.singularize(word)
word_found = False
if tag == 'VBD':
new_words.append('\\colorbox[rgb]{0.5,0.5,0.5}{'+ word + '}')
word_found = True
if tag.startswith('PRP'):
new_words.append('\\colorbox[rgb]{0.85,0.85,0.85}{' + word + '}')
word_found = True
for topic in context_words[Constants.ITEM_TYPE]:
if singular in context_words[Constants.ITEM_TYPE][topic]:
color_id = self.manual_context_topic_colors[topic]
color = self.rgb_tuples[color_id]
new_words.append(
'\\colorbox[rgb]{' +
str(color[0]) + ',' + str(color[1]) + ',' +
str(color[2]) + '}{' + word + '}')
word_found = True
break
if not word_found:
new_words.append(word)
itemize.append(NoEscape(' '.join(new_words)))
doc_parts.append(itemize)
return doc_parts
def tag_word(self, word):
tagged_word = self.tagger.tag([word])[0]
word_tag = tagged_word[1]
if word_tag.startswith('NN'):
self.tag_count_map['NN'] += 1
elif word_tag.startswith('JJ'):
self.tag_count_map['JJ'] += 1
elif word_tag.startswith('VB'):
self.tag_count_map['VB'] += 1
else:
if word_tag not in self.tag_count_map:
self.tag_count_map[word_tag] = 0
self.tag_count_map[word_tag] += 1
def get_topic_statistics(self, topic_ids):
tags_dict = {'NN': 0, 'JJ': 0, 'VB': 0}
topic_words = set()
for topic_id in topic_ids:
topic_words.update(self.topic_words_map[topic_id])
print(topic_words)
for word in topic_words:
tagged_word = self.tagger.tag([word])[0]
word_tag = tagged_word[1]
if word_tag.startswith('NN'):
tags_dict['NN'] += 1
elif word_tag.startswith('JJ'):
tags_dict['JJ'] += 1
elif word_tag.startswith('VB'):
tags_dict['VB'] += 1
else:
if word_tag not in tags_dict:
tags_dict[word_tag] = 0
tags_dict[word_tag] += 1
print(tags_dict)
total_count = 0.0
for tag_count in tags_dict.values():
total_count += tag_count
print('nouns percentage: %f%%' % (tags_dict['NN'] / total_count * 100))
return tags_dict
def main():
utilities.plant_seeds()
records = ETLUtils.load_json_file(Constants.PROCESSED_RECORDS_FILE)
if Constants.SEPARATE_TOPIC_MODEL_RECSYS_REVIEWS:
num_records = len(records)
records = records[:num_records / 2]
print('num_reviews', len(records))
context_extractor = \
topic_model_creator.create_topic_model(records, None, None)
topic_latex_generator = TopicLatexGenerator(context_extractor)
topic_latex_generator.generate_pdf()
start = time.time()
main()
end = time.time()
total_time = end - start
print("Total time = %f seconds" % total_time)
| lgpl-2.1 | -1,867,325,663,289,591,000 | 37.136538 | 111 | 0.517977 | false |
sajeeshcs/nested_quota_final | nova/compute/rpcapi.py | 1 | 46820 | # Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Client side of the compute RPC API.
"""
from oslo_config import cfg
import oslo_messaging as messaging
from oslo_serialization import jsonutils
from nova import exception
from nova.i18n import _, _LW
from nova import objects
from nova.objects import base as objects_base
from nova.openstack.common import log as logging
from nova import rpc
rpcapi_opts = [
cfg.StrOpt('compute_topic',
default='compute',
help='The topic compute nodes listen on'),
]
CONF = cfg.CONF
CONF.register_opts(rpcapi_opts)
rpcapi_cap_opt = cfg.StrOpt('compute',
help='Set a version cap for messages sent to compute services. If you '
'plan to do a live upgrade from havana to icehouse, you should '
'set this option to "icehouse-compat" before beginning the live '
'upgrade procedure.')
CONF.register_opt(rpcapi_cap_opt, 'upgrade_levels')
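# Illustrative nova.conf snippet (values are examples only): pinning the
# compute RPC interface during a rolling upgrade is done through the
# [upgrade_levels] section, e.g.
#   [upgrade_levels]
#   compute = icehouse
# which, via VERSION_ALIASES below, caps outgoing messages at version 3.23.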
LOG = logging.getLogger(__name__)
def _compute_host(host, instance):
'''Get the destination host for a message.
:param host: explicit host to send the message to.
:param instance: If an explicit host was not specified, use
instance['host']
:returns: A host
'''
if host:
return host
if not instance:
raise exception.NovaException(_('No compute host specified'))
if not instance['host']:
raise exception.NovaException(_('Unable to find host for '
'Instance %s') % instance['uuid'])
return instance['host']
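# Illustrative behaviour of the helper above (made-up values):
#   _compute_host('node-2', instance)                   -> 'node-2'
#   _compute_host(None, {'host': 'node-1', 'uuid': u})  -> 'node-1'
#   _compute_host(None, None)                           -> raises NovaException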
class ComputeAPI(object):
'''Client side of the compute rpc API.
API version history:
* 1.0 - Initial version.
* 1.1 - Adds get_host_uptime()
* 1.2 - Adds check_can_live_migrate_[destination|source]
* 1.3 - Adds change_instance_metadata()
* 1.4 - Remove instance_uuid, add instance argument to
reboot_instance()
* 1.5 - Remove instance_uuid, add instance argument to
pause_instance(), unpause_instance()
* 1.6 - Remove instance_uuid, add instance argument to
suspend_instance()
* 1.7 - Remove instance_uuid, add instance argument to
get_console_output()
* 1.8 - Remove instance_uuid, add instance argument to
add_fixed_ip_to_instance()
* 1.9 - Remove instance_uuid, add instance argument to attach_volume()
* 1.10 - Remove instance_id, add instance argument to
check_can_live_migrate_destination()
* 1.11 - Remove instance_id, add instance argument to
check_can_live_migrate_source()
* 1.12 - Remove instance_uuid, add instance argument to
confirm_resize()
* 1.13 - Remove instance_uuid, add instance argument to detach_volume()
* 1.14 - Remove instance_uuid, add instance argument to finish_resize()
* 1.15 - Remove instance_uuid, add instance argument to
finish_revert_resize()
* 1.16 - Remove instance_uuid, add instance argument to
get_diagnostics()
* 1.17 - Remove instance_uuid, add instance argument to
get_vnc_console()
* 1.18 - Remove instance_uuid, add instance argument to inject_file()
* 1.19 - Remove instance_uuid, add instance argument to
inject_network_info()
* 1.20 - Remove instance_id, add instance argument to
post_live_migration_at_destination()
* 1.21 - Remove instance_uuid, add instance argument to
power_off_instance() and stop_instance()
* 1.22 - Remove instance_uuid, add instance argument to
power_on_instance() and start_instance()
* 1.23 - Remove instance_id, add instance argument to
pre_live_migration()
* 1.24 - Remove instance_uuid, add instance argument to
rebuild_instance()
* 1.25 - Remove instance_uuid, add instance argument to
remove_fixed_ip_from_instance()
* 1.26 - Remove instance_id, add instance argument to
remove_volume_connection()
* 1.27 - Remove instance_uuid, add instance argument to
rescue_instance()
* 1.28 - Remove instance_uuid, add instance argument to reset_network()
* 1.29 - Remove instance_uuid, add instance argument to
resize_instance()
* 1.30 - Remove instance_uuid, add instance argument to
resume_instance()
* 1.31 - Remove instance_uuid, add instance argument to revert_resize()
* 1.32 - Remove instance_id, add instance argument to
rollback_live_migration_at_destination()
* 1.33 - Remove instance_uuid, add instance argument to
set_admin_password()
* 1.34 - Remove instance_uuid, add instance argument to
snapshot_instance()
* 1.35 - Remove instance_uuid, add instance argument to
unrescue_instance()
* 1.36 - Remove instance_uuid, add instance argument to
change_instance_metadata()
* 1.37 - Remove instance_uuid, add instance argument to
terminate_instance()
* 1.38 - Changes to prep_resize():
* remove instance_uuid, add instance
* remove instance_type_id, add instance_type
* remove topic, it was unused
* 1.39 - Remove instance_uuid, add instance argument to run_instance()
* 1.40 - Remove instance_id, add instance argument to live_migration()
* 1.41 - Adds refresh_instance_security_rules()
* 1.42 - Add reservations arg to prep_resize(), resize_instance(),
finish_resize(), confirm_resize(), revert_resize() and
finish_revert_resize()
* 1.43 - Add migrate_data to live_migration()
* 1.44 - Adds reserve_block_device_name()
* 2.0 - Remove 1.x backwards compat
* 2.1 - Adds orig_sys_metadata to rebuild_instance()
* 2.2 - Adds slave_info parameter to add_aggregate_host() and
remove_aggregate_host()
* 2.3 - Adds volume_id to reserve_block_device_name()
* 2.4 - Add bdms to terminate_instance
* 2.5 - Add block device and network info to reboot_instance
* 2.6 - Remove migration_id, add migration to resize_instance
* 2.7 - Remove migration_id, add migration to confirm_resize
* 2.8 - Remove migration_id, add migration to finish_resize
* 2.9 - Add publish_service_capabilities()
* 2.10 - Adds filter_properties and request_spec to prep_resize()
* 2.11 - Adds soft_delete_instance() and restore_instance()
* 2.12 - Remove migration_id, add migration to revert_resize
* 2.13 - Remove migration_id, add migration to finish_revert_resize
* 2.14 - Remove aggregate_id, add aggregate to add_aggregate_host
* 2.15 - Remove aggregate_id, add aggregate to remove_aggregate_host
* 2.16 - Add instance_type to resize_instance
* 2.17 - Add get_backdoor_port()
* 2.18 - Add bdms to rebuild_instance
* 2.19 - Add node to run_instance
* 2.20 - Add node to prep_resize
* 2.21 - Add migrate_data dict param to pre_live_migration()
* 2.22 - Add recreate, on_shared_storage and host arguments to
rebuild_instance()
* 2.23 - Remove network_info from reboot_instance
* 2.24 - Added get_spice_console method
* 2.25 - Add attach_interface() and detach_interface()
* 2.26 - Add validate_console_port to ensure the service connects to
vnc on the correct port
* 2.27 - Adds 'reservations' to terminate_instance() and
soft_delete_instance()
... Grizzly supports message version 2.27. So, any changes to existing
methods in 2.x after that point should be done such that they can
handle the version_cap being set to 2.27.
* 2.28 - Adds check_instance_shared_storage()
* 2.29 - Made start_instance() and stop_instance() take new-world
instance objects
* 2.30 - Adds live_snapshot_instance()
* 2.31 - Adds shelve_instance(), shelve_offload_instance, and
unshelve_instance()
* 2.32 - Make reboot_instance take a new world instance object
* 2.33 - Made suspend_instance() and resume_instance() take new-world
instance objects
* 2.34 - Added swap_volume()
* 2.35 - Made terminate_instance() and soft_delete_instance() take
new-world instance objects
* 2.36 - Made pause_instance() and unpause_instance() take new-world
instance objects
* 2.37 - Added the legacy_bdm_in_spec parameter to run_instance
* 2.38 - Made check_can_live_migrate_[destination|source] take
new-world instance objects
* 2.39 - Made revert_resize() and confirm_resize() take new-world
instance objects
* 2.40 - Made reset_network() take new-world instance object
* 2.41 - Make inject_network_info take new-world instance object
* 2.42 - Splits snapshot_instance() into snapshot_instance() and
backup_instance() and makes them take new-world instance
objects.
* 2.43 - Made prep_resize() take new-world instance object
* 2.44 - Add volume_snapshot_create(), volume_snapshot_delete()
* 2.45 - Made resize_instance() take new-world objects
* 2.46 - Made finish_resize() take new-world objects
* 2.47 - Made finish_revert_resize() take new-world objects
... Havana supports message version 2.47. So, any changes to existing
methods in 2.x after that point should be done such that they can
handle the version_cap being set to 2.47.
* 2.48 - Make add_aggregate_host() and remove_aggregate_host() take
new-world objects
* ... - Remove live_snapshot() that was never actually used
* 3.0 - Remove 2.x compatibility
* 3.1 - Update get_spice_console() to take an instance object
* 3.2 - Update get_vnc_console() to take an instance object
* 3.3 - Update validate_console_port() to take an instance object
* 3.4 - Update rebuild_instance() to take an instance object
* 3.5 - Pass preserve_ephemeral flag to rebuild_instance()
* 3.6 - Make volume_snapshot_{create,delete} use new-world objects
* 3.7 - Update change_instance_metadata() to take an instance object
* 3.8 - Update set_admin_password() to take an instance object
* 3.9 - Update rescue_instance() to take an instance object
* 3.10 - Added get_rdp_console method
* 3.11 - Update unrescue_instance() to take an object
* 3.12 - Update add_fixed_ip_to_instance() to take an object
* 3.13 - Update remove_fixed_ip_from_instance() to take an object
* 3.14 - Update post_live_migration_at_destination() to take an object
* 3.15 - Adds filter_properties and node to unshelve_instance()
* 3.16 - Make reserve_block_device_name and attach_volume use new-world
objects, and add disk_bus and device_type params to
reserve_block_device_name, and bdm param to attach_volume
* 3.17 - Update attach_interface and detach_interface to take an object
* 3.18 - Update get_diagnostics() to take an instance object
* Removed inject_file(), as it was unused.
* 3.19 - Update pre_live_migration to take instance object
* 3.20 - Make restore_instance take an instance object
* 3.21 - Made rebuild take new-world BDM objects
* 3.22 - Made terminate_instance take new-world BDM objects
* 3.23 - Added external_instance_event()
* build_and_run_instance was added in Havana and not used or
documented.
... Icehouse supports message version 3.23. So, any changes to
existing methods in 3.x after that point should be done such that they
can handle the version_cap being set to 3.23.
* 3.24 - Update rescue_instance() to take optional rescue_image_ref
* 3.25 - Make detach_volume take an object
* 3.26 - Make live_migration() and
rollback_live_migration_at_destination() take an object
* ... Removed run_instance()
* 3.27 - Make run_instance() accept a new-world object
* 3.28 - Update get_console_output() to accept a new-world object
* 3.29 - Make check_instance_shared_storage accept a new-world object
* 3.30 - Make remove_volume_connection() accept a new-world object
* 3.31 - Add get_instance_diagnostics
* 3.32 - Add destroy_disks and migrate_data optional parameters to
rollback_live_migration_at_destination()
* 3.33 - Make build_and_run_instance() take a NetworkRequestList object
* 3.34 - Add get_serial_console method
* 3.35 - Make reserve_block_device_name return a BDM object
... Juno supports message version 3.35. So, any changes to
existing methods in 3.x after that point should be done such that they
can handle the version_cap being set to 3.35.
* 3.36 - Make build_and_run_instance() send a Flavor object
* 3.37 - Add clean_shutdown to stop, resize, rescue, shelve, and
shelve_offload
* 3.38 - Add clean_shutdown to prep_resize
'''
VERSION_ALIASES = {
'icehouse': '3.23',
'juno': '3.35',
}
def __init__(self):
super(ComputeAPI, self).__init__()
target = messaging.Target(topic=CONF.compute_topic, version='3.0')
version_cap = self.VERSION_ALIASES.get(CONF.upgrade_levels.compute,
CONF.upgrade_levels.compute)
serializer = objects_base.NovaObjectSerializer()
self.client = self.get_client(target, version_cap, serializer)
# Cells overrides this
def get_client(self, target, version_cap, serializer):
return rpc.get_client(target,
version_cap=version_cap,
serializer=serializer)
def add_aggregate_host(self, ctxt, aggregate, host_param, host,
slave_info=None):
'''Add aggregate host.
:param ctxt: request context
:param aggregate:
:param host_param: This value is placed in the message to be the 'host'
parameter for the remote method.
:param host: This is the host to send the message to.
'''
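        # Illustrative call (hypothetical host names): sending
        #   add_aggregate_host(ctxt, agg, host_param='node-2', host='node-1')
        # casts the message to the compute service on 'node-1' while the
        # remote method receives host='node-2'.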
version = '3.0'
cctxt = self.client.prepare(server=host, version=version)
cctxt.cast(ctxt, 'add_aggregate_host',
aggregate=aggregate, host=host_param,
slave_info=slave_info)
def add_fixed_ip_to_instance(self, ctxt, instance, network_id):
version = '3.12'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'add_fixed_ip_to_instance',
instance=instance, network_id=network_id)
def attach_interface(self, ctxt, instance, network_id, port_id,
requested_ip):
version = '3.17'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
return cctxt.call(ctxt, 'attach_interface',
instance=instance, network_id=network_id,
port_id=port_id, requested_ip=requested_ip)
def attach_volume(self, ctxt, instance, volume_id, mountpoint, bdm=None):
# NOTE(ndipanov): Remove volume_id and mountpoint on the next major
# version bump - they are not needed when using bdm objects.
version = '3.16'
kw = {'instance': instance, 'volume_id': volume_id,
'mountpoint': mountpoint, 'bdm': bdm}
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'attach_volume', **kw)
def change_instance_metadata(self, ctxt, instance, diff):
version = '3.7'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'change_instance_metadata',
instance=instance, diff=diff)
def _warn_buggy_live_migrations(self, data=None):
# NOTE(danms): We know that libvirt live migration with shared block
# storage was buggy (potential loss of data) before version 3.32.
# Since we need to support live migration with older clients, we need
# to warn the operator of this possibility. The logic below tries to
# decide if a warning should be emitted, assuming the positive if
# not sure. This can be removed when we bump to RPC API version 4.0.
if data:
if data.get('is_shared_block_storage') is not False:
# Shared block storage, or unknown
should_warn = True
else:
# Specifically not shared block storage
should_warn = False
else:
# Unknown, so warn to be safe
should_warn = True
if should_warn:
LOG.warning(_LW('Live migration with clients before RPC version '
'3.32 is known to be buggy with shared block '
'storage. See '
'https://bugs.launchpad.net/nova/+bug/1250751 for '
'more information!'))
def check_can_live_migrate_destination(self, ctxt, instance, destination,
block_migration, disk_over_commit):
if self.client.can_send_version('3.32'):
version = '3.32'
else:
version = '3.0'
self._warn_buggy_live_migrations()
cctxt = self.client.prepare(server=destination, version=version)
return cctxt.call(ctxt, 'check_can_live_migrate_destination',
instance=instance,
block_migration=block_migration,
disk_over_commit=disk_over_commit)
def check_can_live_migrate_source(self, ctxt, instance, dest_check_data):
if self.client.can_send_version('3.32'):
version = '3.32'
else:
version = '3.0'
self._warn_buggy_live_migrations()
source = _compute_host(None, instance)
cctxt = self.client.prepare(server=source, version=version)
return cctxt.call(ctxt, 'check_can_live_migrate_source',
instance=instance,
dest_check_data=dest_check_data)
def check_instance_shared_storage(self, ctxt, instance, data):
if self.client.can_send_version('3.29'):
version = '3.29'
else:
version = '3.0'
instance = jsonutils.to_primitive(instance)
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
return cctxt.call(ctxt, 'check_instance_shared_storage',
instance=instance,
data=data)
def confirm_resize(self, ctxt, instance, migration, host,
reservations=None, cast=True):
version = '3.0'
cctxt = self.client.prepare(server=_compute_host(host, instance),
version=version)
rpc_method = cctxt.cast if cast else cctxt.call
return rpc_method(ctxt, 'confirm_resize',
instance=instance, migration=migration,
reservations=reservations)
def detach_interface(self, ctxt, instance, port_id):
version = '3.17'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'detach_interface',
instance=instance, port_id=port_id)
def detach_volume(self, ctxt, instance, volume_id):
if self.client.can_send_version('3.25'):
version = '3.25'
else:
version = '3.0'
instance = jsonutils.to_primitive(instance)
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'detach_volume',
instance=instance, volume_id=volume_id)
def finish_resize(self, ctxt, instance, migration, image, disk_info,
host, reservations=None):
version = '3.0'
cctxt = self.client.prepare(server=host, version=version)
cctxt.cast(ctxt, 'finish_resize',
instance=instance, migration=migration,
image=image, disk_info=disk_info, reservations=reservations)
def finish_revert_resize(self, ctxt, instance, migration, host,
reservations=None):
version = '3.0'
cctxt = self.client.prepare(server=host, version=version)
cctxt.cast(ctxt, 'finish_revert_resize',
instance=instance, migration=migration,
reservations=reservations)
def get_console_output(self, ctxt, instance, tail_length):
if self.client.can_send_version('3.28'):
version = '3.28'
else:
version = '3.0'
instance = jsonutils.to_primitive(instance)
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
return cctxt.call(ctxt, 'get_console_output',
instance=instance, tail_length=tail_length)
def get_console_pool_info(self, ctxt, console_type, host):
version = '3.0'
cctxt = self.client.prepare(server=host, version=version)
return cctxt.call(ctxt, 'get_console_pool_info',
console_type=console_type)
def get_console_topic(self, ctxt, host):
version = '3.0'
cctxt = self.client.prepare(server=host, version=version)
return cctxt.call(ctxt, 'get_console_topic')
def get_diagnostics(self, ctxt, instance):
version = '3.18'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
return cctxt.call(ctxt, 'get_diagnostics', instance=instance)
def get_instance_diagnostics(self, ctxt, instance):
instance_p = jsonutils.to_primitive(instance)
kwargs = {'instance': instance_p}
version = '3.31'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
return cctxt.call(ctxt, 'get_instance_diagnostics', **kwargs)
def get_vnc_console(self, ctxt, instance, console_type):
version = '3.2'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
return cctxt.call(ctxt, 'get_vnc_console',
instance=instance, console_type=console_type)
def get_spice_console(self, ctxt, instance, console_type):
version = '3.1'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
return cctxt.call(ctxt, 'get_spice_console',
instance=instance, console_type=console_type)
def get_rdp_console(self, ctxt, instance, console_type):
version = '3.10'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
return cctxt.call(ctxt, 'get_rdp_console',
instance=instance, console_type=console_type)
def get_serial_console(self, ctxt, instance, console_type):
version = '3.34'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
return cctxt.call(ctxt, 'get_serial_console',
instance=instance, console_type=console_type)
def validate_console_port(self, ctxt, instance, port, console_type):
version = '3.3'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
return cctxt.call(ctxt, 'validate_console_port',
instance=instance, port=port,
console_type=console_type)
def host_maintenance_mode(self, ctxt, host_param, mode, host):
'''Set host maintenance mode
:param ctxt: request context
:param host_param: This value is placed in the message to be the 'host'
parameter for the remote method.
:param mode:
:param host: This is the host to send the message to.
'''
version = '3.0'
cctxt = self.client.prepare(server=host, version=version)
return cctxt.call(ctxt, 'host_maintenance_mode',
host=host_param, mode=mode)
def host_power_action(self, ctxt, action, host):
version = '3.0'
cctxt = self.client.prepare(server=host, version=version)
return cctxt.call(ctxt, 'host_power_action', action=action)
def inject_network_info(self, ctxt, instance):
version = '3.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'inject_network_info', instance=instance)
def live_migration(self, ctxt, instance, dest, block_migration, host,
migrate_data=None):
if self.client.can_send_version('3.26'):
version = '3.26'
else:
version = '3.0'
instance = jsonutils.to_primitive(instance)
cctxt = self.client.prepare(server=host, version=version)
cctxt.cast(ctxt, 'live_migration', instance=instance,
dest=dest, block_migration=block_migration,
migrate_data=migrate_data)
def pause_instance(self, ctxt, instance):
version = '3.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'pause_instance', instance=instance)
def post_live_migration_at_destination(self, ctxt, instance,
block_migration, host):
version = '3.14'
cctxt = self.client.prepare(server=host, version=version)
cctxt.cast(ctxt, 'post_live_migration_at_destination',
instance=instance, block_migration=block_migration)
def pre_live_migration(self, ctxt, instance, block_migration, disk,
host, migrate_data=None):
version = '3.19'
cctxt = self.client.prepare(server=host, version=version)
return cctxt.call(ctxt, 'pre_live_migration',
instance=instance,
block_migration=block_migration,
disk=disk, migrate_data=migrate_data)
def prep_resize(self, ctxt, image, instance, instance_type, host,
reservations=None, request_spec=None,
filter_properties=None, node=None,
clean_shutdown=True):
instance_type_p = jsonutils.to_primitive(instance_type)
image_p = jsonutils.to_primitive(image)
msg_args = {'instance': instance,
'instance_type': instance_type_p,
'image': image_p,
'reservations': reservations,
'request_spec': request_spec,
'filter_properties': filter_properties,
'node': node,
'clean_shutdown': clean_shutdown}
version = '3.38'
if not self.client.can_send_version(version):
del msg_args['clean_shutdown']
version = '3.0'
cctxt = self.client.prepare(server=host, version=version)
cctxt.cast(ctxt, 'prep_resize', **msg_args)
def reboot_instance(self, ctxt, instance, block_device_info,
reboot_type):
version = '3.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'reboot_instance',
instance=instance,
block_device_info=block_device_info,
reboot_type=reboot_type)
def rebuild_instance(self, ctxt, instance, new_pass, injected_files,
image_ref, orig_image_ref, orig_sys_metadata, bdms,
recreate=False, on_shared_storage=False, host=None,
preserve_ephemeral=False, kwargs=None):
# NOTE(danms): kwargs is only here for cells compatibility, don't
# actually send it to compute
extra = {'preserve_ephemeral': preserve_ephemeral}
version = '3.21'
cctxt = self.client.prepare(server=_compute_host(host, instance),
version=version)
cctxt.cast(ctxt, 'rebuild_instance',
instance=instance, new_pass=new_pass,
injected_files=injected_files, image_ref=image_ref,
orig_image_ref=orig_image_ref,
orig_sys_metadata=orig_sys_metadata, bdms=bdms,
recreate=recreate, on_shared_storage=on_shared_storage,
**extra)
def refresh_provider_fw_rules(self, ctxt, host):
version = '3.0'
cctxt = self.client.prepare(server=host, version=version)
cctxt.cast(ctxt, 'refresh_provider_fw_rules')
def remove_aggregate_host(self, ctxt, aggregate, host_param, host,
slave_info=None):
'''Remove aggregate host.
:param ctxt: request context
:param aggregate:
:param host_param: This value is placed in the message to be the 'host'
parameter for the remote method.
:param host: This is the host to send the message to.
'''
version = '3.0'
cctxt = self.client.prepare(server=host, version=version)
cctxt.cast(ctxt, 'remove_aggregate_host',
aggregate=aggregate, host=host_param,
slave_info=slave_info)
def remove_fixed_ip_from_instance(self, ctxt, instance, address):
version = '3.13'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'remove_fixed_ip_from_instance',
instance=instance, address=address)
def remove_volume_connection(self, ctxt, instance, volume_id, host):
if self.client.can_send_version('3.30'):
version = '3.30'
else:
version = '3.0'
instance = jsonutils.to_primitive(instance)
cctxt = self.client.prepare(server=host, version=version)
return cctxt.call(ctxt, 'remove_volume_connection',
instance=instance, volume_id=volume_id)
def rescue_instance(self, ctxt, instance, rescue_password,
rescue_image_ref=None, clean_shutdown=True):
msg_args = {'rescue_password': rescue_password}
if self.client.can_send_version('3.37'):
version = '3.37'
msg_args['clean_shutdown'] = clean_shutdown
msg_args['rescue_image_ref'] = rescue_image_ref
elif self.client.can_send_version('3.24'):
version = '3.24'
msg_args['rescue_image_ref'] = rescue_image_ref
else:
version = '3.9'
msg_args['instance'] = instance
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'rescue_instance', **msg_args)
def reset_network(self, ctxt, instance):
version = '3.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'reset_network', instance=instance)
def resize_instance(self, ctxt, instance, migration, image, instance_type,
reservations=None, clean_shutdown=True):
instance_type_p = jsonutils.to_primitive(instance_type)
msg_args = {'instance': instance, 'migration': migration,
'image': image, 'reservations': reservations,
'instance_type': instance_type_p}
if self.client.can_send_version('3.37'):
version = '3.37'
msg_args['clean_shutdown'] = clean_shutdown
else:
version = '3.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'resize_instance', **msg_args)
def resume_instance(self, ctxt, instance):
version = '3.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'resume_instance', instance=instance)
def revert_resize(self, ctxt, instance, migration, host,
reservations=None):
version = '3.0'
cctxt = self.client.prepare(server=_compute_host(host, instance),
version=version)
cctxt.cast(ctxt, 'revert_resize',
instance=instance, migration=migration,
reservations=reservations)
def rollback_live_migration_at_destination(self, ctxt, instance, host,
destroy_disks=True,
migrate_data=None):
if self.client.can_send_version('3.32'):
version = '3.32'
extra = {'destroy_disks': destroy_disks,
'migrate_data': migrate_data,
}
else:
version = '3.0'
extra = {}
self._warn_buggy_live_migrations(migrate_data)
cctxt = self.client.prepare(server=host, version=version)
cctxt.cast(ctxt, 'rollback_live_migration_at_destination',
instance=instance, **extra)
# NOTE(alaski): Remove this method when the scheduler rpc interface is
# bumped to 4.x as the only callers of this method will be removed.
def run_instance(self, ctxt, instance, host, request_spec,
filter_properties, requested_networks,
injected_files, admin_password,
is_first_time, node=None, legacy_bdm_in_spec=True):
if self.client.can_send_version('3.27'):
version = '3.27'
else:
version = '3.0'
instance = jsonutils.to_primitive(instance)
msg_kwargs = {'instance': instance, 'request_spec': request_spec,
'filter_properties': filter_properties,
'requested_networks': requested_networks,
'injected_files': injected_files,
'admin_password': admin_password,
'is_first_time': is_first_time, 'node': node,
'legacy_bdm_in_spec': legacy_bdm_in_spec}
cctxt = self.client.prepare(server=host, version=version)
cctxt.cast(ctxt, 'run_instance', **msg_kwargs)
def set_admin_password(self, ctxt, instance, new_pass):
version = '3.8'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
return cctxt.call(ctxt, 'set_admin_password',
instance=instance, new_pass=new_pass)
def set_host_enabled(self, ctxt, enabled, host):
version = '3.0'
cctxt = self.client.prepare(server=host, version=version)
return cctxt.call(ctxt, 'set_host_enabled', enabled=enabled)
def swap_volume(self, ctxt, instance, old_volume_id, new_volume_id):
version = '3.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'swap_volume',
instance=instance, old_volume_id=old_volume_id,
new_volume_id=new_volume_id)
def get_host_uptime(self, ctxt, host):
version = '3.0'
cctxt = self.client.prepare(server=host, version=version)
return cctxt.call(ctxt, 'get_host_uptime')
def reserve_block_device_name(self, ctxt, instance, device, volume_id,
disk_bus=None, device_type=None):
kw = {'instance': instance, 'device': device,
'volume_id': volume_id, 'disk_bus': disk_bus,
'device_type': device_type, 'return_bdm_object': True}
if self.client.can_send_version('3.35'):
version = '3.35'
else:
del kw['return_bdm_object']
version = '3.16'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
volume_bdm = cctxt.call(ctxt, 'reserve_block_device_name', **kw)
if not isinstance(volume_bdm, objects.BlockDeviceMapping):
volume_bdm = objects.BlockDeviceMapping.get_by_volume_id(
ctxt, volume_id)
return volume_bdm
def backup_instance(self, ctxt, instance, image_id, backup_type,
rotation):
version = '3.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'backup_instance',
instance=instance,
image_id=image_id,
backup_type=backup_type,
rotation=rotation)
def snapshot_instance(self, ctxt, instance, image_id):
version = '3.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'snapshot_instance',
instance=instance,
image_id=image_id)
def start_instance(self, ctxt, instance):
version = '3.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'start_instance', instance=instance)
def stop_instance(self, ctxt, instance, do_cast=True, clean_shutdown=True):
msg_args = {'instance': instance}
if self.client.can_send_version('3.37'):
version = '3.37'
msg_args['clean_shutdown'] = clean_shutdown
else:
version = '3.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
rpc_method = cctxt.cast if do_cast else cctxt.call
return rpc_method(ctxt, 'stop_instance', **msg_args)
def suspend_instance(self, ctxt, instance):
version = '3.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'suspend_instance', instance=instance)
def terminate_instance(self, ctxt, instance, bdms, reservations=None):
version = '3.22'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'terminate_instance',
instance=instance, bdms=bdms,
reservations=reservations)
def unpause_instance(self, ctxt, instance):
version = '3.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'unpause_instance', instance=instance)
def unrescue_instance(self, ctxt, instance):
version = '3.11'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'unrescue_instance', instance=instance)
def soft_delete_instance(self, ctxt, instance, reservations=None):
version = '3.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'soft_delete_instance',
instance=instance, reservations=reservations)
def restore_instance(self, ctxt, instance):
version = '3.20'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'restore_instance', instance=instance)
def shelve_instance(self, ctxt, instance, image_id=None,
clean_shutdown=True):
msg_args = {'instance': instance, 'image_id': image_id}
if self.client.can_send_version('3.37'):
version = '3.37'
msg_args['clean_shutdown'] = clean_shutdown
else:
version = '3.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'shelve_instance', **msg_args)
def shelve_offload_instance(self, ctxt, instance,
clean_shutdown=True):
msg_args = {'instance': instance}
if self.client.can_send_version('3.37'):
version = '3.37'
msg_args['clean_shutdown'] = clean_shutdown
else:
version = '3.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'shelve_offload_instance', **msg_args)
def unshelve_instance(self, ctxt, instance, host, image=None,
filter_properties=None, node=None):
version = '3.15'
msg_kwargs = {
'instance': instance,
'image': image,
'filter_properties': filter_properties,
'node': node,
}
cctxt = self.client.prepare(server=host, version=version)
cctxt.cast(ctxt, 'unshelve_instance', **msg_kwargs)
def volume_snapshot_create(self, ctxt, instance, volume_id,
create_info):
version = '3.6'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'volume_snapshot_create', instance=instance,
volume_id=volume_id, create_info=create_info)
def volume_snapshot_delete(self, ctxt, instance, volume_id, snapshot_id,
delete_info):
version = '3.6'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'volume_snapshot_delete', instance=instance,
volume_id=volume_id, snapshot_id=snapshot_id,
delete_info=delete_info)
def external_instance_event(self, ctxt, instances, events):
cctxt = self.client.prepare(
server=_compute_host(None, instances[0]),
version='3.23')
cctxt.cast(ctxt, 'external_instance_event', instances=instances,
events=events)
def build_and_run_instance(self, ctxt, instance, host, image, request_spec,
filter_properties, admin_password=None, injected_files=None,
requested_networks=None, security_groups=None,
block_device_mapping=None, node=None, limits=None):
version = '3.36'
if not self.client.can_send_version(version):
version = '3.33'
if 'instance_type' in filter_properties:
flavor = filter_properties['instance_type']
flavor_p = objects_base.obj_to_primitive(flavor)
filter_properties = dict(filter_properties,
instance_type=flavor_p)
if not self.client.can_send_version(version):
version = '3.23'
if requested_networks is not None:
requested_networks = [(network_id, address, port_id)
for (network_id, address, port_id, _) in
requested_networks.as_tuples()]
cctxt = self.client.prepare(server=host, version=version)
cctxt.cast(ctxt, 'build_and_run_instance', instance=instance,
image=image, request_spec=request_spec,
filter_properties=filter_properties,
admin_password=admin_password,
injected_files=injected_files,
requested_networks=requested_networks,
security_groups=security_groups,
block_device_mapping=block_device_mapping, node=node,
limits=limits)
class SecurityGroupAPI(object):
'''Client side of the security group rpc API.
API version history:
1.0 - Initial version.
1.41 - Adds refresh_instance_security_rules()
2.0 - Remove 1.x backwards compat
3.0 - Identical to 2.x, but has to be bumped at the same time as the
compute API since it's all together on the server side.
'''
def __init__(self):
super(SecurityGroupAPI, self).__init__()
target = messaging.Target(topic=CONF.compute_topic, version='3.0')
version_cap = ComputeAPI.VERSION_ALIASES.get(
CONF.upgrade_levels.compute, CONF.upgrade_levels.compute)
self.client = rpc.get_client(target, version_cap)
def refresh_security_group_rules(self, ctxt, security_group_id, host):
version = '3.0'
cctxt = self.client.prepare(server=host, version=version)
cctxt.cast(ctxt, 'refresh_security_group_rules',
security_group_id=security_group_id)
def refresh_security_group_members(self, ctxt, security_group_id,
host):
version = '3.0'
cctxt = self.client.prepare(server=host, version=version)
cctxt.cast(ctxt, 'refresh_security_group_members',
security_group_id=security_group_id)
def refresh_instance_security_rules(self, ctxt, host, instance):
version = '3.0'
instance_p = jsonutils.to_primitive(instance)
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'refresh_instance_security_rules',
instance=instance_p)
| apache-2.0 | 4,497,065,453,178,087,000 | 44.857003 | 79 | 0.59293 | false |
aaltay/beam | sdks/python/apache_beam/transforms/trigger.py | 1 | 50821 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Support for Dataflow triggers.
Triggers control when, in processing time, the contents of a window are emitted.
"""
# pytype: skip-file
from __future__ import absolute_import
import collections
import copy
import logging
import numbers
from abc import ABCMeta
from abc import abstractmethod
from builtins import object
from future.moves.itertools import zip_longest
from future.utils import iteritems
from future.utils import with_metaclass
from apache_beam.coders import coder_impl
from apache_beam.coders import observable
from apache_beam.portability.api import beam_runner_api_pb2
from apache_beam.transforms import combiners
from apache_beam.transforms import core
from apache_beam.transforms.timeutil import TimeDomain
from apache_beam.transforms.window import GlobalWindow
from apache_beam.transforms.window import GlobalWindows
from apache_beam.transforms.window import TimestampCombiner
from apache_beam.transforms.window import WindowedValue
from apache_beam.transforms.window import WindowFn
from apache_beam.utils import windowed_value
from apache_beam.utils.timestamp import MAX_TIMESTAMP
from apache_beam.utils.timestamp import MIN_TIMESTAMP
from apache_beam.utils.timestamp import TIME_GRANULARITY
# AfterCount is experimental. No backwards compatibility guarantees.
__all__ = [
'AccumulationMode',
'TriggerFn',
'DefaultTrigger',
'AfterWatermark',
'AfterProcessingTime',
'AfterCount',
'Repeatedly',
'AfterAny',
'AfterAll',
'AfterEach',
'OrFinally',
]
_LOGGER = logging.getLogger(__name__)
class AccumulationMode(object):
"""Controls what to do with data when a trigger fires multiple times."""
DISCARDING = beam_runner_api_pb2.AccumulationMode.DISCARDING
ACCUMULATING = beam_runner_api_pb2.AccumulationMode.ACCUMULATING
# TODO(robertwb): Provide retractions of previous outputs.
# RETRACTING = 3
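  # Illustrative note (assumption, not part of the original source): if a
  # pane fires after elements [1, 2] and fires again once [3] arrives,
  # ACCUMULATING emits [1, 2] and then [1, 2, 3], while DISCARDING emits
  # [1, 2] and then [3].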
class _StateTag(with_metaclass(ABCMeta, object)): # type: ignore[misc]
"""An identifier used to store and retrieve typed, combinable state.
The given tag must be unique for this step."""
def __init__(self, tag):
self.tag = tag
class _ReadModifyWriteStateTag(_StateTag):
"""StateTag pointing to an element."""
def __repr__(self):
return 'ValueStateTag(%s)' % (self.tag)
def with_prefix(self, prefix):
return _ReadModifyWriteStateTag(prefix + self.tag)
class _SetStateTag(_StateTag):
"""StateTag pointing to an element."""
def __repr__(self):
return 'SetStateTag({tag})'.format(tag=self.tag)
def with_prefix(self, prefix):
return _SetStateTag(prefix + self.tag)
class _CombiningValueStateTag(_StateTag):
"""StateTag pointing to an element, accumulated with a combiner.
The given tag must be unique for this step. The given CombineFn will be
applied (possibly incrementally and eagerly) when adding elements."""
# TODO(robertwb): Also store the coder (perhaps extracted from the combine_fn)
def __init__(self, tag, combine_fn):
super(_CombiningValueStateTag, self).__init__(tag)
if not combine_fn:
raise ValueError('combine_fn must be specified.')
if not isinstance(combine_fn, core.CombineFn):
combine_fn = core.CombineFn.from_callable(combine_fn)
self.combine_fn = combine_fn
def __repr__(self):
return 'CombiningValueStateTag(%s, %s)' % (self.tag, self.combine_fn)
def with_prefix(self, prefix):
return _CombiningValueStateTag(prefix + self.tag, self.combine_fn)
def without_extraction(self):
class NoExtractionCombineFn(core.CombineFn):
setup = self.combine_fn.setup
create_accumulator = self.combine_fn.create_accumulator
add_input = self.combine_fn.add_input
merge_accumulators = self.combine_fn.merge_accumulators
compact = self.combine_fn.compact
extract_output = staticmethod(lambda x: x)
teardown = self.combine_fn.teardown
return _CombiningValueStateTag(self.tag, NoExtractionCombineFn())
class _ListStateTag(_StateTag):
"""StateTag pointing to a list of elements."""
def __repr__(self):
return 'ListStateTag(%s)' % self.tag
def with_prefix(self, prefix):
return _ListStateTag(prefix + self.tag)
class _WatermarkHoldStateTag(_StateTag):
def __init__(self, tag, timestamp_combiner_impl):
super(_WatermarkHoldStateTag, self).__init__(tag)
self.timestamp_combiner_impl = timestamp_combiner_impl
def __repr__(self):
return 'WatermarkHoldStateTag(%s, %s)' % (
self.tag, self.timestamp_combiner_impl)
def with_prefix(self, prefix):
return _WatermarkHoldStateTag(
prefix + self.tag, self.timestamp_combiner_impl)
# pylint: disable=unused-argument
# TODO(robertwb): Provisional API, Java likely to change as well.
class TriggerFn(with_metaclass(ABCMeta, object)): # type: ignore[misc]
"""A TriggerFn determines when window (panes) are emitted.
See https://beam.apache.org/documentation/programming-guide/#triggers
"""
@abstractmethod
def on_element(self, element, window, context):
"""Called when a new element arrives in a window.
Args:
element: the element being added
window: the window to which the element is being added
context: a context (e.g. a TriggerContext instance) for managing state
and setting timers
"""
pass
@abstractmethod
def on_merge(self, to_be_merged, merge_result, context):
"""Called when multiple windows are merged.
Args:
to_be_merged: the set of windows to be merged
merge_result: the window into which the windows are being merged
context: a context (e.g. a TriggerContext instance) for managing state
and setting timers
"""
pass
@abstractmethod
def should_fire(self, time_domain, timestamp, window, context):
"""Whether this trigger should cause the window to fire.
Args:
time_domain: WATERMARK for event-time timers and REAL_TIME for
processing-time timers.
      timestamp: for time_domain WATERMARK, (a lower bound on) the watermark
        of the system; for time_domain REAL_TIME, the timestamp of the
        processing-time timer.
window: the window whose trigger is being considered
context: a context (e.g. a TriggerContext instance) for managing state
and setting timers
Returns:
whether this trigger should cause a firing
"""
pass
@abstractmethod
def has_ontime_pane(self):
"""Whether this trigger creates an empty pane even if there are no elements.
Returns:
True if this trigger guarantees that there will always be an ON_TIME pane
even if there are no elements in that pane.
"""
pass
@abstractmethod
def on_fire(self, watermark, window, context):
"""Called when a trigger actually fires.
Args:
watermark: (a lower bound on) the watermark of the system
window: the window whose trigger is being fired
context: a context (e.g. a TriggerContext instance) for managing state
and setting timers
Returns:
whether this trigger is finished
"""
pass
@abstractmethod
def reset(self, window, context):
"""Clear any state and timers used by this TriggerFn."""
pass
# pylint: enable=unused-argument
@staticmethod
def from_runner_api(proto, context):
return {
'after_all': AfterAll,
'after_any': AfterAny,
'after_each': AfterEach,
'after_end_of_window': AfterWatermark,
'after_processing_time': AfterProcessingTime,
# after_processing_time, after_synchronized_processing_time
'always': Always,
'default': DefaultTrigger,
'element_count': AfterCount,
'never': _Never,
'or_finally': OrFinally,
'repeat': Repeatedly,
}[proto.WhichOneof('trigger')].from_runner_api(proto, context)
@abstractmethod
def to_runner_api(self, unused_context):
pass
class DefaultTrigger(TriggerFn):
"""Semantically Repeatedly(AfterWatermark()), but more optimized."""
def __init__(self):
pass
def __repr__(self):
return 'DefaultTrigger()'
def on_element(self, element, window, context):
context.set_timer('', TimeDomain.WATERMARK, window.end)
def on_merge(self, to_be_merged, merge_result, context):
    # Note: timer clearing is solely an optimization.
for window in to_be_merged:
if window.end != merge_result.end:
context.clear_timer('', TimeDomain.WATERMARK)
def should_fire(self, time_domain, watermark, window, context):
if watermark >= window.end:
# Explicitly clear the timer so that late elements are not emitted again
# when the timer is fired.
context.clear_timer('', TimeDomain.WATERMARK)
return watermark >= window.end
def on_fire(self, watermark, window, context):
return False
def reset(self, window, context):
context.clear_timer('', TimeDomain.WATERMARK)
def __eq__(self, other):
return type(self) == type(other)
def __hash__(self):
return hash(type(self))
@staticmethod
def from_runner_api(proto, context):
return DefaultTrigger()
def to_runner_api(self, unused_context):
return beam_runner_api_pb2.Trigger(
default=beam_runner_api_pb2.Trigger.Default())
def has_ontime_pane(self):
return True
class AfterProcessingTime(TriggerFn):
"""Fire exactly once after a specified delay from processing time.
AfterProcessingTime is experimental. No backwards compatibility guarantees.
"""
def __init__(self, delay=0):
"""Initialize a processing time trigger with a delay in seconds."""
self.delay = delay
def __repr__(self):
return 'AfterProcessingTime(delay=%d)' % self.delay
def on_element(self, element, window, context):
context.set_timer(
'', TimeDomain.REAL_TIME, context.get_current_time() + self.delay)
def on_merge(self, to_be_merged, merge_result, context):
# timers will be kept through merging
pass
def should_fire(self, time_domain, timestamp, window, context):
if time_domain == TimeDomain.REAL_TIME:
return True
def on_fire(self, timestamp, window, context):
return True
def reset(self, window, context):
pass
@staticmethod
def from_runner_api(proto, context):
return AfterProcessingTime(
delay=(
proto.after_processing_time.timestamp_transforms[0].delay.
delay_millis) // 1000)
def to_runner_api(self, context):
delay_proto = beam_runner_api_pb2.TimestampTransform(
delay=beam_runner_api_pb2.TimestampTransform.Delay(
delay_millis=self.delay * 1000))
return beam_runner_api_pb2.Trigger(
after_processing_time=beam_runner_api_pb2.Trigger.AfterProcessingTime(
timestamp_transforms=[delay_proto]))
def has_ontime_pane(self):
return False
class Always(TriggerFn):
"""Repeatedly invoke the given trigger, never finishing."""
def __init__(self):
pass
def __repr__(self):
return 'Always'
def __eq__(self, other):
return type(self) == type(other)
def __hash__(self):
return 1
def on_element(self, element, window, context):
pass
def on_merge(self, to_be_merged, merge_result, context):
pass
def has_ontime_pane(self):
return False
def reset(self, window, context):
pass
def should_fire(self, time_domain, watermark, window, context):
return True
def on_fire(self, watermark, window, context):
return False
@staticmethod
def from_runner_api(proto, context):
return Always()
def to_runner_api(self, context):
return beam_runner_api_pb2.Trigger(
always=beam_runner_api_pb2.Trigger.Always())
class _Never(TriggerFn):
"""A trigger that never fires.
Data may still be released at window closing.
"""
def __init__(self):
pass
def __repr__(self):
return 'Never'
def __eq__(self, other):
return type(self) == type(other)
def __hash__(self):
return hash(type(self))
def on_element(self, element, window, context):
pass
def on_merge(self, to_be_merged, merge_result, context):
pass
def has_ontime_pane(self):
    return False
def reset(self, window, context):
pass
def should_fire(self, time_domain, watermark, window, context):
return False
def on_fire(self, watermark, window, context):
return True
@staticmethod
def from_runner_api(proto, context):
return _Never()
def to_runner_api(self, context):
return beam_runner_api_pb2.Trigger(
never=beam_runner_api_pb2.Trigger.Never())
class AfterWatermark(TriggerFn):
"""Fire exactly once when the watermark passes the end of the window.
Args:
early: if not None, a speculative trigger to repeatedly evaluate before
the watermark passes the end of the window
late: if not None, a speculative trigger to repeatedly evaluate after
the watermark passes the end of the window
"""
LATE_TAG = _CombiningValueStateTag('is_late', any)
def __init__(self, early=None, late=None):
self.early = Repeatedly(early) if early else None
self.late = Repeatedly(late) if late else None
def __repr__(self):
qualifiers = []
if self.early:
qualifiers.append('early=%s' % self.early.underlying)
if self.late:
qualifiers.append('late=%s' % self.late.underlying)
return 'AfterWatermark(%s)' % ', '.join(qualifiers)
def is_late(self, context):
return self.late and context.get_state(self.LATE_TAG)
def on_element(self, element, window, context):
if self.is_late(context):
self.late.on_element(element, window, NestedContext(context, 'late'))
else:
context.set_timer('', TimeDomain.WATERMARK, window.end)
if self.early:
self.early.on_element(element, window, NestedContext(context, 'early'))
def on_merge(self, to_be_merged, merge_result, context):
# TODO(robertwb): Figure out whether the 'rewind' semantics could be used
# here.
if self.is_late(context):
self.late.on_merge(
to_be_merged, merge_result, NestedContext(context, 'late'))
else:
      # Note: timer clearing is solely an optimization.
for window in to_be_merged:
if window.end != merge_result.end:
context.clear_timer('', TimeDomain.WATERMARK)
if self.early:
self.early.on_merge(
to_be_merged, merge_result, NestedContext(context, 'early'))
def should_fire(self, time_domain, watermark, window, context):
if self.is_late(context):
return self.late.should_fire(
time_domain, watermark, window, NestedContext(context, 'late'))
elif watermark >= window.end:
# Explicitly clear the timer so that late elements are not emitted again
# when the timer is fired.
context.clear_timer('', TimeDomain.WATERMARK)
return True
elif self.early:
return self.early.should_fire(
time_domain, watermark, window, NestedContext(context, 'early'))
return False
def on_fire(self, watermark, window, context):
if self.is_late(context):
return self.late.on_fire(
watermark, window, NestedContext(context, 'late'))
elif watermark >= window.end:
context.add_state(self.LATE_TAG, True)
return not self.late
elif self.early:
self.early.on_fire(watermark, window, NestedContext(context, 'early'))
return False
def reset(self, window, context):
if self.late:
context.clear_state(self.LATE_TAG)
if self.early:
self.early.reset(window, NestedContext(context, 'early'))
if self.late:
self.late.reset(window, NestedContext(context, 'late'))
def __eq__(self, other):
return (
type(self) == type(other) and self.early == other.early and
self.late == other.late)
def __hash__(self):
return hash((type(self), self.early, self.late))
@staticmethod
def from_runner_api(proto, context):
return AfterWatermark(
early=TriggerFn.from_runner_api(
proto.after_end_of_window.early_firings, context)
if proto.after_end_of_window.HasField('early_firings') else None,
late=TriggerFn.from_runner_api(
proto.after_end_of_window.late_firings, context)
if proto.after_end_of_window.HasField('late_firings') else None)
def to_runner_api(self, context):
early_proto = self.early.underlying.to_runner_api(
context) if self.early else None
late_proto = self.late.underlying.to_runner_api(
context) if self.late else None
return beam_runner_api_pb2.Trigger(
after_end_of_window=beam_runner_api_pb2.Trigger.AfterEndOfWindow(
early_firings=early_proto, late_firings=late_proto))
def has_ontime_pane(self):
return True
class AfterCount(TriggerFn):
"""Fire when there are at least count elements in this window pane.
AfterCount is experimental. No backwards compatibility guarantees.
"""
COUNT_TAG = _CombiningValueStateTag('count', combiners.CountCombineFn())
def __init__(self, count):
if not isinstance(count, numbers.Integral) or count < 1:
raise ValueError("count (%d) must be a positive integer." % count)
self.count = count
def __repr__(self):
return 'AfterCount(%s)' % self.count
def __eq__(self, other):
return type(self) == type(other) and self.count == other.count
def __hash__(self):
return hash(self.count)
def on_element(self, element, window, context):
context.add_state(self.COUNT_TAG, 1)
def on_merge(self, to_be_merged, merge_result, context):
# states automatically merged
pass
def should_fire(self, time_domain, watermark, window, context):
return context.get_state(self.COUNT_TAG) >= self.count
def on_fire(self, watermark, window, context):
return True
def reset(self, window, context):
context.clear_state(self.COUNT_TAG)
@staticmethod
def from_runner_api(proto, unused_context):
return AfterCount(proto.element_count.element_count)
def to_runner_api(self, unused_context):
return beam_runner_api_pb2.Trigger(
element_count=beam_runner_api_pb2.Trigger.ElementCount(
element_count=self.count))
def has_ontime_pane(self):
return False
class Repeatedly(TriggerFn):
"""Repeatedly invoke the given trigger, never finishing."""
def __init__(self, underlying):
self.underlying = underlying
def __repr__(self):
return 'Repeatedly(%s)' % self.underlying
def __eq__(self, other):
return type(self) == type(other) and self.underlying == other.underlying
def __hash__(self):
return hash(self.underlying)
def on_element(self, element, window, context):
self.underlying.on_element(element, window, context)
def on_merge(self, to_be_merged, merge_result, context):
self.underlying.on_merge(to_be_merged, merge_result, context)
def should_fire(self, time_domain, watermark, window, context):
return self.underlying.should_fire(time_domain, watermark, window, context)
def on_fire(self, watermark, window, context):
if self.underlying.on_fire(watermark, window, context):
self.underlying.reset(window, context)
return False
def reset(self, window, context):
self.underlying.reset(window, context)
@staticmethod
def from_runner_api(proto, context):
return Repeatedly(
TriggerFn.from_runner_api(proto.repeat.subtrigger, context))
def to_runner_api(self, context):
return beam_runner_api_pb2.Trigger(
repeat=beam_runner_api_pb2.Trigger.Repeat(
subtrigger=self.underlying.to_runner_api(context)))
def has_ontime_pane(self):
return self.underlying.has_ontime_pane()
class _ParallelTriggerFn(with_metaclass(ABCMeta, TriggerFn)): # type: ignore[misc]
def __init__(self, *triggers):
self.triggers = triggers
def __repr__(self):
return '%s(%s)' % (
self.__class__.__name__, ', '.join(str(t) for t in self.triggers))
def __eq__(self, other):
return type(self) == type(other) and self.triggers == other.triggers
def __hash__(self):
return hash(self.triggers)
@abstractmethod
def combine_op(self, trigger_results):
pass
def on_element(self, element, window, context):
for ix, trigger in enumerate(self.triggers):
trigger.on_element(element, window, self._sub_context(context, ix))
def on_merge(self, to_be_merged, merge_result, context):
for ix, trigger in enumerate(self.triggers):
trigger.on_merge(
to_be_merged, merge_result, self._sub_context(context, ix))
def should_fire(self, time_domain, watermark, window, context):
self._time_domain = time_domain
return self.combine_op(
trigger.should_fire(
time_domain, watermark, window, self._sub_context(context, ix))
for ix,
trigger in enumerate(self.triggers))
def on_fire(self, watermark, window, context):
finished = []
for ix, trigger in enumerate(self.triggers):
nested_context = self._sub_context(context, ix)
if trigger.should_fire(TimeDomain.WATERMARK,
watermark,
window,
nested_context):
finished.append(trigger.on_fire(watermark, window, nested_context))
return self.combine_op(finished)
def reset(self, window, context):
for ix, trigger in enumerate(self.triggers):
trigger.reset(window, self._sub_context(context, ix))
@staticmethod
def _sub_context(context, index):
return NestedContext(context, '%d/' % index)
@staticmethod
def from_runner_api(proto, context):
subtriggers = [
TriggerFn.from_runner_api(subtrigger, context) for subtrigger in
proto.after_all.subtriggers or proto.after_any.subtriggers
]
if proto.after_all.subtriggers:
return AfterAll(*subtriggers)
else:
return AfterAny(*subtriggers)
def to_runner_api(self, context):
subtriggers = [
subtrigger.to_runner_api(context) for subtrigger in self.triggers
]
if self.combine_op == all:
return beam_runner_api_pb2.Trigger(
after_all=beam_runner_api_pb2.Trigger.AfterAll(
subtriggers=subtriggers))
elif self.combine_op == any:
return beam_runner_api_pb2.Trigger(
after_any=beam_runner_api_pb2.Trigger.AfterAny(
subtriggers=subtriggers))
else:
raise NotImplementedError(self)
def has_ontime_pane(self):
return any(t.has_ontime_pane() for t in self.triggers)
class AfterAny(_ParallelTriggerFn):
"""Fires when any subtrigger fires.
Also finishes when any subtrigger finishes.
"""
combine_op = any
class AfterAll(_ParallelTriggerFn):
"""Fires when all subtriggers have fired.
Also finishes when all subtriggers have finished.
"""
combine_op = all
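# Illustrative sketch (assumed usage, not from the original source): composite
# triggers combine subtriggers, e.g. fire when either 100 elements have
# arrived or 60 seconds of processing time have passed, whichever comes first:
#
#   trigger=Repeatedly(AfterAny(AfterCount(100), AfterProcessingTime(60)))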
class AfterEach(TriggerFn):
  """Fires the subtriggers in sequence, one at a time.

  The current subtrigger governs firing until it finishes, at which point
  the next subtrigger takes over.  Finishes when the last one finishes.
  """
INDEX_TAG = _CombiningValueStateTag(
'index', (lambda indices: 0 if not indices else max(indices)))
def __init__(self, *triggers):
self.triggers = triggers
def __repr__(self):
return '%s(%s)' % (
self.__class__.__name__, ', '.join(str(t) for t in self.triggers))
def __eq__(self, other):
return type(self) == type(other) and self.triggers == other.triggers
def __hash__(self):
return hash(self.triggers)
def on_element(self, element, window, context):
ix = context.get_state(self.INDEX_TAG)
if ix < len(self.triggers):
self.triggers[ix].on_element(
element, window, self._sub_context(context, ix))
def on_merge(self, to_be_merged, merge_result, context):
# This takes the furthest window on merging.
# TODO(robertwb): Revisit this when merging windows logic is settled for
# all possible merging situations.
ix = context.get_state(self.INDEX_TAG)
if ix < len(self.triggers):
self.triggers[ix].on_merge(
to_be_merged, merge_result, self._sub_context(context, ix))
def should_fire(self, time_domain, watermark, window, context):
ix = context.get_state(self.INDEX_TAG)
if ix < len(self.triggers):
return self.triggers[ix].should_fire(
time_domain, watermark, window, self._sub_context(context, ix))
def on_fire(self, watermark, window, context):
ix = context.get_state(self.INDEX_TAG)
if ix < len(self.triggers):
if self.triggers[ix].on_fire(watermark,
window,
self._sub_context(context, ix)):
ix += 1
context.add_state(self.INDEX_TAG, ix)
return ix == len(self.triggers)
def reset(self, window, context):
context.clear_state(self.INDEX_TAG)
for ix, trigger in enumerate(self.triggers):
trigger.reset(window, self._sub_context(context, ix))
@staticmethod
def _sub_context(context, index):
return NestedContext(context, '%d/' % index)
@staticmethod
def from_runner_api(proto, context):
return AfterEach(
*[
TriggerFn.from_runner_api(subtrigger, context)
for subtrigger in proto.after_each.subtriggers
])
def to_runner_api(self, context):
return beam_runner_api_pb2.Trigger(
after_each=beam_runner_api_pb2.Trigger.AfterEach(
subtriggers=[
subtrigger.to_runner_api(context)
for subtrigger in self.triggers
]))
def has_ontime_pane(self):
return any(t.has_ontime_pane() for t in self.triggers)
class OrFinally(AfterAny):
@staticmethod
def from_runner_api(proto, context):
return OrFinally(
TriggerFn.from_runner_api(proto.or_finally.main, context),
# getattr is used as finally is a keyword in Python
TriggerFn.from_runner_api(
getattr(proto.or_finally, 'finally'), context))
def to_runner_api(self, context):
return beam_runner_api_pb2.Trigger(
or_finally=beam_runner_api_pb2.Trigger.OrFinally(
main=self.triggers[0].to_runner_api(context),
# dict keyword argument is used as finally is a keyword in Python
**{'finally': self.triggers[1].to_runner_api(context)}))
class TriggerContext(object):
def __init__(self, outer, window, clock):
self._outer = outer
self._window = window
self._clock = clock
def get_current_time(self):
return self._clock.time()
def set_timer(self, name, time_domain, timestamp):
self._outer.set_timer(self._window, name, time_domain, timestamp)
def clear_timer(self, name, time_domain):
self._outer.clear_timer(self._window, name, time_domain)
def add_state(self, tag, value):
self._outer.add_state(self._window, tag, value)
def get_state(self, tag):
return self._outer.get_state(self._window, tag)
def clear_state(self, tag):
return self._outer.clear_state(self._window, tag)
class NestedContext(object):
"""Namespaced context useful for defining composite triggers."""
def __init__(self, outer, prefix):
self._outer = outer
self._prefix = prefix
def get_current_time(self):
return self._outer.get_current_time()
def set_timer(self, name, time_domain, timestamp):
self._outer.set_timer(self._prefix + name, time_domain, timestamp)
def clear_timer(self, name, time_domain):
self._outer.clear_timer(self._prefix + name, time_domain)
def add_state(self, tag, value):
self._outer.add_state(tag.with_prefix(self._prefix), value)
def get_state(self, tag):
return self._outer.get_state(tag.with_prefix(self._prefix))
def clear_state(self, tag):
self._outer.clear_state(tag.with_prefix(self._prefix))
# pylint: disable=unused-argument
class SimpleState(with_metaclass(ABCMeta, object)): # type: ignore[misc]
"""Basic state storage interface used for triggering.
Only timers must hold the watermark (by their timestamp).
"""
@abstractmethod
def set_timer(
self, window, name, time_domain, timestamp, dynamic_timer_tag=''):
pass
@abstractmethod
def get_window(self, window_id):
pass
@abstractmethod
def clear_timer(self, window, name, time_domain, dynamic_timer_tag=''):
pass
@abstractmethod
def add_state(self, window, tag, value):
pass
@abstractmethod
def get_state(self, window, tag):
pass
@abstractmethod
def clear_state(self, window, tag):
pass
def at(self, window, clock):
return NestedContext(TriggerContext(self, window, clock), 'trigger')
class UnmergedState(SimpleState):
"""State suitable for use in TriggerDriver.
This class must be implemented by each backend.
"""
@abstractmethod
def set_global_state(self, tag, value):
pass
@abstractmethod
def get_global_state(self, tag, default=None):
pass
# pylint: enable=unused-argument
class MergeableStateAdapter(SimpleState):
"""Wraps an UnmergedState, tracking merged windows."""
# TODO(robertwb): A similar indirection could be used for sliding windows
# or other window_fns when a single element typically belongs to many windows.
WINDOW_IDS = _ReadModifyWriteStateTag('window_ids')
def __init__(self, raw_state):
self.raw_state = raw_state
self.window_ids = self.raw_state.get_global_state(self.WINDOW_IDS, {})
self.counter = None
def set_timer(
self, window, name, time_domain, timestamp, dynamic_timer_tag=''):
self.raw_state.set_timer(
self._get_id(window),
name,
time_domain,
timestamp,
dynamic_timer_tag=dynamic_timer_tag)
def clear_timer(self, window, name, time_domain, dynamic_timer_tag=''):
for window_id in self._get_ids(window):
self.raw_state.clear_timer(
window_id, name, time_domain, dynamic_timer_tag=dynamic_timer_tag)
def add_state(self, window, tag, value):
if isinstance(tag, _ReadModifyWriteStateTag):
raise ValueError(
'Merging requested for non-mergeable state tag: %r.' % tag)
elif isinstance(tag, _CombiningValueStateTag):
tag = tag.without_extraction()
self.raw_state.add_state(self._get_id(window), tag, value)
def get_state(self, window, tag):
if isinstance(tag, _CombiningValueStateTag):
original_tag, tag = tag, tag.without_extraction()
values = [
self.raw_state.get_state(window_id, tag)
for window_id in self._get_ids(window)
]
if isinstance(tag, _ReadModifyWriteStateTag):
raise ValueError(
'Merging requested for non-mergeable state tag: %r.' % tag)
elif isinstance(tag, _CombiningValueStateTag):
return original_tag.combine_fn.extract_output(
original_tag.combine_fn.merge_accumulators(values))
elif isinstance(tag, _ListStateTag):
return [v for vs in values for v in vs]
elif isinstance(tag, _SetStateTag):
return {v for vs in values for v in vs}
elif isinstance(tag, _WatermarkHoldStateTag):
return tag.timestamp_combiner_impl.combine_all(values)
else:
raise ValueError('Invalid tag.', tag)
def clear_state(self, window, tag):
for window_id in self._get_ids(window):
self.raw_state.clear_state(window_id, tag)
if tag is None:
del self.window_ids[window]
self._persist_window_ids()
def merge(self, to_be_merged, merge_result):
for window in to_be_merged:
if window != merge_result:
if window in self.window_ids:
if merge_result in self.window_ids:
merge_window_ids = self.window_ids[merge_result]
else:
merge_window_ids = self.window_ids[merge_result] = []
merge_window_ids.extend(self.window_ids.pop(window))
self._persist_window_ids()
def known_windows(self):
return list(self.window_ids)
def get_window(self, window_id):
for window, ids in self.window_ids.items():
if window_id in ids:
return window
raise ValueError('No window for %s' % window_id)
def _get_id(self, window):
if window in self.window_ids:
return self.window_ids[window][0]
window_id = self._get_next_counter()
self.window_ids[window] = [window_id]
self._persist_window_ids()
return window_id
def _get_ids(self, window):
return self.window_ids.get(window, [])
def _get_next_counter(self):
if not self.window_ids:
self.counter = 0
elif self.counter is None:
self.counter = max(k for ids in self.window_ids.values() for k in ids)
self.counter += 1
return self.counter
def _persist_window_ids(self):
self.raw_state.set_global_state(self.WINDOW_IDS, self.window_ids)
def __repr__(self):
return '\n\t'.join([repr(self.window_ids)] +
repr(self.raw_state).split('\n'))
def create_trigger_driver(
windowing, is_batch=False, phased_combine_fn=None, clock=None):
"""Create the TriggerDriver for the given windowing and options."""
# TODO(BEAM-10149): Respect closing and on-time behaviors.
# For batch, we should always fire once, no matter what.
if is_batch and windowing.triggerfn == _Never():
windowing = copy.copy(windowing)
windowing.triggerfn = Always()
# TODO(robertwb): We can do more if we know elements are in timestamp
# sorted order.
if windowing.is_default() and is_batch:
driver = BatchGlobalTriggerDriver()
elif (windowing.windowfn == GlobalWindows() and
(windowing.triggerfn in [AfterCount(1), Always()]) and is_batch):
# Here we also just pass through all the values exactly once.
driver = BatchGlobalTriggerDriver()
else:
driver = GeneralTriggerDriver(windowing, clock)
if phased_combine_fn:
# TODO(ccy): Refactor GeneralTriggerDriver to combine values eagerly using
# the known phased_combine_fn here.
driver = CombiningTriggerDriver(phased_combine_fn, driver)
return driver
class TriggerDriver(with_metaclass(ABCMeta, object)): # type: ignore[misc]
"""Breaks a series of bundle and timer firings into window (pane)s."""
@abstractmethod
def process_elements(
self,
state,
windowed_values,
output_watermark,
input_watermark=MIN_TIMESTAMP):
pass
@abstractmethod
def process_timer(
self,
window_id,
name,
time_domain,
timestamp,
state,
input_watermark=None):
pass
def process_entire_key(self, key, windowed_values):
state = InMemoryUnmergedState()
for wvalue in self.process_elements(state,
windowed_values,
MIN_TIMESTAMP,
MIN_TIMESTAMP):
yield wvalue.with_value((key, wvalue.value))
while state.timers:
fired = state.get_and_clear_timers()
for timer_window, (name, time_domain, fire_time, _) in fired:
for wvalue in self.process_timer(timer_window,
name,
time_domain,
fire_time,
state):
yield wvalue.with_value((key, wvalue.value))
class _UnwindowedValues(observable.ObservableMixin):
"""Exposes iterable of windowed values as iterable of unwindowed values."""
def __init__(self, windowed_values):
super(_UnwindowedValues, self).__init__()
self._windowed_values = windowed_values
def __iter__(self):
for wv in self._windowed_values:
unwindowed_value = wv.value
self.notify_observers(unwindowed_value)
yield unwindowed_value
def __repr__(self):
return '<_UnwindowedValues of %s>' % self._windowed_values
def __reduce__(self):
return list, (list(self), )
def __eq__(self, other):
if isinstance(other, collections.Iterable):
return all(
a == b for a, b in zip_longest(self, other, fillvalue=object()))
else:
return NotImplemented
def __hash__(self):
return hash(tuple(self))
coder_impl.FastPrimitivesCoderImpl.register_iterable_like_type(
_UnwindowedValues)
class BatchGlobalTriggerDriver(TriggerDriver):
"""Groups all received values together.
"""
GLOBAL_WINDOW_TUPLE = (GlobalWindow(), )
ONLY_FIRING = windowed_value.PaneInfo(
is_first=True,
is_last=True,
timing=windowed_value.PaneInfoTiming.ON_TIME,
index=0,
nonspeculative_index=0)
def process_elements(
self,
state,
windowed_values,
unused_output_watermark,
unused_input_watermark=MIN_TIMESTAMP):
yield WindowedValue(
_UnwindowedValues(windowed_values),
MIN_TIMESTAMP,
self.GLOBAL_WINDOW_TUPLE,
self.ONLY_FIRING)
def process_timer(
self,
window_id,
name,
time_domain,
timestamp,
state,
input_watermark=None):
raise TypeError('Triggers never set or called for batch default windowing.')
class CombiningTriggerDriver(TriggerDriver):
"""Uses a phased_combine_fn to process output of wrapped TriggerDriver."""
def __init__(self, phased_combine_fn, underlying):
self.phased_combine_fn = phased_combine_fn
self.underlying = underlying
def process_elements(
self,
state,
windowed_values,
output_watermark,
input_watermark=MIN_TIMESTAMP):
uncombined = self.underlying.process_elements(
state, windowed_values, output_watermark, input_watermark)
for output in uncombined:
yield output.with_value(self.phased_combine_fn.apply(output.value))
def process_timer(
self,
window_id,
name,
time_domain,
timestamp,
state,
input_watermark=None):
uncombined = self.underlying.process_timer(
window_id, name, time_domain, timestamp, state, input_watermark)
for output in uncombined:
yield output.with_value(self.phased_combine_fn.apply(output.value))
class GeneralTriggerDriver(TriggerDriver):
"""Breaks a series of bundle and timer firings into window (pane)s.
Suitable for all variants of Windowing.
"""
ELEMENTS = _ListStateTag('elements')
TOMBSTONE = _CombiningValueStateTag('tombstone', combiners.CountCombineFn())
INDEX = _CombiningValueStateTag('index', combiners.CountCombineFn())
NONSPECULATIVE_INDEX = _CombiningValueStateTag(
'nonspeculative_index', combiners.CountCombineFn())
def __init__(self, windowing, clock):
self.clock = clock
self.allowed_lateness = windowing.allowed_lateness
self.window_fn = windowing.windowfn
self.timestamp_combiner_impl = TimestampCombiner.get_impl(
windowing.timestamp_combiner, self.window_fn)
# pylint: disable=invalid-name
self.WATERMARK_HOLD = _WatermarkHoldStateTag(
'watermark', self.timestamp_combiner_impl)
# pylint: enable=invalid-name
self.trigger_fn = windowing.triggerfn
self.accumulation_mode = windowing.accumulation_mode
self.is_merging = True
def process_elements(
self,
state,
windowed_values,
output_watermark,
input_watermark=MIN_TIMESTAMP):
if self.is_merging:
state = MergeableStateAdapter(state)
windows_to_elements = collections.defaultdict(list)
for wv in windowed_values:
for window in wv.windows:
# ignore expired windows
if input_watermark > window.end + self.allowed_lateness:
continue
windows_to_elements[window].append((wv.value, wv.timestamp))
# First handle merging.
if self.is_merging:
old_windows = set(state.known_windows())
all_windows = old_windows.union(list(windows_to_elements))
if all_windows != old_windows:
merged_away = {}
class TriggerMergeContext(WindowFn.MergeContext):
def merge(_, to_be_merged, merge_result): # pylint: disable=no-self-argument
for window in to_be_merged:
if window != merge_result:
merged_away[window] = merge_result
# Clear state associated with PaneInfo since it is
# not preserved across merges.
state.clear_state(window, self.INDEX)
state.clear_state(window, self.NONSPECULATIVE_INDEX)
state.merge(to_be_merged, merge_result)
          # Note: `self` here is the enclosing GeneralTriggerDriver, not the
          # merge context.
self.trigger_fn.on_merge(
to_be_merged, merge_result, state.at(merge_result, self.clock))
self.window_fn.merge(TriggerMergeContext(all_windows))
merged_windows_to_elements = collections.defaultdict(list)
for window, values in windows_to_elements.items():
while window in merged_away:
window = merged_away[window]
merged_windows_to_elements[window].extend(values)
windows_to_elements = merged_windows_to_elements
for window in merged_away:
state.clear_state(window, self.WATERMARK_HOLD)
# Next handle element adding.
for window, elements in windows_to_elements.items():
if state.get_state(window, self.TOMBSTONE):
continue
# Add watermark hold.
# TODO(ccy): Add late data and garbage-collection hold support.
output_time = self.timestamp_combiner_impl.merge(
window,
(
element_output_time for element_output_time in (
self.timestamp_combiner_impl.assign_output_time(
window, timestamp) for unused_value,
timestamp in elements)
if element_output_time >= output_watermark))
if output_time is not None:
state.add_state(window, self.WATERMARK_HOLD, output_time)
context = state.at(window, self.clock)
for value, unused_timestamp in elements:
state.add_state(window, self.ELEMENTS, value)
self.trigger_fn.on_element(value, window, context)
# Maybe fire this window.
if self.trigger_fn.should_fire(TimeDomain.WATERMARK,
input_watermark,
window,
context):
finished = self.trigger_fn.on_fire(input_watermark, window, context)
yield self._output(window, finished, state, output_watermark, False)
def process_timer(
self,
window_id,
unused_name,
time_domain,
timestamp,
state,
input_watermark=None):
if input_watermark is None:
input_watermark = timestamp
if self.is_merging:
state = MergeableStateAdapter(state)
window = state.get_window(window_id)
if state.get_state(window, self.TOMBSTONE):
return
if time_domain in (TimeDomain.WATERMARK, TimeDomain.REAL_TIME):
if not self.is_merging or window in state.known_windows():
context = state.at(window, self.clock)
if self.trigger_fn.should_fire(time_domain, timestamp, window, context):
finished = self.trigger_fn.on_fire(timestamp, window, context)
yield self._output(
window,
finished,
state,
timestamp,
time_domain == TimeDomain.WATERMARK)
else:
raise Exception('Unexpected time domain: %s' % time_domain)
def _output(self, window, finished, state, output_watermark, maybe_ontime):
"""Output window and clean up if appropriate."""
index = state.get_state(window, self.INDEX)
state.add_state(window, self.INDEX, 1)
if output_watermark <= window.max_timestamp():
nonspeculative_index = -1
timing = windowed_value.PaneInfoTiming.EARLY
if state.get_state(window, self.NONSPECULATIVE_INDEX):
nonspeculative_index = state.get_state(
window, self.NONSPECULATIVE_INDEX)
state.add_state(window, self.NONSPECULATIVE_INDEX, 1)
_LOGGER.warning(
'Watermark moved backwards in time '
'or late data moved window end forward.')
else:
nonspeculative_index = state.get_state(window, self.NONSPECULATIVE_INDEX)
state.add_state(window, self.NONSPECULATIVE_INDEX, 1)
timing = (
windowed_value.PaneInfoTiming.ON_TIME if maybe_ontime and
nonspeculative_index == 0 else windowed_value.PaneInfoTiming.LATE)
pane_info = windowed_value.PaneInfo(
index == 0, finished, timing, index, nonspeculative_index)
values = state.get_state(window, self.ELEMENTS)
if finished:
# TODO(robertwb): allowed lateness
state.clear_state(window, self.ELEMENTS)
state.add_state(window, self.TOMBSTONE, 1)
elif self.accumulation_mode == AccumulationMode.DISCARDING:
state.clear_state(window, self.ELEMENTS)
timestamp = state.get_state(window, self.WATERMARK_HOLD)
if timestamp is None:
# If no watermark hold was set, output at end of window.
timestamp = window.max_timestamp()
elif output_watermark < window.end and self.trigger_fn.has_ontime_pane():
# Hold the watermark in case there is an empty pane that needs to be fired
# at the end of the window.
pass
else:
state.clear_state(window, self.WATERMARK_HOLD)
return WindowedValue(values, timestamp, (window, ), pane_info)
class InMemoryUnmergedState(UnmergedState):
"""In-memory implementation of UnmergedState.
Used for batch and testing.
"""
def __init__(self, defensive_copy=False):
# TODO(robertwb): Clean defensive_copy. It is too expensive in production.
self.timers = collections.defaultdict(dict)
self.state = collections.defaultdict(lambda: collections.defaultdict(list))
self.global_state = {}
self.defensive_copy = defensive_copy
def copy(self):
cloned_object = InMemoryUnmergedState(defensive_copy=self.defensive_copy)
cloned_object.timers = copy.deepcopy(self.timers)
cloned_object.global_state = copy.deepcopy(self.global_state)
for window in self.state:
for tag in self.state[window]:
cloned_object.state[window][tag] = copy.copy(self.state[window][tag])
return cloned_object
def set_global_state(self, tag, value):
assert isinstance(tag, _ReadModifyWriteStateTag)
if self.defensive_copy:
value = copy.deepcopy(value)
self.global_state[tag.tag] = value
def get_global_state(self, tag, default=None):
return self.global_state.get(tag.tag, default)
def set_timer(
self, window, name, time_domain, timestamp, dynamic_timer_tag=''):
self.timers[window][(name, time_domain, dynamic_timer_tag)] = timestamp
def clear_timer(self, window, name, time_domain, dynamic_timer_tag=''):
self.timers[window].pop((name, time_domain, dynamic_timer_tag), None)
if not self.timers[window]:
del self.timers[window]
def get_window(self, window_id):
return window_id
def add_state(self, window, tag, value):
if self.defensive_copy:
value = copy.deepcopy(value)
if isinstance(tag, _ReadModifyWriteStateTag):
self.state[window][tag.tag] = value
elif isinstance(tag, _CombiningValueStateTag):
# TODO(robertwb): Store merged accumulators.
self.state[window][tag.tag].append(value)
elif isinstance(tag, _ListStateTag):
self.state[window][tag.tag].append(value)
elif isinstance(tag, _SetStateTag):
self.state[window][tag.tag].append(value)
elif isinstance(tag, _WatermarkHoldStateTag):
self.state[window][tag.tag].append(value)
else:
raise ValueError('Invalid tag.', tag)
def get_state(self, window, tag):
values = self.state[window][tag.tag]
if isinstance(tag, _ReadModifyWriteStateTag):
return values
elif isinstance(tag, _CombiningValueStateTag):
return tag.combine_fn.apply(values)
elif isinstance(tag, _ListStateTag):
return values
elif isinstance(tag, _SetStateTag):
return values
elif isinstance(tag, _WatermarkHoldStateTag):
return tag.timestamp_combiner_impl.combine_all(values)
else:
raise ValueError('Invalid tag.', tag)
def clear_state(self, window, tag):
self.state[window].pop(tag.tag, None)
if not self.state[window]:
self.state.pop(window, None)
def get_timers(
self, clear=False, watermark=MAX_TIMESTAMP, processing_time=None):
"""Gets expired timers and reports if there
are any realtime timers set per state.
Expiration is measured against the watermark for event-time timers,
and against a wall clock for processing-time timers.
"""
expired = []
has_realtime_timer = False
for window, timers in list(self.timers.items()):
for (name, time_domain, dynamic_timer_tag), timestamp in list(
timers.items()):
if time_domain == TimeDomain.REAL_TIME:
time_marker = processing_time
has_realtime_timer = True
elif time_domain == TimeDomain.WATERMARK:
time_marker = watermark
else:
_LOGGER.error(
'TimeDomain error: No timers defined for time domain %s.',
time_domain)
if timestamp <= time_marker:
expired.append(
(window, (name, time_domain, timestamp, dynamic_timer_tag)))
if clear:
del timers[(name, time_domain, dynamic_timer_tag)]
if not timers and clear:
del self.timers[window]
return expired, has_realtime_timer
def get_and_clear_timers(self, watermark=MAX_TIMESTAMP):
return self.get_timers(clear=True, watermark=watermark)[0]
def get_earliest_hold(self):
earliest_hold = MAX_TIMESTAMP
for unused_window, tagged_states in iteritems(self.state):
# TODO(BEAM-2519): currently, this assumes that the watermark hold tag is
# named "watermark". This is currently only true because the only place
# watermark holds are set is in the GeneralTriggerDriver, where we use
# this name. We should fix this by allowing enumeration of the tag types
# used in adding state.
if 'watermark' in tagged_states and tagged_states['watermark']:
hold = min(tagged_states['watermark']) - TIME_GRANULARITY
earliest_hold = min(earliest_hold, hold)
return earliest_hold
def __repr__(self):
state_str = '\n'.join(
'%s: %s' % (key, dict(state)) for key, state in self.state.items())
return 'timers: %s\nstate: %s' % (dict(self.timers), state_str)
| apache-2.0 | 1,498,664,546,685,120,800 | 31.851325 | 87 | 0.669625 | false |
rogerthat-platform/plugin-rogerthat-control-center | plugins/rogerthat_control_center/rogerthat_control_center_plugin.py | 1 | 3970 | # -*- coding: utf-8 -*-
# Copyright 2017 GIG Technology NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.3@@
from framework.bizz.authentication import get_current_session
from framework.consts import JINJA_ENVIRONMENT
from framework.plugin_loader import BrandingPlugin, get_plugin
from framework.utils.plugins import Module, Handler
from mcfw.consts import AUTHENTICATED
from mcfw.restapi import rest_functions
from mcfw.rpc import parse_complex_value, returns
from plugins.rogerthat_control_center.api import backends, permissions
from plugins.rogerthat_control_center.bizz.authentication import get_permissions_from_scopes
from plugins.rogerthat_control_center.handlers.index import IndexPageHandler
from plugins.rogerthat_control_center.handlers.proxy import ProxyHandlerConfigurator, \
ProxyHandlerRogerthat, ProxyCreateApp, ProxyUpdateApp
from plugins.rogerthat_control_center.plugin_consts import NAMESPACE
from plugins.rogerthat_control_center.to.configuration import RogerthatControlCenterConfiguration
from plugins.rogerthat_control_center.utils import CopyrightExtension
class RogerthatControlCenterPlugin(BrandingPlugin):
def __init__(self, configuration):
super(RogerthatControlCenterPlugin, self).__init__(configuration)
self.configuration = parse_complex_value(RogerthatControlCenterConfiguration, configuration,
False) # type: RogerthatControlCenterConfiguration
JINJA_ENVIRONMENT.add_extension(CopyrightExtension)
def get_handlers(self, auth):
if auth == Handler.AUTH_AUTHENTICATED:
yield Handler('/api/plugins/rcc/proxy/configurator<route:.*>', ProxyHandlerConfigurator)
yield Handler('/api/plugins/rcc/proxy/rogerthat/<backend_id:.*?>/<route:.*>', ProxyHandlerRogerthat)
yield Handler('/api/plugins/rcc/proxy/apps/<backend_id:[^/]+>/<app_id:[^/]+>', ProxyUpdateApp)
yield Handler('/api/plugins/rcc/proxy/apps/<backend_id:.*?>', ProxyCreateApp)
for url, handler in rest_functions(backends, authentication=AUTHENTICATED):
yield Handler(url=url, handler=handler)
for url, handler in rest_functions(permissions, authentication=AUTHENTICATED):
yield Handler(url=url, handler=handler)
elif auth == Handler.AUTH_UNAUTHENTICATED:
yield Handler('/', IndexPageHandler)
def get_client_routes(self):
return ['/apps<route:.*>', '/backends<route:.*>', '/developer-accounts<route:.*>', '/review-notes<route:.*>',
'/contacts<route:.*>']
def get_modules(self):
perms = get_permissions_from_scopes(get_current_session().scopes)
modules = [Module(u'review_notes', [], 30), Module(u'contacts', [], 40)]
if perms.admin or perms.developer_accounts or perms.apps or perms.developer_accounts_perm:
modules.append(Module(u'apps', [], 10))
if perms.admin or perms.developer_accounts or perms.developer_accounts_perm:
modules.append(Module(u'developer_accounts', [], 20))
if perms.admin or perms.backend_servers:
modules.append(Module(u'backend_servers', [], 10000))
if len(modules) > 1:
modules.append(Module(u'home', [], 0))
return modules
@returns(RogerthatControlCenterPlugin)
def get_rcc_plugin():
# type: () -> RogerthatControlCenterPlugin
p = get_plugin(NAMESPACE)
return p
| apache-2.0 | 3,076,653,277,952,046,000 | 50.558442 | 117 | 0.713854 | false |
t-wissmann/qutebrowser | tests/unit/browser/webengine/test_webenginesettings.py | 1 | 3795 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2017-2020 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
import types
import logging
import pytest
pytest.importorskip('PyQt5.QtWebEngineWidgets')
from qutebrowser.browser.webengine import webenginesettings
from qutebrowser.utils import usertypes, qtutils
from qutebrowser.misc import objects
@pytest.fixture(autouse=True)
def init(qapp, config_stub, cache_tmpdir, data_tmpdir, monkeypatch):
monkeypatch.setattr(webenginesettings.webenginequtescheme, 'init',
lambda: None)
monkeypatch.setattr(objects, 'backend', usertypes.Backend.QtWebEngine)
init_args = types.SimpleNamespace(enable_webengine_inspector=False)
webenginesettings.init(init_args)
config_stub.changed.disconnect(webenginesettings._update_settings)
def test_big_cache_size(config_stub):
"""Make sure a too big cache size is handled correctly."""
config_stub.val.content.cache.size = 2 ** 63 - 1
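    # The value is expected to be clamped to the maximum of a signed 32-bit int (2**31 - 1).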
profile = webenginesettings.default_profile
profile.setter.set_http_cache_size()
assert profile.httpCacheMaximumSize() == 2 ** 31 - 1
@pytest.mark.skipif(
not qtutils.version_check('5.8'), reason="Needs Qt 5.8 or newer")
def test_non_existing_dict(config_stub, monkeypatch, message_mock, caplog):
monkeypatch.setattr(webenginesettings.spell, 'local_filename',
lambda _code: None)
config_stub.val.spellcheck.languages = ['af-ZA']
with caplog.at_level(logging.WARNING):
webenginesettings._update_settings('spellcheck.languages')
msg = message_mock.getmsg(usertypes.MessageLevel.warning)
expected = ("Language af-ZA is not installed - see scripts/dictcli.py in "
"qutebrowser's sources")
assert msg.text == expected
@pytest.mark.skipif(
not qtutils.version_check('5.8'), reason="Needs Qt 5.8 or newer")
def test_existing_dict(config_stub, monkeypatch):
monkeypatch.setattr(webenginesettings.spell, 'local_filename',
lambda _code: 'en-US-8-0')
config_stub.val.spellcheck.languages = ['en-US']
webenginesettings._update_settings('spellcheck.languages')
for profile in [webenginesettings.default_profile,
webenginesettings.private_profile]:
assert profile.isSpellCheckEnabled()
assert profile.spellCheckLanguages() == ['en-US-8-0']
@pytest.mark.skipif(
not qtutils.version_check('5.8'), reason="Needs Qt 5.8 or newer")
def test_spell_check_disabled(config_stub, monkeypatch):
config_stub.val.spellcheck.languages = []
webenginesettings._update_settings('spellcheck.languages')
for profile in [webenginesettings.default_profile,
webenginesettings.private_profile]:
assert not profile.isSpellCheckEnabled()
def test_default_user_agent_saved():
assert webenginesettings.parsed_user_agent is not None
def test_parsed_user_agent(qapp):
webenginesettings.init_user_agent()
parsed = webenginesettings.parsed_user_agent
assert parsed.upstream_browser_key == 'Chrome'
assert parsed.qt_key == 'QtWebEngine'
| gpl-3.0 | -3,604,195,305,408,544,300 | 38.123711 | 78 | 0.724901 | false |
ipedrazas/plano | python/run.py | 1 | 1670 | from eve import Eve
from flask import request
from flask_cors import CORS
import plano_utils
def after_insert_application(documents):
for doc in documents:
plano_utils.add_application(doc)
def after_insert_component(documents):
for doc in documents:
plano_utils.add_component(doc)
app = Eve()
cors = CORS(app)
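# Eve event hooks: every newly inserted application or component is also passed to plano_utils.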
app.on_inserted_applications += after_insert_application
app.on_inserted_components += after_insert_component
@app.route("/version")
def version():
return 'plano.io v0.0.1'
@app.route("/rel/ispart", methods=['POST'])
def rel_is_part():
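    # Link an existing Component to an Application with an IS_PART relationship.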
json = request.json
if plano_utils.create_rels("Component", json["source"], "Application", json["target"], "IS_PART"):
return "created", 201
return "Relationship already exists", 409
@app.route("/rel/connects", methods=['POST'])
def rel_connects():
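    # Link two Components with a CONNECTS relationship.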
json = request.json
if plano_utils.create_rels("Component", json["source"], "Component", json["target"], "CONNECTS"):
return "created", 201
return "Relationship already exists", 409
@app.route('/compose', methods=['POST'])
def upload_file():
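    # Parse the uploaded YAML file and register its nodes under the given application.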
if request.method == 'POST':
application = request.form.get("application")
file = request.files['file']
if application and file and plano_utils.allowed_file(file.filename):
object_app = plano_utils.get_or_create_app(app, application)
nodes = plano_utils.parse_yaml(file)
plano_utils.add_nodes(nodes, app, str(object_app['_id']), app.logger)
return "Yaml parsed", 201
return "Trouble", 409
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5000, debug=True)
| apache-2.0 | -3,332,283,286,137,989,600 | 28.298246 | 102 | 0.659281 | false |
frederic-mahe/FROGS | tools/preprocess.py | 1 | 43108 | #!/usr/bin/env python2.7
#
# Copyright (C) 2014 INRA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
__author__ = 'Frederic Escudie - Plateforme bioinformatique Toulouse'
__copyright__ = 'Copyright (C) 2015 INRA'
__license__ = 'GNU General Public License'
__version__ = '1.3.0'
__email__ = '[email protected]'
__status__ = 'prod'
import re
import os
import sys
import gzip
import json
import shutil
import tarfile
import argparse
import threading
import multiprocessing
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
BIN_DIR = os.path.abspath(os.path.join(os.path.dirname(CURRENT_DIR), "bin"))
os.putenv('PATH', BIN_DIR + os.pathsep + os.getenv('PATH')) # $PATH
sys.path.insert(0, BIN_DIR) # $PYTHONPATH
from frogsUtils import *
from sequenceIO import SequenceFileReader
##################################################################################################################################################
#
# COMMAND LINES
#
##################################################################################################################################################
class Flash(Cmd):
"""
    @summary : Overlaps and merges mate pairs from fragments shorter than twice the read length. The other fragments are discarded.
"""
def __init__(self, in_R1, in_R2, out_join, param):
"""
@param in_R1 : [str] Path to the R1 fastq file.
@param in_R2 : [str] Path to the R2 fastq file.
@param out_join : [str] Path to the output fastq file.
@param param : [Namespace] The 'param.min_amplicon_size', 'param.max_amplicon_size' and 'param.expected_amplicon_size'
"""
min_overlap = (param.R1_size + param.R2_size) - param.max_amplicon_size
max_expected_overlap = (param.R1_size + param.R2_size) - param.expected_amplicon_size + min(20, int((param.expected_amplicon_size - param.min_amplicon_size)/2))
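        # These overlap bounds mirror the amplicon size limits: the longest amplicon yields the
        # minimum overlap and the expected amplicon size yields the maximum expected overlap
        # (with a safety margin capped at 20 nt).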
Cmd.__init__( self,
'flash',
'Join overlapping paired reads.',
'--threads 1 --min-overlap ' + str(min_overlap) + ' --max-overlap ' + str(max_expected_overlap) + ' --max-mismatch-density 0.1 --compress ' + in_R1 + ' ' + in_R2 + ' --to-stdout > ' + out_join + ' 2> /dev/null',
'--version' )
self.output = out_join
def get_version(self):
"""
@summary : Returns the program version number.
@return : version number if this is possible, otherwise this method return 'unknown'.
"""
return Cmd.get_version(self, 'stdout').split()[1].strip()
def parser(self, log_file):
"""
@summary : Parse the command results to add information in log_file.
@log_file : [str] Path to the sample process log file.
"""
# Parse output
nb_seq = get_fastq_nb_seq(self.output)
# Write result
FH_log = Logger( log_file )
FH_log.write( 'Results:\n' )
FH_log.write( '\tnb seq overlapped: ' + str(nb_seq) + '\n' )
FH_log.close()
class LengthFilter(Cmd):
"""
@summary : Filters sequences by length.
"""
def __init__(self, in_fastq, out_fastq, log_file, param):
"""
@param in_fastq : [str] Path to the processed fastq.
@param out_fastq : [str] Path to the fastq with valid sequences.
@param log_file : [str] Path to the log file.
@param param : [Namespace] The 'param.min_amplicon_size' and 'param.max_amplicon_size'.
"""
Cmd.__init__( self,
'filterSeq.py',
'Filters amplicons by length.',
'--compress --min-length ' + str(param.min_amplicon_size) + ' --max-length ' + str(param.max_amplicon_size) + ' --input-file ' + in_fastq + ' --output-file ' + out_fastq + ' --log-file ' + log_file,
'--version' )
self.program_log = log_file
def parser(self, log_file):
"""
@summary : Parse the command results to add information in log_file.
@log_file : [str] Path to the sample process log file.
"""
# Parse output
FH_log_filter = open( self.program_log )
nb_processed = 0
filtered_on_length = 0
for line in FH_log_filter:
if line.startswith('Nb seq filtered on length'):
filtered_on_length = int(line.split(':')[1].strip())
elif line.startswith('Nb seq processed'):
nb_processed = int(line.split(':')[1].strip())
FH_log_filter.close()
# Write result
FH_log = Logger( log_file )
FH_log.write( 'Results:\n' )
FH_log.write( '\tnb seq with expected length : ' + str(nb_processed - filtered_on_length) + '\n' )
FH_log.close()
class Remove454prim(Cmd):
"""
    @summary: Removes reads without the 3' and 5' primer and removes primer sequences.
"""
def __init__(self, in_fastq, out_fastq, cutadapt_log, param):
"""
@param in_fastq : [str] Path to the processed fastq.
@param out_fastq : [str] Path to the fastq with valid sequences.
@param cutadapt_log : [str] Path to the log file.
@param param : [Namespace] 'param.five_prim_primer', 'param.three_prim_primer' and 'param.min_amplicon_size'.
"""
Cmd.__init__( self,
                      'remove454Adapt.py',
                      "Removes reads without the 3' and 5' primer and removes primer sequences.",
'--five-prim-primer ' + param.five_prim_primer + ' --three-prim-primer ' + param.three_prim_primer + ' --error-rate 0.1 --non-overlap 1 --min-length ' + str(args.min_amplicon_size) + ' -i ' + in_fastq + ' -o ' + out_fastq + ' > ' + cutadapt_log,
'--version' )
self.output_seq = out_fastq
def parser(self, log_file):
"""
@summary : Parse the command results to add information in log_file.
@log_file : [str] Path to the sample process log file.
"""
# Parse output
FH_output_seq = SequenceFileReader.factory( self.output_seq )
nb_cleaned = 0
for record in FH_output_seq:
nb_cleaned += 1
FH_output_seq.close()
# Write result
FH_log = Logger( log_file )
FH_log.write( 'Results:\n' )
FH_log.write( "\tnb seq with the two primers : " + str(nb_cleaned) + '\n' )
FH_log.close()
class Cutadapt5prim(Cmd):
"""
@summary: Removes reads without the 5' primer and removes primer sequence.
"""
def __init__(self, in_fastq, out_fastq, cutadapt_log, param):
"""
@param in_fastq : [str] Path to the processed fastq.
@param out_fastq : [str] Path to the fastq with valid sequences.
@param cutadapt_log : [str] Path to the log file.
@param param : [Namespace] The primer sequence 'param.five_prim_primer'.
"""
Cmd.__init__( self,
'cutadapt',
"Removes reads without the 5' primer and removes primer sequence.",
'-g ' + param.five_prim_primer + ' --error-rate 0.1 --discard-untrimmed --match-read-wildcards --overlap ' + str(len(param.five_prim_primer) -1) + ' -o ' + out_fastq + ' ' + in_fastq + ' > ' + cutadapt_log,
'--version' )
self.output_seq = out_fastq
def parser(self, log_file):
"""
@summary : Parse the command results to add information in log_file.
@log_file : [str] Path to the sample process log file.
"""
# Parse output
FH_output_seq = SequenceFileReader.factory( self.output_seq )
nb_cleaned = 0
for record in FH_output_seq:
nb_cleaned += 1
FH_output_seq.close()
# Write result
FH_log = Logger( log_file )
FH_log.write( 'Results:\n' )
FH_log.write( "\tnb seq with 5' primer : " + str(nb_cleaned) + '\n' )
FH_log.close()
def get_version(self):
"""
@summary : Returns the program version number.
@return : version number if this is possible, otherwise this method return 'unknown'.
"""
return Cmd.get_version(self, 'stdout')
class Cutadapt3prim(Cmd):
"""
@summary : Removes reads without the 3' primer and removes primer sequence.
"""
def __init__(self, in_fastq, out_fastq, cutadapt_log, param):
"""
@param in_fastq : [str] Path to the processed fastq.
@param out_fastq : [str] Path to the fastq with valid sequences.
@param cutadapt_log : [str] Path to the log file.
@param param : [Namespace] The primer sequence 'param.three_prim_primer'.
"""
Cmd.__init__( self,
'cutadapt',
"Removes reads without the 3' primer and removes primer sequence.",
'-a ' + param.three_prim_primer + ' --error-rate 0.1 --discard-untrimmed --match-read-wildcards --overlap ' + str(len(param.three_prim_primer) -1) + ' -o ' + out_fastq + ' ' + in_fastq + ' > ' + cutadapt_log,
'--version' )
self.output_seq = out_fastq
def parser(self, log_file):
"""
@summary : Parse the command results to add information in log_file.
@log_file : [str] Path to the sample process log file.
"""
# Parse output
FH_output_seq = SequenceFileReader.factory( self.output_seq )
nb_cleaned = 0
for record in FH_output_seq:
nb_cleaned += 1
FH_output_seq.close()
# Write result
FH_log = Logger( log_file )
FH_log.write( 'Results:\n' )
FH_log.write( "\tnb seq with 3' primer : " + str(nb_cleaned) + '\n' )
FH_log.close()
def get_version(self):
"""
@summary : Returns the program version number.
@return : version number if this is possible, otherwise this method return 'unknown'.
"""
return Cmd.get_version(self, 'stdout')
class MultiFilter(Cmd):
"""
@summary : Filters sequences.
"""
def __init__(self, in_fastq, out_fasta, log_file, param):
"""
@param in_fastq : [str] Path to the processed fastq.
@param out_fasta : [str] Path to the fasta with valid sequences.
@param log_file : [str] Path to the log file.
@param param : [Namespace] The 'param.min_amplicon_size', ['param.five_prim_primer'], ['param.three_prim_primer'].
"""
        cmd_description = 'Filters amplicons without primers by length and N count.'
add_options = ""
if param.sequencer == "454":
            cmd_description = 'Filters amplicons without primers by length, N count, homopolymer length and low quality windows.'
add_options = " --max-homopolymer 7 --qual-window threshold:10 win_size:10"
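        # Length thresholds are applied after primer removal, so primer lengths are subtracted from the bounds below.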
primers_size = 0
if param.five_prim_primer is not None: primers_size += len(param.five_prim_primer)
if param.three_prim_primer is not None: primers_size += len(param.three_prim_primer)
Cmd.__init__( self,
'filterSeq.py',
                      cmd_description,
'--force-fasta --max-N 0 --min-length ' + str(param.min_amplicon_size - primers_size) + ' --max-length ' + str(param.max_amplicon_size - primers_size) + add_options + ' --input-file ' + in_fastq + ' --output-file ' + out_fasta + ' --log-file ' + log_file,
'--version' )
self.program_log = log_file
def parser(self, log_file):
"""
@summary : Parse the command results to add information in log_file.
@log_file : [str] Path to the sample process log file.
"""
# Parse output
FH_log_filter = open( self.program_log )
nb_processed = 0
filtered_on_length = None
filtered_on_N = None
filtered_on_homopolymer = None
filtered_on_quality = None
for line in FH_log_filter:
if line.startswith('Nb seq filtered on length'):
filtered_on_length = int(line.split(':')[1].strip())
elif line.startswith('Nb seq filtered on N'):
filtered_on_N = int(line.split(':')[1].strip())
elif line.startswith('Nb seq filtered on homopolymer'):
filtered_on_homopolymer = int(line.split(':')[1].strip())
elif line.startswith('Nb seq filtered on quality'):
filtered_on_quality = int(line.split(':')[1].strip())
elif line.startswith('Nb seq processed'):
nb_processed = int(line.split(':')[1].strip())
FH_log_filter.close()
# Write result
previous_nb_seq = nb_processed
FH_log = Logger( log_file )
FH_log.write( 'Results:\n' )
if filtered_on_length is not None:
FH_log.write( '\tnb seq with expected length : ' + str(previous_nb_seq - filtered_on_length) + '\n' )
previous_nb_seq -= filtered_on_length
if filtered_on_N is not None:
FH_log.write( '\tnb seq without N : ' + str(previous_nb_seq - filtered_on_N) + '\n' )
previous_nb_seq -= filtered_on_N
if filtered_on_homopolymer is not None:
FH_log.write( '\tnb seq without large homopolymer : ' + str(previous_nb_seq - filtered_on_homopolymer) + '\n' )
previous_nb_seq -= filtered_on_homopolymer
if filtered_on_quality is not None:
FH_log.write( '\tnb seq without nearest poor quality : ' + str(previous_nb_seq - filtered_on_quality) + '\n' )
previous_nb_seq -= filtered_on_quality
FH_log.close()
class DerepBySample(Cmd):
"""
@summary : Dereplicates sample sequences.
"""
def __init__(self, in_fasta, out_fasta, out_count):
"""
@param in_fasta : [str] Path to the processed fasta.
@param out_fasta : [str] Path to the dereplicated fasta.
@param out_count : [str] Path to the count file.
"""
Cmd.__init__( self,
'derepSamples.py',
'Dereplicates sample sequences.',
'--sequences-files ' + in_fasta + ' --dereplicated-file ' + out_fasta + ' --count-file ' + out_count,
'--version' )
class DerepGlobal(Cmd):
"""
@summary : Dereplicates together sequences from several files.
"""
def __init__(self, all_fasta, samples_names, out_fasta, out_count, param):
"""
        @param all_fasta : [list] Paths to the processed fasta files.
@param samples_names : [list] The sample name for each fasta.
@param out_fasta : [str] Path to the dereplicated fasta.
@param out_count : [str] Path to the count file. It contains the count
by sample for each representative sequence.
@param param : [str] The 'param.nb_cpus'.
"""
Cmd.__init__( self,
'derepSamples.py',
'Dereplicates together sequences from several samples.',
"--nb-cpus " + str(param.nb_cpus) + " --size-separator ';size=' --sequences-files " + " ".join(all_fasta) + " --samples-names " + " ".join(samples_names) + " --dereplicated-file " + out_fasta + " --count-file " + out_count,
'--version' )
##################################################################################################################################################
#
# FUNCTIONS
#
##################################################################################################################################################
def get_seq_length( input_file, size_separator=None ):
"""
@summary: Returns the number of sequences by sequences lengths.
@param input_file: [str] The sequence file path.
@param size_separator: [str] If it exists the size separator in sequence ID.
@return: [dict] By sequences lengths the number of sequence.
"""
nb_by_length = dict()
FH_seq = SequenceFileReader.factory( input_file )
for record in FH_seq:
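        # When a size separator is given, the sequence ID carries its abundance (e.g. ";size=12").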
nb_seq = 1
if size_separator is not None:
nb_seq = int(record.id.rsplit(size_separator, 1)[-1])
seq_length = len(record.string)
if not nb_by_length.has_key(str(seq_length)):
nb_by_length[str(seq_length)] = 0
nb_by_length[str(seq_length)] += nb_seq
FH_seq.close()
return nb_by_length
def summarise_results( summary_file, samples_names, log_files, filtered_files ):
"""
@summary: Writes one summary of results from several logs.
@param summary_file: [str] The output file.
    @param samples_names: [list] The sample names.
    @param log_files: [list] The list of paths to log files (in samples_names order).
    @param filtered_files: [list] The list of paths to sequence files after preprocessing (in samples_names order).
"""
# Get data
categories = get_filter_steps(log_files[0])
filters_by_sample = dict()
lengths_by_sample = dict()
for spl_idx, spl_name in enumerate(samples_names):
filters_by_sample[spl_name] = get_sample_resuts(log_files[spl_idx])
lengths_by_sample[spl_name] = get_seq_length(filtered_files[spl_idx], ';size=')
# Write
FH_summary_tpl = open( os.path.join(CURRENT_DIR, "preprocess_tpl.html") )
FH_summary_out = open( summary_file, "w" )
for line in FH_summary_tpl:
if "###FILTERS_CATEGORIES###" in line:
line = line.replace( "###FILTERS_CATEGORIES###", json.dumps(categories) )
elif "###FILTERS_DATA###" in line:
line = line.replace( "###FILTERS_DATA###", json.dumps(filters_by_sample) )
elif "###LENGTHS_DATA###" in line:
line = line.replace( "###LENGTHS_DATA###", json.dumps(lengths_by_sample) )
FH_summary_out.write( line )
FH_summary_out.close()
FH_summary_tpl.close()
def get_filter_steps( log_file ):
"""
@summary: Returns the ordered list of steps.
@param log_file: [str] Path to a log file.
@return: [list] The ordered list of steps.
"""
steps = ["before process"]
FH_input = open(log_file)
for line in FH_input:
if line.strip().startswith('nb seq') and not line.strip().startswith('nb seq before process'):
steps.append( line.split('nb seq')[1].split(':')[0].strip() )
FH_input.close()
return steps
def get_sample_resuts( log_file ):
"""
@summary: Returns the sample results (number of sequences after each filters).
@param log_file: [str] Path to a log file.
@return: [list] The number of sequences after each filter.
"""
nb_seq = list()
FH_input = open(log_file)
for line in FH_input:
if line.strip().startswith('nb seq before process'):
nb_seq.append( int(line.split(':')[1].strip()) )
elif line.strip().startswith('nb seq'):
nb_seq.append( int(line.split(':')[1].strip()) )
FH_input.close()
return nb_seq
def log_append_files( log_file, appended_files ):
"""
@summary: Append content of several log files in one log file.
@param log_file: [str] The log file where contents of others are appended.
@param appended_files: [list] List of log files to append.
"""
FH_log = Logger( log_file )
FH_log.write( "\n" )
for current_file in appended_files:
FH_input = open(current_file)
for line in FH_input:
FH_log.write( line )
FH_input.close()
FH_log.write( "\n" )
FH_log.write( "\n" )
FH_log.close()
def samples_from_tar( archive, contiged, global_tmp_files, R1_files, R2_files, samples_names ):
"""
@summary: Extracts samples files from the archive and set R1_files, R2_files and samples_names.
@param archive: [str] Path to the tar file.
@param contiged: [bool] True if the R1 and R2 files are already contiged for each sample.
@param global_tmp_files: [TmpFiles] The tmp file manager for the global script (extracted files will be added into this manager).
@param R1_files: [list] The list of path to extracted R1 files.
@param R2_files: [list] The list of path to extracted R2 files.
@param samples_names: [list] The samples names.
"""
R1_tmp = list()
R2_tmp = list()
tmp_folder = os.path.join( global_tmp_files.tmp_dir, global_tmp_files.prefix + "_tmp" )
if not tarfile.is_tarfile(archive):
raise Exception("The archive '" + archive + "' is not a tar file.")
FH_tar = tarfile.open(archive)
# List R1_files, R2_files and samples_names
archive_members = sorted(FH_tar.getmembers(), key=lambda member: member.name)
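    # Sorting members by name keeps each sample's R1 and R2 entries at matching indexes in the two lists.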
for file_info in archive_members:
if file_info.isfile():
if "_R1" in file_info.name or "_r1" in file_info.name:
samples_names.append( re.split('_[Rr]1', file_info.name)[0] )
R1_files.append( global_tmp_files.add(file_info.name) )
R1_tmp.append( os.path.join(tmp_folder, file_info.name) )
elif "_R2" in file_info.name or "_r2" in file_info.name:
R2_files.append( global_tmp_files.add(file_info.name) )
R2_tmp.append( os.path.join(tmp_folder, file_info.name) )
else:
if contiged:
samples_names.append( file_info.name.split('.')[0] )
R1_tmp.append( os.path.join(tmp_folder, file_info.name) )
R1_files.append( global_tmp_files.add(file_info.name) )
else:
                    raise Exception("The file '" + file_info.name + "' in archive '" + archive + "' is invalid. The file names must contain '_R1' or '_R2'.")
else:
raise Exception("The archive '" + archive + "' must not contain folders.")
if len(R1_files) != len(R2_files) and not contiged:
if len(R1_files) > len(R2_files):
            raise Exception( str(len(R1_files) - len(R2_files)) + " R2 file(s) are missing in archive '" + archive + "'. R1 files : [" + ", ".join(R1_files) + "] ; R2 files : [" + ", ".join(R2_files) + "]" )
        else:
            raise Exception( str(len(R2_files) - len(R1_files)) + " R1 file(s) are missing in archive '" + archive + "'. R1 files : [" + ", ".join(R1_files) + "] ; R2 files : [" + ", ".join(R2_files) + "]" )
try:
# Extract
FH_tar.extractall(tmp_folder)
FH_tar.close()
# Move files
for idx in range(len(samples_names)):
shutil.move( R1_tmp[idx], R1_files[idx] )
if not contiged:
shutil.move( R2_tmp[idx], R2_files[idx] )
except:
for current_file in R1_files + R2_files:
if os.path.exists(current_file) : os.remove( current_file )
finally:
for current_file in R1_tmp + R2_tmp:
if os.path.exists(current_file) : os.remove( current_file )
os.rmdir(tmp_folder)
def is_gzip( file ):
"""
@return: [bool] True if the file is gziped.
@param file : [str] Path to processed file.
"""
is_gzip = None
FH_input = gzip.open( file )
try:
FH_input.readline()
is_gzip = True
except:
is_gzip = False
finally:
FH_input.close()
return is_gzip
def get_fastq_nb_seq( fastq_file ):
"""
@summary: Returns the number of sequences in fastq_file.
@param fastq_file: [str] Path to the fastq file processed.
@return: [int] The number of sequences.
"""
FH_input = None
if not is_gzip(fastq_file):
FH_input = open( fastq_file )
else:
FH_input = gzip.open( fastq_file )
nb_line = 0
for line in FH_input:
nb_line += 1
FH_input.close()
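    # Each FASTQ record spans four lines (ID, sequence, separator, quality).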
nb_seq = nb_line/4
return nb_seq
def get_fasta_nb_seq( fasta_file ):
"""
@summary: Returns the number of sequences in fasta_file.
@param fasta_file: [str] Path to the fasta file processed.
@return: [int] The number of sequences.
"""
FH_input = None
if not is_gzip(fasta_file):
FH_input = open( fasta_file )
else:
FH_input = gzip.open( fasta_file )
nb_seq = 0
for line in FH_input:
if line.startswith(">"):
nb_seq += 1
FH_input.close()
return nb_seq
def filter_process_multiples_files(R1_files, R2_files, samples_names, out_files, log_files, args):
"""
@summary: filters sequences of samples.
    @param R1_files: [list] List of paths to reads 1 fastq files or contiged files (one per sample).
    @param R2_files: [list] List of paths to reads 2 fastq files (one per sample).
    @param samples_names: [list] The sample name for each R1/R2 file pair.
    @param out_files: [list] List of paths to the filtered files (one per sample).
    @param log_files: [list] List of paths to the output logs (one per sample). Each contains a trace of all the operations and results.
@param args: [Namespace] Global parameters.
"""
for idx in range(len(out_files)):
if args.already_contiged:
process_sample( R1_files[idx], None, samples_names[idx], out_files[idx], log_files[idx], args )
else:
process_sample( R1_files[idx], R2_files[idx], samples_names[idx], out_files[idx], log_files[idx], args )
def process_sample(R1_file, R2_file, sample_name, out_file, log_file, args):
"""
@summary: Merges, filters and dereplicates all sequences of one sample.
@param R1_file: [str] Path to reads 1 fastq file or contiged file of the sample.
@param R2_file: [str] Path to reads 2 fastq file of the sample.
@param sample_name: [str] The sample name.
    @param out_file: [str] Path to the filtered file.
    @param log_file: [str] Path to the output log. It contains a trace of all the operations and results.
@param args: [Namespace] Global parameters.
"""
tmp_files = TmpFiles( os.path.split(out_file)[0] )
out_flash = tmp_files.add( sample_name + '_flash.fastq.gz' )
out_lengthFilter = tmp_files.add( sample_name + '_length_filter.fastq.gz' )
log_lengthFilter = tmp_files.add( sample_name + '_length_filter_log.txt' )
tmp_cutadapt = tmp_files.add( sample_name + '_cutadapt_5prim_trim.fastq.gz' )
log_5prim_cutadapt = tmp_files.add( sample_name + '_cutadapt_5prim_log.txt' )
log_3prim_cutadapt = tmp_files.add( sample_name + '_cutadapt_3prim_log.txt' )
out_cutadapt = tmp_files.add( sample_name + '_cutadapt.fastq.gz' )
out_NAndLengthfilter = tmp_files.add( sample_name + '_N_and_length_filter.fasta' )
log_NAndLengthfilter = tmp_files.add( sample_name + '_N_and_length_filter_log.txt' )
out_count = tmp_files.add( sample_name + '_derep_count.tsv' )
try:
# Start log
FH_log = open(log_file, "w")
if not args.already_contiged:
FH_log.write('##Sample\nR1 : ' + R1_file + '\nR2 : ' + R2_file + '\nSample name : ' + sample_name + '\n')
else:
FH_log.write('##Sample\nContiged file : ' + R1_file + '\nSample name : ' + sample_name + '\n')
FH_log.write('nb seq before process : ' + str(get_fastq_nb_seq(R1_file)) +'\n' )
FH_log.write('##Commands\n')
FH_log.close()
# Commands execution
if not args.already_contiged:
flash_cmd = Flash(R1_file, R2_file, out_flash, args)
flash_cmd.submit(log_file)
else:
out_flash = R1_file
if args.sequencer == "454": # 454
if is_gzip( out_flash ):
renamed_out_flash = tmp_files.add( sample_name + '_454.fastq.gz' ) # prevent cutadapt problem (type of file is checked by extension)
else:
renamed_out_flash = tmp_files.add( sample_name + '_454.fastq' ) # prevent cutadapt problem (type of file is checked by extension)
shutil.copyfile( out_flash, renamed_out_flash ) # prevent symlink problem
Remove454prim(renamed_out_flash, out_cutadapt, log_3prim_cutadapt, args).submit(log_file)
else: # Illumina
if args.five_prim_primer and args.three_prim_primer: # Illumina standard sequencing protocol
LengthFilter(out_flash, out_lengthFilter, log_lengthFilter, args).submit(log_file)
Cutadapt5prim(out_lengthFilter, tmp_cutadapt, log_5prim_cutadapt, args).submit(log_file)
Cutadapt3prim(tmp_cutadapt, out_cutadapt, log_3prim_cutadapt, args).submit(log_file)
else: # Custom sequencing primers. The amplicons is full length (Illumina) except PCR primers (it is use as sequencing primers). [Protocol Kozich et al. 2013]
out_cutadapt = out_flash
MultiFilter(out_cutadapt, out_NAndLengthfilter, log_NAndLengthfilter, args).submit(log_file)
DerepBySample(out_NAndLengthfilter, out_file, out_count).submit(log_file)
finally:
if not args.debug:
tmp_files.deleteAll()
def process( args ):
tmp_files = TmpFiles( os.path.split(args.output_dereplicated)[0] )
# Process
try:
samples_names = list()
R1_files = list()
R2_files = list()
filtered_files = list()
log_files = list()
# Inputs
if args.input_archive is not None: # input is an archive
samples_from_tar( args.input_archive, args.already_contiged, tmp_files, R1_files, R2_files, samples_names )
else: # inputs are files
if args.sequencer == "illumina":
if args.R2_size is not None:
R2_files = args.input_R2
R1_files = args.input_R1
samples_names = [os.path.basename(current_R1).split('.')[0] for current_R1 in args.input_R1]
if args.samples_names is not None:
samples_names = args.samples_names
# Tmp files
filtered_files = [tmp_files.add(current_sample + '_filtered.fasta') for current_sample in samples_names]
log_files = [tmp_files.add(current_sample + '_log.txt') for current_sample in samples_names]
# Filter
nb_processses_used = min( len(R1_files), args.nb_cpus )
if nb_processses_used == 1:
filter_process_multiples_files( R1_files, R2_files, samples_names, filtered_files, log_files, args )
else:
processes = [{'process':None, 'R1_files':[], 'R2_files':[], 'samples_names':[], 'filtered_files':[], 'log_files':[]} for idx in range(nb_processses_used)]
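            # Samples are distributed round-robin across workers; each worker then filters its own subset.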
# Set processes
for idx in range(len(R1_files)):
process_idx = idx % nb_processses_used
processes[process_idx]['R1_files'].append(R1_files[idx])
if not args.already_contiged:
processes[process_idx]['R2_files'].append(R2_files[idx])
processes[process_idx]['samples_names'].append(samples_names[idx])
processes[process_idx]['filtered_files'].append(filtered_files[idx])
processes[process_idx]['log_files'].append(log_files[idx])
# Launch processes
            for process_idx, current_process in enumerate(processes):
                if process_idx == 0: # First process is threaded with parent job
current_process['process'] = threading.Thread( target=filter_process_multiples_files,
args=(current_process['R1_files'], current_process['R2_files'], current_process['samples_names'], current_process['filtered_files'], current_process['log_files'], args) )
else: # Others processes are processed on different CPU
current_process['process'] = multiprocessing.Process( target=filter_process_multiples_files,
args=(current_process['R1_files'], current_process['R2_files'], current_process['samples_names'], current_process['filtered_files'], current_process['log_files'], args) )
current_process['process'].start()
# Wait processes end
for current_process in processes:
current_process['process'].join()
# Check processes status
for current_process in processes:
if issubclass(current_process['process'].__class__, multiprocessing.Process) and current_process['process'].exitcode != 0:
raise Exception( "Error in sub-process execution." )
# Write summary
summarise_results( args.summary, samples_names, log_files, filtered_files )
log_append_files( args.log_file, log_files )
# Dereplicate global
Logger.static_write(args.log_file, '##Sample\nAll\n##Commands\n')
DerepGlobal(filtered_files, samples_names, args.output_dereplicated, args.output_count, args).submit( args.log_file )
# Check the number of sequences after filtering
if get_fasta_nb_seq(args.output_dereplicated) == 0:
raise Exception( "The filters have eliminated all sequences (see summary for more details)." )
# Remove temporary files
finally:
if not args.debug:
tmp_files.deleteAll()
##################################################################################################################################################
#
# MAIN
#
##################################################################################################################################################
if __name__ == "__main__":
# Manage parameters
parser = argparse.ArgumentParser(
description='Pre-process amplicons to use reads in diversity analysis.',
usage='\n For samples files:' +
'\n preprocess.py illumina ' +
'\n --input-R1 R1_FILE [R1_FILE ...]' +
'\n --already-contiged | --R2-files R2_FILE [R2_FILE ...] --R1-size R1_SIZE --R2-size R2_SIZE --expected-amplicon-size SIZE' +
'\n --min-amplicon-size MIN_AMPLICON_SIZE' +
'\n --max-amplicon-size MAX_AMPLICON_SIZE' +
'\n --without-primers | --five-prim-primer FIVE_PRIM_PRIMER --three-prim-primer THREE_PRIM_PRIMER' +
'\n [--samples-names SAMPLE_NAME [SAMPLE_NAME ...]]' +
'\n [-p NB_CPUS] [--debug] [-v]' +
'\n [-d DEREPLICATED_FILE] [-c COUNT_FILE]' +
'\n [-s SUMMARY_FILE] [-l LOG_FILE]' +
'\n' +
'\n For samples archive:' +
'\n preprocess.py 454 ' +
'\n --input-archive ARCHIVE_FILE' +
'\n --already-contiged | --R1-size R1_SIZE --R2-size R2_SIZE --expected-amplicon-size SIZE' +
'\n --min-amplicon-size MIN_AMPLICON_SIZE' +
'\n --max-amplicon-size MAX_AMPLICON_SIZE' +
'\n --five-prim-primer FIVE_PRIM_PRIMER' +
'\n --three-prim-primer THREE_PRIM_PRIMER' +
'\n [-p NB_CPUS] [--debug] [-v]' +
'\n [-d DEREPLICATED_FILE] [-c COUNT_FILE]' +
'\n [-s SUMMARY_FILE] [-l LOG_FILE]'
)
parser.add_argument( '-v', '--version', action='version', version=__version__ )
subparsers = parser.add_subparsers()
# Illumina
parser_illumina = subparsers.add_parser('illumina', help='Illumina sequencers.')
# Illumina parameters
parser_illumina.add_argument( '--min-amplicon-size', type=int, required=True, help='The minimum size for the amplicons.' )
parser_illumina.add_argument( '--max-amplicon-size', type=int, required=True, help='The maximum size for the amplicons.' )
parser_illumina.add_argument( '--five-prim-primer', type=str, help="The 5' primer sequence (wildcards are accepted)." )
parser_illumina.add_argument( '--three-prim-primer', type=str, help="The 3' primer sequence (wildcards are accepted)." )
parser_illumina.add_argument( '--without-primers', action='store_true', default=False, help="Use this option when you use custom sequencing primers and these primers are the PCR primers. In this case the reads do not contain the PCR primers." )
parser_illumina.add_argument( '--R1-size', type=int, help='The read1 size.' )
parser_illumina.add_argument( '--R2-size', type=int, help='The read2 size.' )
parser_illumina.add_argument( '--expected-amplicon-size', type=int, help='The expected size for the majority of the amplicons (with primers).' )
    parser_illumina.add_argument( '--already-contiged', action='store_true', default=False, help='The archive contains one file per sample: reads 1 and reads 2 are already contiged by pair.' )
parser_illumina.add_argument( '-p', '--nb-cpus', type=int, default=1, help="The maximum number of CPUs used." )
parser_illumina.add_argument( '--debug', default=False, action='store_true', help="Keep temporary files to debug program." )
# Illumina inputs
group_illumina_input = parser_illumina.add_argument_group( 'Inputs' )
group_illumina_input.add_argument( '--samples-names', nargs='+', default=None, help='The sample name for each R1/R2-files.' )
group_illumina_input.add_argument( '--input-archive', default=None, help='The tar file containing R1 file and R2 file for each sample.' )
group_illumina_input.add_argument( '--input-R1', required=None, nargs='+', help='The R1 sequence file for each sample (format: fastq).' )
group_illumina_input.add_argument( '--input-R2', required=None, nargs='+', help='The R2 sequence file for each sample (format: fastq).' )
group_illumina_input.set_defaults( sequencer='illumina' )
# Illumina outputs
group_illumina_output = parser_illumina.add_argument_group( 'Outputs' )
    group_illumina_output.add_argument( '-d', '--output-dereplicated', default='dereplication.fasta', help='Fasta file with unique sequences. Each sequence has an ID ending with the number of initial sequences represented (example : ">a0101;size=10").')
group_illumina_output.add_argument( '-c', '--output-count', default='count.tsv', help='TSV file with count by sample for each unique sequence (example with 3 samples : "a0101<TAB>5<TAB>8<TAB>0").')
group_illumina_output.add_argument( '-s', '--summary', default='summary.html', help='HTML file with summary of filters results.')
group_illumina_output.add_argument( '-l', '--log-file', default=sys.stdout, help='This output file will contain several information on executed commands.')
# 454
parser_454 = subparsers.add_parser('454', help='454 sequencers.')
parser_454.add_argument( '--min-amplicon-size', type=int, required=True, help='The minimum size for the amplicons (with primers).' )
parser_454.add_argument( '--max-amplicon-size', type=int, required=True, help='The maximum size for the amplicons (with primers).' )
parser_454.add_argument( '--five-prim-primer', type=str, required=True, help="The 5' primer sequence (wildcards are accepted)." )
parser_454.add_argument( '--three-prim-primer', type=str, required=True, help="The 3' primer sequence (wildcards are accepted)." )
parser_454.add_argument( '-p', '--nb-cpus', type=int, default=1, help="The maximum number of CPUs used." )
parser_454.add_argument( '--debug', default=False, action='store_true', help="Keep temporary files to debug program." )
# 454 inputs
group_454_input = parser_454.add_argument_group( 'Inputs' )
group_454_input.add_argument( '--samples-names', nargs='+', default=None, help='The sample name for each R1/R2-files.' )
group_454_input.add_argument( '--input-archive', default=None, help='The tar file containing R1 file and R2 file for each sample (format: tar).' )
group_454_input.add_argument( '--input-R1', required=None, nargs='+', help='The sequence file for each sample (format: fastq).' )
    group_454_input.set_defaults( sequencer='454' )
# 454 outputs
group_454_output = parser_454.add_argument_group( 'Outputs' )
    group_454_output.add_argument( '-d', '--output-dereplicated', default='dereplication.fasta', help='Fasta file with unique sequences. Each sequence has an ID ending with the number of initial sequences represented (example : ">a0101;size=10").')
group_454_output.add_argument( '-c', '--output-count', default='count.tsv', help='TSV file with count by sample for each unique sequence (example with 3 samples : "a0101<TAB>5<TAB>8<TAB>0").')
group_454_output.add_argument( '-s', '--summary', default='summary.html', help='HTML file with summary of filters results.')
group_454_output.add_argument( '-l', '--log-file', default=sys.stdout, help='This output file will contain several information on executed commands.')
parser_454.set_defaults( sequencer='454' )
parser_454.set_defaults( already_contiged=True )
# Parse parameters
args = parser.parse_args()
# Check parameters
prevent_shell_injections(args)
Logger.static_write(args.log_file, "## Application\nSoftware: " + os.path.basename(sys.argv[0]) + " (version: " + str(__version__) + ")\nCommand: " + " ".join(sys.argv) + "\n\n")
if args.input_archive is not None: # input is an archive
if args.input_R1 is not None: raise Exception( "With '--archive-file' parameter you cannot set the parameter '--R1-files'." )
if args.samples_names is not None: raise Exception( "With '--archive-file' parameter you cannot set the parameter '--samples-names'." )
if args.sequencer == "illumina":
if args.input_R2 is not None: raise Exception( "With '--archive-file' parameter you cannot set the parameter '--R2-files'." )
else: # inputs are files
if args.input_R1 is None: raise Exception( "'--R1-files' is required." )
if args.sequencer == "illumina":
if not args.already_contiged and args.input_R2 is None: raise Exception( "'--R2-files' is required." )
if args.sequencer == "illumina":
        if (args.R1_size is None or args.R2_size is None or args.expected_amplicon_size is None) and not args.already_contiged: raise Exception( "'--R1-size/--R2-size/--expected-amplicon-size' or '--already-contiged' must be set." )
if args.without_primers:
if args.five_prim_primer or args.three_prim_primer: raise Exception( "The option '--without-primers' cannot be used with '--five-prim-primer' and '--three-prim-primer'." )
else:
            if args.five_prim_primer is None or args.three_prim_primer is None: raise Exception( "'--five-prim-primer/--three-prim-primer' or 'without-primers' must be set." )
# Process
process( args )
| gpl-3.0 | -5,037,706,322,682,687,000 | 50.626347 | 278 | 0.594994 | false |