repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated |
---|---|---|---|---|---|---|---|---|---|---|
lavalamp-/ws-backend-community | lib/debugging.py | 1 | 4429 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from uuid import uuid4
def clear_celery_queue():
"""
Clear out all tasks for the Web Sight Celery application.
:return: None
"""
from tasknode import websight_app
websight_app.control.purge()
def enqueue_database_debugging_task(*args, **kwargs):
"""
Create and enqueue a Celery task of the debugging_database_task type.
:param args: Positional arguments to pass to the task.
:param kwargs: Keyword arguments to pass to the task.
:return: None
"""
from tasknode.tasks import debugging_database_task
sig = debugging_database_task.si(*args, **kwargs)
sig.apply_async()
def get_debugging_network_service(ip_address=None, port=None, protocol=None):
"""
Get an OrganizationNetworkService attached to the debugging organization that points to
the given IP address, port, and protocol.
:param ip_address: The IP address for the service.
:param port: The port for the service.
:param protocol: The protocol for the service.
:return: An OrganizationNetworkService attached to the debugging organization that points to
the given IP address, port, and protocol.
"""
debugging_org = get_debugging_organization()
network = debugging_org.org_networks[0]
from .sqlalchemy import get_sa_session, get_or_create_network_service_from_org_ip, \
get_or_create_ip_address_from_org_network
db_session = get_sa_session()
address_model = get_or_create_ip_address_from_org_network(
network_uuid=network.uuid,
address=ip_address,
address_type="ipv4",
db_session=db_session,
)
service = get_or_create_network_service_from_org_ip(
ip_uuid=address_model.uuid,
port=port,
protocol=protocol,
db_session=db_session,
)
return service
def get_debugging_organization(
org_uuid=u"a9def2a2-54be-40d4-83bf-efc34cc2fbbc",
user_email=u"[email protected]",
):
"""
Create the default debugging organization for the specified user, or return it if it already
exists.
:param org_uuid: The UUID to give the organization.
:param user_email: The email address for the user to add the organization to.
:return: The debugging organization owned by the given user.
"""
from .sqlalchemy import Organization, Network, get_sa_session, get_organization_by_uuid, \
get_user_uuid_by_username
db_session = get_sa_session()
existing_org = get_organization_by_uuid(org_uuid=org_uuid, db_session=db_session)
if existing_org is not None:
return existing_org
user_uuid = get_user_uuid_by_username(username=user_email, db_session=db_session)
new_org = Organization.new(
uuid=org_uuid,
user_id=user_uuid,
name=u"Debugging Organization",
description=u"Debugging Organization Description",
scanning_status=0,
)
new_org_network = Network.new(
name=u"Debugging Network",
address=u"157.166.255.0",
mask_length=24,
scanning_enabled=True,
organization_id=org_uuid,
endpoint_count=0,
)
db_session.add(new_org)
db_session.add(new_org_network)
db_session.commit()
db_session.close()
return new_org
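# Illustrative usage sketch (commented out; the IP value is an arbitrary address inside
# the 157.166.255.0/24 debugging network created above, and "tcp" is an assumed protocol
# string accepted by the service layer):
#
# org = get_debugging_organization()
# service = get_debugging_network_service(
#     ip_address="157.166.255.10",
#     port=80,
#     protocol="tcp",
# )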
def perform_network_service_inspection(
org_uuid=None,
scan_uuid=None,
ip_address=None,
port=None,
protocol=None,
):
"""
Create and enqueue a Celery task of the inspect_network_service_for_organization type.
:param org_uuid: The UUID for the organization.
:param scan_uuid: The UUID for the scan.
:param ip_address: The IP address to check.
:param port: The port to check.
:param protocol: The protocol to use to connect to the remote service.
:return: None
"""
pass
# from tasknode.tasks import perform_network_service_inspection
# from wselasticsearch import bootstrap_index_model_mappings
# org_uuid = org_uuid if org_uuid is not None else str(uuid4())
# scan_uuid = scan_uuid if scan_uuid is not None else str(uuid4())
# bootstrap_index_model_mappings(index=org_uuid, delete_first=True)
# task_sig = perform_network_service_inspection.si(
# org_uuid=org_uuid,
# scan_uuid=scan_uuid,
# port=port,
# protocol=protocol,
# ip_address=ip_address,
# )
# task_sig.apply_async()
| gpl-3.0 | 6,063,669,412,163,870,000 | 33.874016 | 96 | 0.669677 | false |
GENETX/alpg | configs/example.py | 1 | 4948 | #!/usr/bin/python3
#Artificial load profile generator v1.1, generation of artificial load profiles to benchmark demand side management approaches
#Copyright (C) 2018 Gerwin Hoogsteen
#This program is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License
#along with this program. If not, see <http://www.gnu.org/licenses/>.
#This is an example configuration file!
# Select the output writer
import writer as writer
#Random seed
seed = 42
#input files:
weather_irradiation = 'input/weather/solarirradiation_twenthe.csv'
weather_timebaseDataset = 3600 #in seconds per interval
#Simulation:
#number of days to simulate and skipping of initial days. Simulation starts at Sunday January 1.
numDays = 365 # number of days
startDay = 0 # Initial day
#Select the geographic location. Refer to the Astral plugin to see available locations (or give a lon+lat)
# Use e.g. https://www.latlong.net/
from astral import Location
location = Location()
location.solar_depression = 'civil'
location.latitude = 52.239095
location.longitude = 6.857018
location.timezone = 'Europe/Amsterdam'
location.elevation = 0
#Select the devices in the neighbourhood
#Devices
#Scale overall consumption:
consumptionFactor = 1.0 #consumption was a bit too high
# Penetration of emerging technology in percentages
# all values must be between 0-100
# These indicate what percentage of the houses has a certain device
# Electric mobility, restriction that the sum <= 100
# Note, households with larger driving distances will receive EVs first
penetrationEV = 13
penetrationPHEV = 32
# PV and storage, restriction that Battery <= PV
# Note PV and battery size depend on the annual household consumption
# This emulates the Dutch "nul-op-de-meter" regime (net zero annual electricity usage)
penetrationPV = 50
penetrationBattery = 10 #Note only houses with PV will receive a battery!
# Heating systems, with restriction that the sum <= 100
penetrationHeatPump = 25
penetrationCHP = 5 # Combined heat and power
penetrationInductioncooking = 25
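# Optional sanity-check sketch (commented out) for the constraints stated above; these
# asserts are not part of the original configuration:
# assert penetrationEV + penetrationPHEV <= 100
# assert penetrationBattery <= penetrationPV
# assert penetrationHeatPump + penetrationCHP <= 100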
#Device parameters:
#EV
capacityEV = 42000 #Wh
powerEV = 7400 #W
capacityPHEV = 12000 #Wh
powerPHEV = 3700 #W
#PV
PVProductionPerYear = 220 #average kWh per m2 solar panel on annual basis
PVAngleMean = 35 #degrees, 0 is horizontal to earth surface
PVAngleSigma = 10 #degrees
PVAzimuthMean = 180 #degrees, 0 is north, 90 is east
PVAzimuthSigma = 90 #degrees
PVEfficiencyMin = 15 #% of theoretical max
PVEfficiencyMax = 20 #% of theoretical max
#Driving distances
commuteDistanceMean = 25 #km
commuteDistanceSigma = 10 #km
#Battery
capacityBatteryLarge = 12000 #Wh
capacityBatteryMedium = 5000 #Wh
capacityBatterySmall = 2000 #Wh
powerBatteryLarge = 3700 #W
powerBatteryMedium = 3700 #W
powerBatterySmall = 3700 #W
#Kitchen
#Consumption of devices
ConsumptionOven = 2000 #W
ConsumptionMicroWave = 800 #W
ConsumptionStoveVentilation = 120 #W #But this is maximum, usually set lower!
ConsumptionInductionStove = 2200 #W #http://homeguides.sfgate.com/many-watts-induction-stove-85380.html
ConsumptionFridgeBigMin = 80 #W
ConsumptionFridgeBigMax = 120 #W
ConsumptionFridgeSmallMin = 50 #W
ConsumptionFridgeSmallMax = 80 #W
ConsumptionKettle = 2000 #W
#White goods
ConsumptionIron = 2000 #W
ConsumptionVacuumcleaner = 1500 #W
#House
ConsumptionHouseVentilation = 50 #W
#Household randomization
#all values must be between 0-1000
familyOutingChanceMin = 10 #percentage
familyOutingChanceMax = 20 #percentage
personWeekdayActivityChanceMin = 20 #percentage
personWeekdayActivityChanceMax = 30 #percentage
personWeekendActivityChanceMin = 20 #percentage
personWeekendActivityChanceMax = 30 #percentage
householdList = []
#Select the types of households
import households
for i in range(0,1):
householdList.append(households.HouseholdSingleWorker())
for i in range(0,2):
householdList.append(households.HouseholdSingleRetired())
for i in range(0,1):
householdList.append(households.HouseholdDualWorker(True))
for i in range(0,1):
householdList.append(households.HouseholdDualWorker(False))
for i in range(0,2):
householdList.append(households.HouseholdDualRetired())
for i in range(0,2):
householdList.append(households.HouseholdFamilyDualWorker(True))
for i in range(0,1):
householdList.append(households.HouseholdFamilyDualWorker(False))
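# The loops above create 10 example households in total: 1 single worker, 2 single retired,
# 2 dual workers, 2 dual retired, and 3 family dual workers.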
| gpl-3.0 | 4,827,155,180,199,970,000 | 28.628743 | 126 | 0.760509 | false |
woodem/woo | examples/perf/showPlots.py | 1 | 2971 | import numpy as np
from pprint import pprint
dd={}
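# dd maps (tag, cores, nPar, nSteps) -> list of [t1, t, colliderRel] measurements parsed
# from timings.txt; repeated measurements for the same key are averaged further below.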
for l in open('timings.txt'):
if l.startswith('#'): continue
ll=l[:-1].split()
if len(ll)==0: continue
tag,cores,nPar,nSteps=ll[0],int(ll[1]),int(ll[2]),int(ll[3])
t1,t,colliderRel=[float(i) for i in ll[4:]]
key=(tag,cores,nPar,nSteps)
data=[t1,t,colliderRel]
if key not in dd: dd[key]=[data]
else: dd[key]+=[data]
# compute averages
for k in dd: dd[k]=tuple([np.average(d) for d in zip(*dd[k])])
# nn=set()
# for k in dd: nn.add((k[1],k[2]))
out=[]
#refTag,cmpTag='par2_threadSafe','par3_oneMutex'
#refTag,cmpTag='par1','par3_oneMutex'
#refTag,cmpTag='orig','parBounds'
#refTag,cmpTag='noInvFast','par3_oneMutex'
#refTag,cmpTag='par1','par4_shortCircuit'
#refTag,cmpTag='par4_shortCircuit','parBounds'
#refTag,cmpTag='parBounds','gcc49'
#refTag,cmpTag='orig','ompTuneSort1_10k_0'
#refTag,cmpTag='r3547','r3552'
refTag,cmpTag='r3530','iniConParallel'
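# Each row appended to out is:
# [cores, nPar, nSteps, metric name, refTag, reference value, cmpTag, compared value, relative difference]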
for k in sorted(dd.keys()):
if k[0]==refTag: continue
if k[0]!=cmpTag: continue
refKey=(refTag,k[1],k[2],k[3])
if refKey not in dd.keys(): continue
for i,name in enumerate(['t1','t','coll%']):
# if i==1 or i==2: continue
# if i!=2: continue
if i!=0: continue
val0=dd[refKey][i]
val=dd[k][i]
out+=[[k[1],k[2],k[3],name,refTag,val0,k[0],val,'%.2f%%'%(100*(val-val0)/val0)]]
# import prettytable
# print prettytable.PrettyTable(out,border=False)
# pprint(out)
# print out
for o in out:
print('\t'.join([str(oo) for oo in o]))
import pylab
cores=set([k[1] for k in dd.keys() if k[0]==cmpTag])
steps=set([k[3] for k in dd.keys() if k[0]==cmpTag])
nPar=set([k[2] for k in dd.keys() if k[0]==cmpTag])
#cores=[1]
if 0:
for core in cores:
for step in steps:
nPar=sorted(list(set([k[2] for k in dd.keys() if (cmpTag,core,k[2],step) in dd.keys() and (refTag,core,k[2],step) in dd.keys()])))
print(core,step,nPar)
pylab.plot(nPar,[dd[refTag,core,N,step][1] for N in nPar],label='%s, %d cores'%(refTag,core))
pylab.plot(nPar,[dd[cmpTag,core,N,step][1] for N in nPar],label='%s, %d cores'%(cmpTag,core),linewidth=4,alpha=.5)
pylab.xlabel('Number of particles')
pylab.ylabel('Time per one step [s]')
pylab.grid(True)
pylab.legend(loc='best')
if 1:
pylab.figure()
for core in cores:
for step in steps:
nPar=sorted(list(set([k[2] for k in dd.keys() if (cmpTag,core,k[2],step) in dd.keys() and (refTag,core,k[2],step) in dd.keys()])))
print(core,step,nPar)
pylab.plot(nPar,[dd[refTag,core,N,step][0] for N in nPar],label='%s, %d cores'%(refTag,core))
pylab.plot(nPar,[dd[cmpTag,core,N,step][0] for N in nPar],label='%s, %d cores'%(cmpTag,core),linewidth=4,alpha=.5)
pylab.xlabel('Number of particles')
pylab.ylabel('Time of the intial sort [s]')
pylab.grid(True)
pylab.legend(loc='best')
pylab.show()
| gpl-2.0 | 5,512,987,470,357,769,000 | 33.952941 | 142 | 0.612252 | false |
schristakidis/p2ner | p2ner/components/serveroverlay/centralserver/centralserver/messages/messageobjects.py | 1 | 2833 | # -*- coding: utf-8 -*-
# Copyright 2012 Loris Corazza, Sakis Christakidis
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from construct import Container
from p2ner.base.Consts import MessageCodes as MSG
from p2ner.base.ControlMessage import trap_sent, BaseControlMessage,probe_all,ControlMessage
class StreamMessage(BaseControlMessage):
type = "streammessage"
code = MSG.STREAM
ack = True
@classmethod
def send(cls, stream, peer, out):
#cls.log.debug('sending stream message to %s',peer)
return out.send(cls, Container(stream=stream), peer).addErrback(trap_sent)
class PeerListMessage(BaseControlMessage):
type = "peerlistmessage"
code = MSG.SEND_IP_LIST
ack = True
@classmethod
def send(cls, sid, peerlist, peer, out):
#cls.log.debug('sending peerList message to %s',peer)
msg = Container(streamid = sid, peer = peerlist)
return out.send(cls, msg, peer).addErrback(trap_sent)
class PeerListProducerMessage(PeerListMessage):
type = "peerlistmessage"
code = MSG.SEND_IP_LIST_PRODUCER
ack = True
class PeerRemoveMessage(BaseControlMessage):
type = "peerlistmessage"
code = MSG.REMOVE_NEIGHBOURS
ack = True
@classmethod
def send(cls, sid, peerlist, peer, out):
#cls.log.debug('sending peerRemove message to %s',peer)
msg = Container(streamid = sid, peer = peerlist)
return out.send(cls, msg, peer).addErrback(trap_sent)
class PeerRemoveProducerMessage(PeerRemoveMessage):
type = "peerlistmessage"
code = MSG.REMOVE_NEIGHBOURS_PRODUCER
ack = True
class SuggestNewPeerMessage(ControlMessage):
type = "peerlistmessage"
code = MSG.SUGGEST_NEW_PEER
ack = True
def trigger(self, message):
if self.stream.id != message.streamid:
return False
return True
def action(self, message, peer):
self.log.debug('received suggest new peer message from %s',peer)
self.overlay.suggestNewPeer(peer,message.peer)
class SuggestMessage(BaseControlMessage):
type = "peerlistmessage"
code = MSG.SUGGEST
ack = True
@classmethod
def send(cls, sid, peerlist, peer, out):
return out.send(cls, Container(streamid=sid, peer=peerlist), peer).addErrback(trap_sent)
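# Illustrative sketch (commented out; stream, peer_a, peer_b, target_peer and control_out
# are hypothetical placeholders, not defined in this module):
# SuggestMessage.send(stream.id, [peer_a, peer_b], target_peer, control_out)
# The overlay passes the stream id, a peer list, the destination peer and the output
# object whose send() method is used by the classmethods above.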
| apache-2.0 | -8,950,229,297,693,526,000 | 31.563218 | 98 | 0.692199 | false |
dfehrenbach/Swen343_Human_Resources | hr/controllers/authentication.py | 1 | 1073 | """ This is the controller of the /confirm_login endpoint
The following functions are called from here: GET
"""
import logging
import requests
import employees
logging.basicConfig(filename='./log.txt',format='%(asctime)s :: %(name)s :: %(message)s')
logger = logging.getLogger(__name__)
def get(department="",token=""):
""" This is the GET function that will return an object with an employee id if they are authenticated.
:param token:
:return: an object with employee_id
"""
response = requests.post('https://www.googleapis.com/oauth2/v3/tokeninfo',{'access_token': token})
logger.info(response)
if response.status_code == 200:
email = response.json()["email"]
emps = employees.get()
for e in emps["employee_array"]:
employee_department = e["department"].replace(" ","")
if e["email"] == email and (employee_department == department or employee_department == "Board"):
return {"employee_id": e["employee_id"]}
return {'error_message': 'User is not authenticated'}, 400
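# Illustrative call (hypothetical values): get(department="Finance", token="<google-access-token>")
# returns {"employee_id": ...} when the token maps to an employee of that department (or of
# the Board), and the 400 error tuple above otherwise.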
| mit | -5,626,718,884,545,181,000 | 36 | 109 | 0.65424 | false |
BiaDarkia/scikit-learn | sklearn/cluster/k_means_.py | 1 | 61736 | """K-means clustering"""
# Authors: Gael Varoquaux <[email protected]>
# Thomas Rueckstiess <[email protected]>
# James Bergstra <[email protected]>
# Jan Schlueter <[email protected]>
# Nelle Varoquaux
# Peter Prettenhofer <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Robert Layton <[email protected]>
# License: BSD 3 clause
import warnings
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, ClusterMixin, TransformerMixin
from ..metrics.pairwise import euclidean_distances
from ..metrics.pairwise import pairwise_distances_argmin_min
from ..utils.extmath import row_norms, squared_norm, stable_cumsum
from ..utils.sparsefuncs_fast import assign_rows_csr
from ..utils.sparsefuncs import mean_variance_axis
from ..utils.validation import _num_samples
from ..utils import check_array
from ..utils import check_random_state
from ..utils import as_float_array
from ..utils import gen_batches
from ..utils.validation import check_is_fitted
from ..utils.validation import FLOAT_DTYPES
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from ..externals.six import string_types
from ..exceptions import ConvergenceWarning
from . import _k_means
from ._k_means_elkan import k_means_elkan
###############################################################################
# Initialization heuristic
def _k_init(X, n_clusters, x_squared_norms, random_state, n_local_trials=None):
"""Init n_clusters seeds according to k-means++
Parameters
-----------
X : array or sparse matrix, shape (n_samples, n_features)
The data to pick seeds for. To avoid memory copy, the input data
should be double precision (dtype=np.float64).
n_clusters : integer
The number of seeds to choose
x_squared_norms : array, shape (n_samples,)
Squared Euclidean norm of each data point.
random_state : int, RandomState instance
The generator used to initialize the centers. Use an int to make the
randomness deterministic.
See :term:`Glossary <random_state>`.
n_local_trials : integer, optional
The number of seeding trials for each center (except the first),
of which the one reducing inertia the most is greedily chosen.
Set to None to make the number of trials depend logarithmically
on the number of seeds (2+log(k)); this is the default.
Notes
-----
Selects initial cluster centers for k-mean clustering in a smart way
to speed up convergence. see: Arthur, D. and Vassilvitskii, S.
"k-means++: the advantages of careful seeding". ACM-SIAM symposium
on Discrete algorithms. 2007
Version ported from http://www.stanford.edu/~darthur/kMeansppTest.zip,
which is the implementation used in the aforementioned paper.
"""
n_samples, n_features = X.shape
centers = np.empty((n_clusters, n_features), dtype=X.dtype)
assert x_squared_norms is not None, 'x_squared_norms None in _k_init'
# Set the number of local seeding trials if none is given
if n_local_trials is None:
# This is what Arthur/Vassilvitskii tried, but did not report
# specific results for other than mentioning in the conclusion
# that it helped.
n_local_trials = 2 + int(np.log(n_clusters))
# Pick first center randomly
center_id = random_state.randint(n_samples)
if sp.issparse(X):
centers[0] = X[center_id].toarray()
else:
centers[0] = X[center_id]
# Initialize list of closest distances and calculate current potential
closest_dist_sq = euclidean_distances(
centers[0, np.newaxis], X, Y_norm_squared=x_squared_norms,
squared=True)
current_pot = closest_dist_sq.sum()
# Pick the remaining n_clusters-1 points
for c in range(1, n_clusters):
# Choose center candidates by sampling with probability proportional
# to the squared distance to the closest existing center
rand_vals = random_state.random_sample(n_local_trials) * current_pot
candidate_ids = np.searchsorted(stable_cumsum(closest_dist_sq),
rand_vals)
# Compute distances to center candidates
distance_to_candidates = euclidean_distances(
X[candidate_ids], X, Y_norm_squared=x_squared_norms, squared=True)
# Decide which candidate is the best
best_candidate = None
best_pot = None
best_dist_sq = None
for trial in range(n_local_trials):
# Compute potential when including center candidate
new_dist_sq = np.minimum(closest_dist_sq,
distance_to_candidates[trial])
new_pot = new_dist_sq.sum()
# Store result if it is the best local trial so far
if (best_candidate is None) or (new_pot < best_pot):
best_candidate = candidate_ids[trial]
best_pot = new_pot
best_dist_sq = new_dist_sq
# Permanently add best center candidate found in local tries
if sp.issparse(X):
centers[c] = X[best_candidate].toarray()
else:
centers[c] = X[best_candidate]
current_pot = best_pot
closest_dist_sq = best_dist_sq
return centers
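# Illustrative sketch (not part of the library API): _k_init is an internal helper that is
# normally reached through KMeans(init='k-means++'); a direct call would look roughly like
#
# rs = check_random_state(0)
# centers = _k_init(X, n_clusters=3, x_squared_norms=row_norms(X, squared=True), random_state=rs)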
###############################################################################
# K-means batch estimation by EM (expectation maximization)
def _validate_center_shape(X, n_centers, centers):
"""Check if centers is compatible with X and n_centers"""
if len(centers) != n_centers:
raise ValueError('The shape of the initial centers (%s) '
'does not match the number of clusters %i'
% (centers.shape, n_centers))
if centers.shape[1] != X.shape[1]:
raise ValueError(
"The number of features of the initial centers %s "
"does not match the number of features of the data %s."
% (centers.shape[1], X.shape[1]))
def _tolerance(X, tol):
"""Return a tolerance which is independent of the dataset"""
if sp.issparse(X):
variances = mean_variance_axis(X, axis=0)[1]
else:
variances = np.var(X, axis=0)
return np.mean(variances) * tol
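# e.g. with per-feature variances [1.0, 4.0] and tol=1e-4 this returns 2.5e-4, so the
# convergence threshold scales with the spread of the data rather than its absolute units.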
def k_means(X, n_clusters, init='k-means++', precompute_distances='auto',
n_init=10, max_iter=300, verbose=False,
tol=1e-4, random_state=None, copy_x=True, n_jobs=1,
algorithm="auto", return_n_iter=False):
"""K-means clustering algorithm.
Read more in the :ref:`User Guide <k_means>`.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
The observations to cluster. It must be noted that the data
will be converted to C ordering, which will cause a memory copy
if the given data is not C-contiguous.
n_clusters : int
The number of clusters to form as well as the number of
centroids to generate.
init : {'k-means++', 'random', or ndarray, or a callable}, optional
Method for initialization, default to 'k-means++':
'k-means++' : selects initial cluster centers for k-mean
clustering in a smart way to speed up convergence. See section
Notes in k_init for more details.
'random': choose k observations (rows) at random from data for
the initial centroids.
If an ndarray is passed, it should be of shape (n_clusters, n_features)
and gives the initial centers.
If a callable is passed, it should take arguments X, k and
a random state and return an initialization.
precompute_distances : {'auto', True, False}
Precompute distances (faster but takes more memory).
'auto' : do not precompute distances if n_samples * n_clusters > 12
million. This corresponds to about 100MB overhead per job using
double precision.
True : always precompute distances
False : never precompute distances
n_init : int, optional, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
max_iter : int, optional, default 300
Maximum number of iterations of the k-means algorithm to run.
verbose : boolean, optional
Verbosity mode.
tol : float, optional
The relative increment in the results before declaring convergence.
random_state : int, RandomState instance or None (default)
Determines random number generation for centroid initialization. Use
an int to make the randomness deterministic.
See :term:`Glossary <random_state>`.
copy_x : boolean, optional
When pre-computing distances it is more numerically accurate to center
the data first. If copy_x is True (default), then the original data is
not modified, ensuring X is C-contiguous. If False, the original data
is modified, and put back before the function returns, but small
numerical differences may be introduced by subtracting and then adding
the data mean, in this case it will also not ensure that data is
C-contiguous which may cause a significant slowdown.
n_jobs : int
The number of jobs to use for the computation. This works by computing
each of the n_init runs in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
algorithm : "auto", "full" or "elkan", default="auto"
K-means algorithm to use. The classical EM-style algorithm is "full".
The "elkan" variation is more efficient by using the triangle
inequality, but currently doesn't support sparse data. "auto" chooses
"elkan" for dense data and "full" for sparse data.
return_n_iter : bool, optional
Whether or not to return the number of iterations.
Returns
-------
centroid : float ndarray with shape (k, n_features)
Centroids found at the last iteration of k-means.
label : integer ndarray with shape (n_samples,)
label[i] is the code or index of the centroid the
i'th observation is closest to.
inertia : float
The final value of the inertia criterion (sum of squared distances to
the closest centroid for all observations in the training set).
best_n_iter : int
Number of iterations corresponding to the best results.
Returned only if `return_n_iter` is set to True.
"""
if n_init <= 0:
raise ValueError("Invalid number of initializations."
" n_init=%d must be bigger than zero." % n_init)
random_state = check_random_state(random_state)
if max_iter <= 0:
raise ValueError('Number of iterations should be a positive number,'
' got %d instead' % max_iter)
# avoid forcing order when copy_x=False
order = "C" if copy_x else None
X = check_array(X, accept_sparse='csr', dtype=[np.float64, np.float32],
order=order, copy=copy_x)
# verify that the number of samples given is larger than k
if _num_samples(X) < n_clusters:
raise ValueError("n_samples=%d should be >= n_clusters=%d" % (
_num_samples(X), n_clusters))
tol = _tolerance(X, tol)
# If the distances are precomputed every job will create a matrix of shape
# (n_clusters, n_samples). To stop KMeans from eating up memory we only
# activate this if the created matrix is guaranteed to be under 100MB. 12
# million entries consume a little under 100MB if they are of type double.
if precompute_distances == 'auto':
n_samples = X.shape[0]
precompute_distances = (n_clusters * n_samples) < 12e6
elif isinstance(precompute_distances, bool):
pass
else:
raise ValueError("precompute_distances should be 'auto' or True/False"
", but a value of %r was passed" %
precompute_distances)
# Validate init array
if hasattr(init, '__array__'):
init = check_array(init, dtype=X.dtype.type, copy=True)
_validate_center_shape(X, n_clusters, init)
if n_init != 1:
warnings.warn(
'Explicit initial center position passed: '
'performing only one init in k-means instead of n_init=%d'
% n_init, RuntimeWarning, stacklevel=2)
n_init = 1
# subtract of mean of x for more accurate distance computations
if not sp.issparse(X):
X_mean = X.mean(axis=0)
# The copy was already done above
X -= X_mean
if hasattr(init, '__array__'):
init -= X_mean
# precompute squared norms of data points
x_squared_norms = row_norms(X, squared=True)
best_labels, best_inertia, best_centers = None, None, None
if n_clusters == 1:
# elkan doesn't make sense for a single cluster, full will produce
# the right result.
algorithm = "full"
if algorithm == "auto":
algorithm = "full" if sp.issparse(X) else 'elkan'
if algorithm == "full":
kmeans_single = _kmeans_single_lloyd
elif algorithm == "elkan":
kmeans_single = _kmeans_single_elkan
else:
raise ValueError("Algorithm must be 'auto', 'full' or 'elkan', got"
" %s" % str(algorithm))
if n_jobs == 1:
# For a single thread, less memory is needed if we just store one set
# of the best results (as opposed to one set per run per thread).
for it in range(n_init):
# run a k-means once
labels, inertia, centers, n_iter_ = kmeans_single(
X, n_clusters, max_iter=max_iter, init=init, verbose=verbose,
precompute_distances=precompute_distances, tol=tol,
x_squared_norms=x_squared_norms, random_state=random_state)
# determine if these results are the best so far
if best_inertia is None or inertia < best_inertia:
best_labels = labels.copy()
best_centers = centers.copy()
best_inertia = inertia
best_n_iter = n_iter_
else:
# parallelisation of k-means runs
seeds = random_state.randint(np.iinfo(np.int32).max, size=n_init)
results = Parallel(n_jobs=n_jobs, verbose=0)(
delayed(kmeans_single)(X, n_clusters, max_iter=max_iter, init=init,
verbose=verbose, tol=tol,
precompute_distances=precompute_distances,
x_squared_norms=x_squared_norms,
# Change seed to ensure variety
random_state=seed)
for seed in seeds)
# Get results with the lowest inertia
labels, inertia, centers, n_iters = zip(*results)
best = np.argmin(inertia)
best_labels = labels[best]
best_inertia = inertia[best]
best_centers = centers[best]
best_n_iter = n_iters[best]
if not sp.issparse(X):
if not copy_x:
X += X_mean
best_centers += X_mean
distinct_clusters = len(set(best_labels))
if distinct_clusters < n_clusters:
warnings.warn("Number of distinct clusters ({}) found smaller than "
"n_clusters ({}). Possibly due to duplicate points "
"in X.".format(distinct_clusters, n_clusters),
ConvergenceWarning, stacklevel=2)
if return_n_iter:
return best_centers, best_labels, best_inertia, best_n_iter
else:
return best_centers, best_labels, best_inertia
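# Illustrative sketch mirroring the signature documented above:
#
# centers, labels, inertia = k_means(X, n_clusters=8, random_state=0)
# centers, labels, inertia, n_iter = k_means(X, n_clusters=8, random_state=0, return_n_iter=True)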
def _kmeans_single_elkan(X, n_clusters, max_iter=300, init='k-means++',
verbose=False, x_squared_norms=None,
random_state=None, tol=1e-4,
precompute_distances=True):
if sp.issparse(X):
raise TypeError("algorithm='elkan' not supported for sparse input X")
random_state = check_random_state(random_state)
if x_squared_norms is None:
x_squared_norms = row_norms(X, squared=True)
# init
centers = _init_centroids(X, n_clusters, init, random_state=random_state,
x_squared_norms=x_squared_norms)
centers = np.ascontiguousarray(centers)
if verbose:
print('Initialization complete')
centers, labels, n_iter = k_means_elkan(X, n_clusters, centers, tol=tol,
max_iter=max_iter, verbose=verbose)
inertia = np.sum((X - centers[labels]) ** 2, dtype=np.float64)
return labels, inertia, centers, n_iter
def _kmeans_single_lloyd(X, n_clusters, max_iter=300, init='k-means++',
verbose=False, x_squared_norms=None,
random_state=None, tol=1e-4,
precompute_distances=True):
"""A single run of k-means, assumes preparation completed prior.
Parameters
----------
X : array-like of floats, shape (n_samples, n_features)
The observations to cluster.
n_clusters : int
The number of clusters to form as well as the number of
centroids to generate.
max_iter : int, optional, default 300
Maximum number of iterations of the k-means algorithm to run.
init : {'k-means++', 'random', or ndarray, or a callable}, optional
Method for initialization, default to 'k-means++':
'k-means++' : selects initial cluster centers for k-mean
clustering in a smart way to speed up convergence. See section
Notes in k_init for more details.
'random': choose k observations (rows) at random from data for
the initial centroids.
If an ndarray is passed, it should be of shape (k, p) and gives
the initial centers.
If a callable is passed, it should take arguments X, k and
a random state and return an initialization.
tol : float, optional
The relative increment in the results before declaring convergence.
verbose : boolean, optional
Verbosity mode
x_squared_norms : array
Precomputed x_squared_norms.
precompute_distances : boolean, default: True
Precompute distances (faster but takes more memory).
random_state : int, RandomState instance or None (default)
Determines random number generation for centroid initialization. Use
an int to make the randomness deterministic.
See :term:`Glossary <random_state>`.
Returns
-------
centroid : float ndarray with shape (k, n_features)
Centroids found at the last iteration of k-means.
label : integer ndarray with shape (n_samples,)
label[i] is the code or index of the centroid the
i'th observation is closest to.
inertia : float
The final value of the inertia criterion (sum of squared distances to
the closest centroid for all observations in the training set).
n_iter : int
Number of iterations run.
"""
random_state = check_random_state(random_state)
best_labels, best_inertia, best_centers = None, None, None
# init
centers = _init_centroids(X, n_clusters, init, random_state=random_state,
x_squared_norms=x_squared_norms)
if verbose:
print("Initialization complete")
# Allocate memory to store the distances for each sample to its
# closer center for reallocation in case of ties
distances = np.zeros(shape=(X.shape[0],), dtype=X.dtype)
# iterations
for i in range(max_iter):
centers_old = centers.copy()
# labels assignment is also called the E-step of EM
labels, inertia = \
_labels_inertia(X, x_squared_norms, centers,
precompute_distances=precompute_distances,
distances=distances)
# computation of the means is also called the M-step of EM
if sp.issparse(X):
centers = _k_means._centers_sparse(X, labels, n_clusters,
distances)
else:
centers = _k_means._centers_dense(X, labels, n_clusters, distances)
if verbose:
print("Iteration %2d, inertia %.3f" % (i, inertia))
if best_inertia is None or inertia < best_inertia:
best_labels = labels.copy()
best_centers = centers.copy()
best_inertia = inertia
center_shift_total = squared_norm(centers_old - centers)
if center_shift_total <= tol:
if verbose:
print("Converged at iteration %d: "
"center shift %e within tolerance %e"
% (i, center_shift_total, tol))
break
if center_shift_total > 0:
# rerun E-step in case of non-convergence so that predicted labels
# match cluster centers
best_labels, best_inertia = \
_labels_inertia(X, x_squared_norms, best_centers,
precompute_distances=precompute_distances,
distances=distances)
return best_labels, best_inertia, best_centers, i + 1
def _labels_inertia_precompute_dense(X, x_squared_norms, centers, distances):
"""Compute labels and inertia using a full distance matrix.
This will overwrite the 'distances' array in-place.
Parameters
----------
X : numpy array, shape (n_sample, n_features)
Input data.
x_squared_norms : numpy array, shape (n_samples,)
Precomputed squared norms of X.
centers : numpy array, shape (n_clusters, n_features)
Cluster centers which data is assigned to.
distances : numpy array, shape (n_samples,)
Pre-allocated array in which distances are stored.
Returns
-------
labels : numpy array, dtype=np.int, shape (n_samples,)
Indices of clusters that samples are assigned to.
inertia : float
Sum of squared distances of samples to their closest cluster center.
"""
n_samples = X.shape[0]
# Breakup nearest neighbor distance computation into batches to prevent
# memory blowup in the case of a large number of samples and clusters.
# TODO: Once PR #7383 is merged use check_inputs=False in metric_kwargs.
labels, mindist = pairwise_distances_argmin_min(
X=X, Y=centers, metric='euclidean', metric_kwargs={'squared': True})
# cython k-means code assumes int32 inputs
labels = labels.astype(np.int32)
if n_samples == distances.shape[0]:
# distances will be changed in-place
distances[:] = mindist
inertia = mindist.sum()
return labels, inertia
def _labels_inertia(X, x_squared_norms, centers,
precompute_distances=True, distances=None):
"""E step of the K-means EM algorithm.
Compute the labels and the inertia of the given samples and centers.
This will compute the distances in-place.
Parameters
----------
X : float64 array-like or CSR sparse matrix, shape (n_samples, n_features)
The input samples to assign to the labels.
x_squared_norms : array, shape (n_samples,)
Precomputed squared euclidean norm of each data point, to speed up
computations.
centers : float array, shape (k, n_features)
The cluster centers.
precompute_distances : boolean, default: True
Precompute distances (faster but takes more memory).
distances : float array, shape (n_samples,)
Pre-allocated array to be filled in with each sample's distance
to the closest center.
Returns
-------
labels : int array of shape(n)
The resulting assignment
inertia : float
Sum of squared distances of samples to their closest cluster center.
"""
n_samples = X.shape[0]
# set the default value of centers to -1 to be able to detect any anomaly
# easily
labels = -np.ones(n_samples, np.int32)
if distances is None:
distances = np.zeros(shape=(0,), dtype=X.dtype)
# distances will be changed in-place
if sp.issparse(X):
inertia = _k_means._assign_labels_csr(
X, x_squared_norms, centers, labels, distances=distances)
else:
if precompute_distances:
return _labels_inertia_precompute_dense(X, x_squared_norms,
centers, distances)
inertia = _k_means._assign_labels_array(
X, x_squared_norms, centers, labels, distances=distances)
return labels, inertia
def _init_centroids(X, k, init, random_state=None, x_squared_norms=None,
init_size=None):
"""Compute the initial centroids
Parameters
----------
X : array, shape (n_samples, n_features)
k : int
number of centroids
init : {'k-means++', 'random' or ndarray or callable} optional
Method for initialization
random_state : int, RandomState instance or None (default)
Determines random number generation for centroid initialization. Use
an int to make the randomness deterministic.
See :term:`Glossary <random_state>`.
x_squared_norms : array, shape (n_samples,), optional
Squared euclidean norm of each data point. Pass it if you have it at
hands already to avoid it being recomputed here. Default: None
init_size : int, optional
Number of samples to randomly sample for speeding up the
initialization (sometimes at the expense of accuracy): the
algorithm is initialized by running a batch KMeans on a
random subset of the data. This needs to be larger than k.
Returns
-------
centers : array, shape(k, n_features)
"""
random_state = check_random_state(random_state)
n_samples = X.shape[0]
if x_squared_norms is None:
x_squared_norms = row_norms(X, squared=True)
if init_size is not None and init_size < n_samples:
if init_size < k:
warnings.warn(
"init_size=%d should be larger than k=%d. "
"Setting it to 3*k" % (init_size, k),
RuntimeWarning, stacklevel=2)
init_size = 3 * k
init_indices = random_state.randint(0, n_samples, init_size)
X = X[init_indices]
x_squared_norms = x_squared_norms[init_indices]
n_samples = X.shape[0]
elif n_samples < k:
raise ValueError(
"n_samples=%d should be larger than k=%d" % (n_samples, k))
if isinstance(init, string_types) and init == 'k-means++':
centers = _k_init(X, k, random_state=random_state,
x_squared_norms=x_squared_norms)
elif isinstance(init, string_types) and init == 'random':
seeds = random_state.permutation(n_samples)[:k]
centers = X[seeds]
elif hasattr(init, '__array__'):
# ensure that the centers have the same dtype as X
# this is a requirement of fused types of cython
centers = np.array(init, dtype=X.dtype)
elif callable(init):
centers = init(X, k, random_state=random_state)
centers = np.asarray(centers, dtype=X.dtype)
else:
raise ValueError("the init parameter for the k-means should "
"be 'k-means++' or 'random' or an ndarray, "
"'%s' (type '%s') was passed." % (init, type(init)))
if sp.issparse(centers):
centers = centers.toarray()
_validate_center_shape(X, k, centers)
return centers
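# Illustrative sketch of the three accepted init modes (internal helper; rs is a
# placeholder RandomState):
# _init_centroids(X, k, 'k-means++', random_state=rs)  # careful seeding via _k_init
# _init_centroids(X, k, 'random', random_state=rs)     # k rows sampled from X
# _init_centroids(X, k, initial_array_or_callable, random_state=rs)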
class KMeans(BaseEstimator, ClusterMixin, TransformerMixin):
"""K-Means clustering
Read more in the :ref:`User Guide <k_means>`.
Parameters
----------
n_clusters : int, optional, default: 8
The number of clusters to form as well as the number of
centroids to generate.
init : {'k-means++', 'random' or an ndarray}
Method for initialization, defaults to 'k-means++':
'k-means++' : selects initial cluster centers for k-mean
clustering in a smart way to speed up convergence. See section
Notes in k_init for more details.
'random': choose k observations (rows) at random from data for
the initial centroids.
If an ndarray is passed, it should be of shape (n_clusters, n_features)
and gives the initial centers.
n_init : int, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
max_iter : int, default: 300
Maximum number of iterations of the k-means algorithm for a
single run.
tol : float, default: 1e-4
Relative tolerance with regards to inertia to declare convergence
precompute_distances : {'auto', True, False}
Precompute distances (faster but takes more memory).
'auto' : do not precompute distances if n_samples * n_clusters > 12
million. This corresponds to about 100MB overhead per job using
double precision.
True : always precompute distances
False : never precompute distances
verbose : int, default 0
Verbosity mode.
random_state : int, RandomState instance or None (default)
Determines random number generation for centroid initialization. Use
an int to make the randomness deterministic.
See :term:`Glossary <random_state>`.
copy_x : boolean, optional
When pre-computing distances it is more numerically accurate to center
the data first. If copy_x is True (default), then the original data is
not modified, ensuring X is C-contiguous. If False, the original data
is modified, and put back before the function returns, but small
numerical differences may be introduced by subtracting and then adding
the data mean, in this case it will also not ensure that data is
C-contiguous which may cause a significant slowdown.
n_jobs : int
The number of jobs to use for the computation. This works by computing
each of the n_init runs in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
algorithm : "auto", "full" or "elkan", default="auto"
K-means algorithm to use. The classical EM-style algorithm is "full".
The "elkan" variation is more efficient by using the triangle
inequality, but currently doesn't support sparse data. "auto" chooses
"elkan" for dense data and "full" for sparse data.
Attributes
----------
cluster_centers_ : array, [n_clusters, n_features]
Coordinates of cluster centers
labels_ :
Labels of each point
inertia_ : float
Sum of squared distances of samples to their closest cluster center.
Examples
--------
>>> from sklearn.cluster import KMeans
>>> import numpy as np
>>> X = np.array([[1, 2], [1, 4], [1, 0],
... [4, 2], [4, 4], [4, 0]])
>>> kmeans = KMeans(n_clusters=2, random_state=0).fit(X)
>>> kmeans.labels_
array([0, 0, 0, 1, 1, 1], dtype=int32)
>>> kmeans.predict([[0, 0], [4, 4]])
array([0, 1], dtype=int32)
>>> kmeans.cluster_centers_
array([[1., 2.],
[4., 2.]])
See also
--------
MiniBatchKMeans
Alternative online implementation that does incremental updates
of the centers positions using mini-batches.
For large scale learning (say n_samples > 10k) MiniBatchKMeans is
probably much faster than the default batch implementation.
Notes
------
The k-means problem is solved using Lloyd's algorithm.
The average complexity is given by O(k n T), where n is the number of
samples and T is the number of iterations.
The worst case complexity is given by O(n^(k+2/p)) with
n = n_samples, p = n_features. (D. Arthur and S. Vassilvitskii,
'How slow is the k-means method?' SoCG2006)
In practice, the k-means algorithm is very fast (one of the fastest
clustering algorithms available), but it falls in local minima. That's why
it can be useful to restart it several times.
"""
def __init__(self, n_clusters=8, init='k-means++', n_init=10,
max_iter=300, tol=1e-4, precompute_distances='auto',
verbose=0, random_state=None, copy_x=True,
n_jobs=1, algorithm='auto'):
self.n_clusters = n_clusters
self.init = init
self.max_iter = max_iter
self.tol = tol
self.precompute_distances = precompute_distances
self.n_init = n_init
self.verbose = verbose
self.random_state = random_state
self.copy_x = copy_x
self.n_jobs = n_jobs
self.algorithm = algorithm
def _check_test_data(self, X):
X = check_array(X, accept_sparse='csr', dtype=FLOAT_DTYPES)
n_samples, n_features = X.shape
expected_n_features = self.cluster_centers_.shape[1]
if not n_features == expected_n_features:
raise ValueError("Incorrect number of features. "
"Got %d features, expected %d" % (
n_features, expected_n_features))
return X
def fit(self, X, y=None):
"""Compute k-means clustering.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Training instances to cluster. It must be noted that the data
will be converted to C ordering, which will cause a memory
copy if the given data is not C-contiguous.
y : Ignored
"""
random_state = check_random_state(self.random_state)
self.cluster_centers_, self.labels_, self.inertia_, self.n_iter_ = \
k_means(
X, n_clusters=self.n_clusters, init=self.init,
n_init=self.n_init, max_iter=self.max_iter, verbose=self.verbose,
precompute_distances=self.precompute_distances,
tol=self.tol, random_state=random_state, copy_x=self.copy_x,
n_jobs=self.n_jobs, algorithm=self.algorithm,
return_n_iter=True)
return self
def fit_predict(self, X, y=None):
"""Compute cluster centers and predict cluster index for each sample.
Convenience method; equivalent to calling fit(X) followed by
predict(X).
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data to transform.
y : Ignored
Returns
-------
labels : array, shape [n_samples,]
Index of the cluster each sample belongs to.
"""
return self.fit(X).labels_
def fit_transform(self, X, y=None):
"""Compute clustering and transform X to cluster-distance space.
Equivalent to fit(X).transform(X), but more efficiently implemented.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data to transform.
y : Ignored
Returns
-------
X_new : array, shape [n_samples, k]
X transformed in the new space.
"""
# Currently, this just skips a copy of the data if it is not in
# np.array or CSR format already.
# XXX This skips _check_test_data, which may change the dtype;
# we should refactor the input validation.
return self.fit(X)._transform(X)
def transform(self, X):
"""Transform X to a cluster-distance space.
In the new space, each dimension is the distance to the cluster
centers. Note that even if X is sparse, the array returned by
`transform` will typically be dense.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data to transform.
Returns
-------
X_new : array, shape [n_samples, k]
X transformed in the new space.
"""
check_is_fitted(self, 'cluster_centers_')
X = self._check_test_data(X)
return self._transform(X)
def _transform(self, X):
"""guts of transform method; no input validation"""
return euclidean_distances(X, self.cluster_centers_)
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
In the vector quantization literature, `cluster_centers_` is called
the code book and each value returned by `predict` is the index of
the closest code in the code book.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data to predict.
Returns
-------
labels : array, shape [n_samples,]
Index of the cluster each sample belongs to.
"""
check_is_fitted(self, 'cluster_centers_')
X = self._check_test_data(X)
x_squared_norms = row_norms(X, squared=True)
return _labels_inertia(X, x_squared_norms, self.cluster_centers_)[0]
def score(self, X, y=None):
"""Opposite of the value of X on the K-means objective.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data.
y : Ignored
Returns
-------
score : float
Opposite of the value of X on the K-means objective.
"""
check_is_fitted(self, 'cluster_centers_')
X = self._check_test_data(X)
x_squared_norms = row_norms(X, squared=True)
return -_labels_inertia(X, x_squared_norms, self.cluster_centers_)[1]
def _mini_batch_step(X, x_squared_norms, centers, counts,
old_center_buffer, compute_squared_diff,
distances, random_reassign=False,
random_state=None, reassignment_ratio=.01,
verbose=False):
"""Incremental update of the centers for the Minibatch K-Means algorithm.
Parameters
----------
X : array, shape (n_samples, n_features)
The original data array.
x_squared_norms : array, shape (n_samples,)
Squared euclidean norm of each data point.
centers : array, shape (k, n_features)
The cluster centers. This array is MODIFIED IN PLACE
counts : array, shape (k,)
The vector in which we keep track of the numbers of elements in a
cluster. This array is MODIFIED IN PLACE
distances : array, dtype float, shape (n_samples), optional
If not None, should be a pre-allocated array that will be used to store
the distances of each sample to its closest center.
May not be None when random_reassign is True.
random_state : int, RandomState instance or None (default)
Determines random number generation for centroid initialization and to
pick new clusters amongst observations with uniform probability. Use
an int to make the randomness deterministic.
See :term:`Glossary <random_state>`.
random_reassign : boolean, optional
If True, centers with very low counts are randomly reassigned
to observations.
reassignment_ratio : float, optional
Control the fraction of the maximum number of counts for a
center to be reassigned. A higher value means that low count
centers are more likely to be reassigned, which means that the
model will take longer to converge, but should converge in a
better clustering.
verbose : bool, optional, default False
Controls the verbosity.
compute_squared_diff : bool
If set to False, the squared diff computation is skipped.
old_center_buffer : int
Copy of old centers for monitoring convergence.
Returns
-------
inertia : float
Sum of squared distances of samples to their closest cluster center.
squared_diff : numpy array, shape (n_clusters,)
Squared distances between previous and updated cluster centers.
"""
# Perform label assignment to nearest centers
nearest_center, inertia = _labels_inertia(X, x_squared_norms, centers,
distances=distances)
if random_reassign and reassignment_ratio > 0:
random_state = check_random_state(random_state)
# Reassign clusters that have very low counts
to_reassign = counts < reassignment_ratio * counts.max()
# pick at most .5 * batch_size samples as new centers
if to_reassign.sum() > .5 * X.shape[0]:
indices_dont_reassign = np.argsort(counts)[int(.5 * X.shape[0]):]
to_reassign[indices_dont_reassign] = False
n_reassigns = to_reassign.sum()
if n_reassigns:
# Pick new clusters amongst observations with uniform probability
new_centers = random_state.choice(X.shape[0], replace=False,
size=n_reassigns)
if verbose:
print("[MiniBatchKMeans] Reassigning %i cluster centers."
% n_reassigns)
if sp.issparse(X) and not sp.issparse(centers):
assign_rows_csr(X, new_centers.astype(np.intp),
np.where(to_reassign)[0].astype(np.intp),
centers)
else:
centers[to_reassign] = X[new_centers]
# reset counts of reassigned centers, but don't reset them too small
# to avoid instant reassignment. This is a pretty dirty hack as it
# also modifies the learning rates.
counts[to_reassign] = np.min(counts[~to_reassign])
# implementation for the sparse CSR representation completely written in
# cython
if sp.issparse(X):
return inertia, _k_means._mini_batch_update_csr(
X, x_squared_norms, centers, counts, nearest_center,
old_center_buffer, compute_squared_diff)
# dense variant in mostly numpy (not as memory efficient though)
k = centers.shape[0]
squared_diff = 0.0
for center_idx in range(k):
# find points from minibatch that are assigned to this center
center_mask = nearest_center == center_idx
count = center_mask.sum()
if count > 0:
if compute_squared_diff:
old_center_buffer[:] = centers[center_idx]
# inplace remove previous count scaling
centers[center_idx] *= counts[center_idx]
# inplace sum with new points members of this cluster
centers[center_idx] += np.sum(X[center_mask], axis=0)
# update the count statistics for this center
counts[center_idx] += count
# inplace rescale to compute mean of all points (old and new)
# Note: numpy >= 1.10 does not support '/=' for the following
# expression for a mixture of int and float (see numpy issue #6464)
centers[center_idx] = centers[center_idx] / counts[center_idx]
# update the squared diff if necessary
if compute_squared_diff:
diff = centers[center_idx].ravel() - old_center_buffer.ravel()
squared_diff += np.dot(diff, diff)
return inertia, squared_diff
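# Illustrative sketch (simplified; not the exact driver used in MiniBatchKMeans.fit): the
# caller repeatedly feeds random mini-batches through this function, e.g.
#
# batch = random_state.randint(0, n_samples, batch_size)
# inertia, diff = _mini_batch_step(X[batch], x_squared_norms[batch], centers, counts,
#                                  old_center_buffer, compute_squared_diff=tol > 0,
#                                  distances=distances)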
def _mini_batch_convergence(model, iteration_idx, n_iter, tol,
n_samples, centers_squared_diff, batch_inertia,
context, verbose=0):
"""Helper function to encapsulate the early stopping logic"""
# Normalize inertia to be able to compare values when
# batch_size changes
batch_inertia /= model.batch_size
centers_squared_diff /= model.batch_size
# Compute an Exponentially Weighted Average of the squared
# diff to monitor the convergence while discarding
# minibatch-local stochastic variability:
# https://en.wikipedia.org/wiki/Moving_average
ewa_diff = context.get('ewa_diff')
ewa_inertia = context.get('ewa_inertia')
if ewa_diff is None:
ewa_diff = centers_squared_diff
ewa_inertia = batch_inertia
else:
alpha = float(model.batch_size) * 2.0 / (n_samples + 1)
alpha = 1.0 if alpha > 1.0 else alpha
ewa_diff = ewa_diff * (1 - alpha) + centers_squared_diff * alpha
ewa_inertia = ewa_inertia * (1 - alpha) + batch_inertia * alpha
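# i.e. an exponential moving average: new_ewa = (1 - alpha) * old_ewa + alpha * new_value,
# with alpha proportional to batch_size / n_samples and capped at 1.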
# Log progress to be able to monitor convergence
if verbose:
progress_msg = (
'Minibatch iteration %d/%d:'
' mean batch inertia: %f, ewa inertia: %f ' % (
iteration_idx + 1, n_iter, batch_inertia,
ewa_inertia))
print(progress_msg)
# Early stopping based on absolute tolerance on squared change of
# centers position (using EWA smoothing)
if tol > 0.0 and ewa_diff <= tol:
if verbose:
print('Converged (small centers change) at iteration %d/%d'
% (iteration_idx + 1, n_iter))
return True
# Early stopping heuristic due to lack of improvement on smoothed inertia
ewa_inertia_min = context.get('ewa_inertia_min')
no_improvement = context.get('no_improvement', 0)
if ewa_inertia_min is None or ewa_inertia < ewa_inertia_min:
no_improvement = 0
ewa_inertia_min = ewa_inertia
else:
no_improvement += 1
if (model.max_no_improvement is not None
and no_improvement >= model.max_no_improvement):
if verbose:
print('Converged (lack of improvement in inertia)'
' at iteration %d/%d'
% (iteration_idx + 1, n_iter))
return True
# update the convergence context to maintain state across successive calls:
context['ewa_diff'] = ewa_diff
context['ewa_inertia'] = ewa_inertia
context['ewa_inertia_min'] = ewa_inertia_min
context['no_improvement'] = no_improvement
return False
class MiniBatchKMeans(KMeans):
"""Mini-Batch K-Means clustering
Read more in the :ref:`User Guide <mini_batch_kmeans>`.
Parameters
----------
n_clusters : int, optional, default: 8
The number of clusters to form as well as the number of
centroids to generate.
init : {'k-means++', 'random' or an ndarray}, default: 'k-means++'
Method for initialization, defaults to 'k-means++':
'k-means++' : selects initial cluster centers for k-mean
clustering in a smart way to speed up convergence. See section
Notes in k_init for more details.
'random': choose k observations (rows) at random from data for
the initial centroids.
If an ndarray is passed, it should be of shape (n_clusters, n_features)
and gives the initial centers.
max_iter : int, optional
Maximum number of iterations over the complete dataset before
stopping independently of any early stopping criterion heuristics.
batch_size : int, optional, default: 100
Size of the mini batches.
verbose : boolean, optional
Verbosity mode.
compute_labels : boolean, default=True
Compute label assignment and inertia for the complete dataset
once the minibatch optimization has converged in fit.
random_state : int, RandomState instance or None (default)
Determines random number generation for centroid initialization and
random reassignment. Use an int to make the randomness deterministic.
See :term:`Glossary <random_state>`.
tol : float, default: 0.0
Control early stopping based on the relative center changes as
measured by a smoothed, variance-normalized mean of the squared
center position changes. This early stopping heuristic is
closer to the one used for the batch variant of the algorithms
but induces a slight computational and memory overhead over the
inertia heuristic.
To disable convergence detection based on normalized center
change, set tol to 0.0 (default).
max_no_improvement : int, default: 10
Control early stopping based on the consecutive number of mini
batches that does not yield an improvement on the smoothed inertia.
To disable convergence detection based on inertia, set
max_no_improvement to None.
init_size : int, optional, default: 3 * batch_size
Number of samples to randomly sample for speeding up the
initialization (sometimes at the expense of accuracy): the
algorithm is initialized by running a batch KMeans on a
random subset of the data. This needs to be larger than n_clusters.
n_init : int, default=3
Number of random initializations that are tried.
In contrast to KMeans, the algorithm is only run once, using the
best of the ``n_init`` initializations as measured by inertia.
reassignment_ratio : float, default: 0.01
Control the fraction of the maximum number of counts for a
center to be reassigned. A higher value means that low count
centers are more easily reassigned, which means that the
model will take longer to converge, but should converge to a
better clustering.
Attributes
----------
cluster_centers_ : array, [n_clusters, n_features]
Coordinates of cluster centers
labels_ :
Labels of each point (if compute_labels is set to True).
inertia_ : float
The value of the inertia criterion associated with the chosen
partition (if compute_labels is set to True). The inertia is
defined as the sum of squared distances of samples to their nearest
cluster center.
See also
--------
KMeans
The classic implementation of the clustering method based on the
Lloyd's algorithm. It consumes the whole set of input data at each
iteration.
Notes
-----
See http://www.eecs.tufts.edu/~dsculley/papers/fastkmeans.pdf
"""
def __init__(self, n_clusters=8, init='k-means++', max_iter=100,
batch_size=100, verbose=0, compute_labels=True,
random_state=None, tol=0.0, max_no_improvement=10,
init_size=None, n_init=3, reassignment_ratio=0.01):
super(MiniBatchKMeans, self).__init__(
n_clusters=n_clusters, init=init, max_iter=max_iter,
verbose=verbose, random_state=random_state, tol=tol, n_init=n_init)
self.max_no_improvement = max_no_improvement
self.batch_size = batch_size
self.compute_labels = compute_labels
self.init_size = init_size
self.reassignment_ratio = reassignment_ratio
def fit(self, X, y=None):
"""Compute the centroids on X by chunking it into mini-batches.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Training instances to cluster. It must be noted that the data
will be converted to C ordering, which will cause a memory copy
if the given data is not C-contiguous.
y : Ignored
"""
random_state = check_random_state(self.random_state)
X = check_array(X, accept_sparse="csr", order='C',
dtype=[np.float64, np.float32])
n_samples, n_features = X.shape
if n_samples < self.n_clusters:
raise ValueError("n_samples=%d should be >= n_clusters=%d"
% (n_samples, self.n_clusters))
n_init = self.n_init
if hasattr(self.init, '__array__'):
self.init = np.ascontiguousarray(self.init, dtype=X.dtype)
if n_init != 1:
warnings.warn(
'Explicit initial center position passed: '
'performing only one init in MiniBatchKMeans instead of '
'n_init=%d'
% self.n_init, RuntimeWarning, stacklevel=2)
n_init = 1
x_squared_norms = row_norms(X, squared=True)
if self.tol > 0.0:
tol = _tolerance(X, self.tol)
# using tol-based early stopping needs the allocation of a
# dedicated buffer, which can be expensive for high dim data:
# hence we allocate it outside of the main loop
old_center_buffer = np.zeros(n_features, dtype=X.dtype)
else:
tol = 0.0
# no need for the center buffer if tol-based early stopping is
# disabled
old_center_buffer = np.zeros(0, dtype=X.dtype)
distances = np.zeros(self.batch_size, dtype=X.dtype)
n_batches = int(np.ceil(float(n_samples) / self.batch_size))
n_iter = int(self.max_iter * n_batches)
init_size = self.init_size
if init_size is None:
init_size = 3 * self.batch_size
if init_size > n_samples:
init_size = n_samples
self.init_size_ = init_size
validation_indices = random_state.randint(0, n_samples, init_size)
X_valid = X[validation_indices]
x_squared_norms_valid = x_squared_norms[validation_indices]
# perform several inits with random sub-sets
best_inertia = None
for init_idx in range(n_init):
if self.verbose:
print("Init %d/%d with method: %s"
% (init_idx + 1, n_init, self.init))
counts = np.zeros(self.n_clusters, dtype=np.int32)
# TODO: once the `k_means` function works with sparse input we
# should refactor the following init to use it instead.
# Initialize the centers using only a fraction of the data as we
# expect n_samples to be very large when using MiniBatchKMeans
cluster_centers = _init_centroids(
X, self.n_clusters, self.init,
random_state=random_state,
x_squared_norms=x_squared_norms,
init_size=init_size)
# Compute the label assignment on the init dataset
batch_inertia, centers_squared_diff = _mini_batch_step(
X_valid, x_squared_norms[validation_indices],
cluster_centers, counts, old_center_buffer, False,
distances=None, verbose=self.verbose)
# Keep only the best cluster centers across independent inits on
# the common validation set
_, inertia = _labels_inertia(X_valid, x_squared_norms_valid,
cluster_centers)
if self.verbose:
print("Inertia for init %d/%d: %f"
% (init_idx + 1, n_init, inertia))
if best_inertia is None or inertia < best_inertia:
self.cluster_centers_ = cluster_centers
self.counts_ = counts
best_inertia = inertia
# Empty context dict, updated in place by the convergence check routine
convergence_context = {}
# Perform the iterative optimization until the final convergence
# criterion
for iteration_idx in range(n_iter):
# Sample a minibatch from the full dataset
minibatch_indices = random_state.randint(
0, n_samples, self.batch_size)
# Perform the actual update step on the minibatch data
batch_inertia, centers_squared_diff = _mini_batch_step(
X[minibatch_indices], x_squared_norms[minibatch_indices],
self.cluster_centers_, self.counts_,
old_center_buffer, tol > 0.0, distances=distances,
# Here we randomly choose whether to perform
# random reassignment: the choice is done as a function
# of the iteration index, and the minimum number of
# counts, in order to force this reassignment to happen
# every once in a while
random_reassign=((iteration_idx + 1)
% (10 + self.counts_.min()) == 0),
random_state=random_state,
reassignment_ratio=self.reassignment_ratio,
verbose=self.verbose)
# Monitor convergence and do early stopping if necessary
if _mini_batch_convergence(
self, iteration_idx, n_iter, tol, n_samples,
centers_squared_diff, batch_inertia, convergence_context,
verbose=self.verbose):
break
self.n_iter_ = iteration_idx + 1
if self.compute_labels:
self.labels_, self.inertia_ = self._labels_inertia_minibatch(X)
return self
def _labels_inertia_minibatch(self, X):
"""Compute labels and inertia using mini batches.
This is slightly slower than doing everything at once but prevents
memory errors / segfaults.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
labels : array, shape (n_samples,)
Cluster labels for each point.
inertia : float
Sum of squared distances of points to nearest cluster.
"""
if self.verbose:
print('Computing label assignment and total inertia')
x_squared_norms = row_norms(X, squared=True)
slices = gen_batches(X.shape[0], self.batch_size)
results = [_labels_inertia(X[s], x_squared_norms[s],
self.cluster_centers_) for s in slices]
labels, inertia = zip(*results)
return np.hstack(labels), np.sum(inertia)
def partial_fit(self, X, y=None):
"""Update k means estimate on a single mini-batch X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Coordinates of the data points to cluster. It must be noted that
X will be copied if it is not C-contiguous.
y : Ignored
"""
X = check_array(X, accept_sparse="csr", order="C")
n_samples, n_features = X.shape
if hasattr(self.init, '__array__'):
self.init = np.ascontiguousarray(self.init, dtype=X.dtype)
if n_samples == 0:
return self
x_squared_norms = row_norms(X, squared=True)
self.random_state_ = getattr(self, "random_state_",
check_random_state(self.random_state))
if (not hasattr(self, 'counts_')
or not hasattr(self, 'cluster_centers_')):
# this is the first call partial_fit on this object:
# initialize the cluster centers
self.cluster_centers_ = _init_centroids(
X, self.n_clusters, self.init,
random_state=self.random_state_,
x_squared_norms=x_squared_norms, init_size=self.init_size)
self.counts_ = np.zeros(self.n_clusters, dtype=np.int32)
random_reassign = False
distances = None
else:
# The lower the minimum count is, the more we do random
# reassignment, however, we don't want to do random
# reassignment too often, to allow for building up counts
random_reassign = self.random_state_.randint(
10 * (1 + self.counts_.min())) == 0
distances = np.zeros(X.shape[0], dtype=X.dtype)
_mini_batch_step(X, x_squared_norms, self.cluster_centers_,
self.counts_, np.zeros(0, dtype=X.dtype), 0,
random_reassign=random_reassign, distances=distances,
random_state=self.random_state_,
reassignment_ratio=self.reassignment_ratio,
verbose=self.verbose)
if self.compute_labels:
self.labels_, self.inertia_ = _labels_inertia(
X, x_squared_norms, self.cluster_centers_)
return self
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
In the vector quantization literature, `cluster_centers_` is called
the code book and each value returned by `predict` is the index of
the closest code in the code book.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data to predict.
Returns
-------
labels : array, shape [n_samples,]
Index of the cluster each sample belongs to.
"""
check_is_fitted(self, 'cluster_centers_')
X = self._check_test_data(X)
return self._labels_inertia_minibatch(X)[0]
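# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the original module);
# the data below is made up and the exact centers/labels depend on
# random_state:
#
#   import numpy as np
#
#   X = np.array([[1, 2], [1, 4], [1, 0],
#                 [4, 2], [4, 0], [4, 4],
#                 [4, 5], [0, 1], [2, 2]])
#   mbk = MiniBatchKMeans(n_clusters=2, batch_size=6, random_state=0)
#   mbk.fit(X)                                      # optimize on mini-batches
#   print(mbk.cluster_centers_)                     # learned centroids
#   print(mbk.predict(np.array([[0, 0], [4, 4]])))  # assign new points
#
# `partial_fit` can be used instead of `fit` to stream mini-batches manually.
# ---------------------------------------------------------------------------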
| bsd-3-clause | 8,976,565,480,837,188,000 | 37.730238 | 81 | 0.611701 | false |
vaishaksuresh/udacity_data_analyst | P2/ProblemSets_2_to_4/P2_01.py | 1 | 1464 | import pandas
import pandasql
def num_rainy_days(filename):
'''
This function should run a SQL query on a dataframe of
weather data. The SQL query should return one column and
one row - a count of the number of days in the dataframe where
the rain column is equal to 1 (i.e., the number of days it
rained). The dataframe will be titled 'weather_data'. You'll
need to provide the SQL query. You might find SQL's count function
useful for this exercise. You can read more about it here:
https://dev.mysql.com/doc/refman/5.1/en/counting-rows.html
You might also find that interpreting numbers as integers or floats may not
work initially. In order to get around this issue, it may be useful to cast
these numbers as integers. This can be done by writing cast(column as integer).
So for example, if we wanted to cast the maxtempi column as an integer, we would actually
write something like where cast(maxtempi as integer) = 76, as opposed to simply
where maxtempi = 76.
You can see the weather data that we are passing in below:
https://www.dropbox.com/s/7sf0yqc9ykpq3w8/weather_underground.csv
'''
weather_data = pandas.read_csv(filename)
q = """
select count(*) from weather_data where cast(rain as integer) = 1
"""
#Execute your SQL command against the pandas frame
rainy_days = pandasql.sqldf(q.lower(), locals())
return rainy_days
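# Example usage (illustrative, not part of the original exercise file; the
# CSV path is an assumption -- point it at a local copy of the
# weather_underground.csv file referenced in the docstring above):
#
#   rainy_days = num_rainy_days('weather_underground.csv')
#   print rainy_days    # a one-row, one-column result: the count of rainy days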
| gpl-2.0 | 7,580,475,719,071,482,000 | 38.567568 | 93 | 0.70082 | false |
IvanRybakov/cachewarmer | cw.py | 1 | 3233 |
import urllib2 as ur
import re, traceback
import sys
import os
from scw.fetcher import Fetcher
from scw.app import App
class CacheWarmer():
def __init__(self, sitemap, processes = 100):
self.processes = processes
self.active_threads = []
self.app = App()
self.urls = []
self.updated_count = 0
self.fetched_count = 0
self.sitemap_url = sitemap
self.code_statistics = {}
self.average_time = 0.0
def start(self):
"""
Execute the main process
"""
self.app.printflush('Sitemap: ' + self.sitemap_url)
self.getUrlsList()
self.app.printflush('Fetched: ' + str(self.fetched_count))
self.app.printflush('Processes: ' + str(self.processes))
self.CheckURLs()
self.printReport()
def printReport(self):
"""
Print a report after process execution
"""
self.app.printflush('Fetched: ' + str(self.fetched_count), self.app.IGNORE_EXIT_FLAG)
self.app.printflush('Processes: ' + str(self.processes), self.app.IGNORE_EXIT_FLAG)
self.app.printflush('Updated: ' + str(self.updated_count), self.app.IGNORE_EXIT_FLAG)
self.app.printflush('Average page load time: ' + str(self.average_time), self.app.IGNORE_EXIT_FLAG)
self.app.printflush('Returned with code: ' + repr(self.code_statistics), self.app.IGNORE_EXIT_FLAG)
self.app.printflush('Closing Processes... ', self.app.IGNORE_EXIT_FLAG)
def getUrlsList(self):
"""
Fetch an URLs list from website XML sitemap
"""
try:
f = ur.urlopen(self.sitemap_url)
res = f.readlines()
for d in res:
data = re.findall('<loc>(https?:\/\/.+?)<\/loc>',d)
for i in data:
self.urls.append(i)
except Exception as e:
self.app.printflush(str(e))
self.app.printflush(traceback.format_exc())
self.fetched_count = len(self.urls)
def CheckURLs(self):
"""
Start multi-threaded requests to the website
"""
self.updated_count = 0
self.app.setExitFlag(False)
try:
parsed_params = self.urls
while (parsed_params):
self.active_threads = []
while True:
while len(self.active_threads) < self.processes and len(parsed_params) > 0:
urlItem = parsed_params.pop()
if urlItem != None:
thread = Fetcher(self.app, urlItem)
thread.start()
self.active_threads.append( thread )
if self.app.getExitFlag():
break
if len( self.active_threads ) == 0:
break
else:
for thread in self.active_threads:
if not thread.isAlive():
thread.printStatus()
self.collectStat(thread)
self.active_threads.remove(thread)
if self.app.getExitFlag():
break
except KeyboardInterrupt as e:
self.app.setExitFlag(True)
except Exception as e:
self.app.printflush(traceback.format_exc())
def collectStat(self, thread):
"""
Collect statistics for a request
"""
# update average page load time
if self.updated_count == 0:
self.average_time = thread.load_time
else:
self.average_time = (self.average_time * self.updated_count + thread.load_time) / (self.updated_count + 1)
# update statistics by HTTP code
if thread.code not in self.code_statistics:
self.code_statistics[thread.code] = 1
else:
self.code_statistics[thread.code] += 1
# update count of processed pages
self.updated_count += 1
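# Illustrative usage sketch (not part of the original module; the sitemap URL
# is a placeholder and the process count is an arbitrary choice):
#
#   if __name__ == '__main__':
#       warmer = CacheWarmer('http://example.com/sitemap.xml', processes=50)
#       warmer.start()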
| mit | -5,689,395,381,370,856,000 | 28.126126 | 109 | 0.67275 | false |
t-neumann/slamdunk | slamdunk/contrib/RNASeqReadSimulator/src/gensimreads.py | 1 | 9905 | #!/usr/bin/env python
"""
This script generates simulated RNA-Seq reads (in .bed format) from known gene annotations.
USAGE
gensimreads.py {OPTIONS} <BED-File|->
PARAMETER
BED-File\tThe gene annotation file (in BED format). Use '-' for STDIN input
OPTIONS
-e/--expression [expression level file] \tSpecify the weight of each transcript. Each line in the file should have at least (NFIELD+1) fields, with field 0 the annotation id, and field NFIELD the weight of this annoation. If this file is not provided, uniform weight is applied.
-n/--nreads readcnt \tSpecify the number of reads to be generated. Default 100000.
-b/--posbias [positional bias file] \tSpecify the positional bias file. The file should include at least 100 lines, each containing only one integer number, showing the preference of the positional bias at this position. If no positional bias file is specified, use uniform distribution bias.
-l/--readlen [read length] \tSpecify the read length. Default 32.
-o/--output [output .bed file] \tSpecify the output file. Default STDOUT
-f/--field [NFIELD] \tThe field of each line as weight input. Default 7 (beginning from field 0) to compatible to genexplvprofile.py.
-p/--pairend [PELENMEAN,PELENSTD]\t Generate paired-end reads with specified insert length mean and standard derivation. The default is 200,20.
--stranded \tThe reads are strand specific.
NOTE
1. The bed file is required to be sorted according to the chromosome name and position. In Unix systems, use "sort -k 1,1 -k 2,2n in.BED > out.BED" to get a sorted version (out.BED) of the bed file (in.BED).
2. Reads spanning multiple exons are handled correctly.
HISTORY
04/30/2012
Support generating stranded RNA-Seq reads
02/16/2012
Now runs on python 2.7
02/08/2012
Change default value of NFIELD from 4 to 7 to be compatible with default genexplvprofile values.
01/29/2012
Add paired-end support.
01/09/2012
Add -f option.
AUTHOR
Wei Li (li.david.wei AT gmail.com)
"""
from __future__ import print_function
import sys;
import subprocess;
import pydoc;
import os;
import random;
import bisect;
import math;
from getSegs import *;
import pdb;
# read length
readlen=32;
# number of reads to sample
readcnt=100000;
nfield=7;
if len(sys.argv)<2:
print(pydoc.render_doc(sys.modules[__name__]));
sys.exit();
allids={};
allidl=[];
allexp=[];
posweight=[];
#onbedfile=sys.argv[-1]+'.reads.bed';
onbedfile="-";
genpereads=False;
pemean=200;
pestd=20;
stranded=False;
for i in range(len(sys.argv)):
if i<len(sys.argv)-1:
if sys.argv[i]=='-e' or sys.argv[i]=='--expression':
# parse the annotation file, and sum up the weights
nline=0;
totalweight=0;
print('Reading annotation file...',file=sys.stderr);
for lines in open(sys.argv[i+1]):
nline=nline+1;
if lines[0]=='#':
continue;
fields=lines.strip().split();
if len(fields)<nfield+1:
print('Error: the annotation file should include at least '+str(nfield+1)+' fields.',file=sys.stderr);
sys.exit();
allids[fields[0]]=0;
totalweight+=float(fields[nfield]);
allexp.append(totalweight);
allidl.append(fields[0]);
print('Read %d lines of the annotation' % nline,file=sys.stderr);
#print('Total weight: %f' % sum(totalweight));
if sys.argv[i]=='-b' or sys.argv[i]=='--posbias':
bline=0;
tbweight=0;
for lines in open(sys.argv[i+1]):
bline=bline+1;
if bline>100:
break;
tbweight=float(lines.strip());
posweight.append(tbweight);
if len(posweight)!=100:
print('Error: the bias file should include at least 100 lines.',file=sys.stderr);
sys.exit();
if sys.argv[i]=='-n' or sys.argv[i]=='--nreads':
readcnt=int(sys.argv[i+1]);
print('Read count:',readcnt,file=sys.stderr);
if sys.argv[i]=='-l' or sys.argv[i]=='--readlen':
readlen=int(sys.argv[i+1]);
print('Read length:',readlen,file=sys.stderr);
if sys.argv[i]=='-o' or sys.argv[i]=='--output':
onbedfile=sys.argv[i+1];
print('Output bed file:',onbedfile,file=sys.stderr);
if sys.argv[i]=='-f' or sys.argv[i]=='--field':
nfield=int(sys.argv[i+1]);
print('Field:',nfield,file=sys.stderr);
if sys.argv[i]=='-p' or sys.argv[i]=='--pairend':
genpereads=True;
pef=sys.argv[i+1].split(',');
pemean=int(pef[0]);
pestd=int(pef[1]);
print('Generate paired-end reads with mean and std '+str(pemean)+','+str(pestd),file=sys.stderr);
if sys.argv[i]=='-h' or sys.argv[i]=='--help':
print(pydoc.render_doc(sys.modules[__name__]));
sys.exit();
if sys.argv[i]=='--stranded':
stranded=True;
bedfile=sys.argv[-1];
# if no annotation file is specified, use a uniform distribution.
print('Assigning weights...',file=sys.stderr);
if len(allexp)==0:
totalweight=0;
for lines in open(bedfile):
bedfield=lines.strip().split();
allids[bedfield[3]]=0;
totalweight+=1;
allexp.append(totalweight);
allidl.append(bedfield[3]);
# sampling process
print('Sampling...',file=sys.stderr);
for j in range(readcnt):
k=random.random()*totalweight;
sel=bisect.bisect_right(allexp,k);
allids[allidl[sel]]=allids[allidl[sel]]+1;
# if no bias file is specified, use a uniform distribution
print('Total assigned reads:',sum(allids.values()),file=sys.stderr);
#debug info:
#for k in allidl:
# print (k, allids[k]);
#sys.exit();
if onbedfile!="-":
onfid=open(onbedfile,'w');
else:
onfid=sys.stdout;
nlines=0;
totalgenreads=0;
# read bed file
for lines in open(bedfile):
# update line counter
nlines=nlines+1;
if nlines %10000==1:
print('Processing '+str(nlines)+' lines...',file=sys.stderr);
# parse lines
bedfield=lines.strip().split();
if len(bedfield)!=12:
print('Error: incorrect number of fields (should be 12)',file=sys.stderr);
continue;
if bedfield[5]=='+':
direction=1;
elif bedfield[5]=='-':
direction=-1;
else:
print('Error: incorrect field in field[5] %s:' %bedfield[5],file=sys.stderr);
if bedfield[3] not in allids:
# the current id not found, continue
continue;
nreads=allids[bedfield[3]];
if nreads<1:
continue;
# parse all segments
fieldrange=(int(bedfield[1]),int(bedfield[2]));
if bedfield[10][-1]==',':
bedfield[10]=bedfield[10][:-1];
if bedfield[11][-1]==',':
bedfield[11]=bedfield[11][:-1];
exonlen=[int(x) for x in bedfield[10].split(',')];
exonstart=[int(x)+fieldrange[0] for x in bedfield[11].split(',')];
# old code: for each possible position in the transcript, build its segments
# for ne in range(len(exonlen)):
# for pos in range(exonstart[ne],exonstart[ne]+exonlen[ne]):
# create a position
totallen=sum(exonlen);
# here, we randomly choose one position
if genpereads==False:
selrange=totallen-readlen+1;
else:
selrange=totallen-pemean+2*pestd;
if selrange<1:
if genpereads==False:
print('Ignore annotation',bedfield[3],'of length',totallen,'Reads:',allids[bedfield[3]],file=sys.stderr);
else:
print('Ignore annotation',bedfield[3],'of length',totallen,'since it is shorter than paired-end mean insert length. Reads:',allids[bedfield[3]],file=sys.stderr);
continue;
totalgenreads+=nreads;
cumlen=[];cumlen.extend(exonlen);
for i in range(1,len(cumlen)):
cumlen[i]=cumlen[i]+cumlen[i-1];
# for non-uniform distribution, construct a new array for selection
thistbweight=[];
if len(posweight)!=0:
kweight=0;
for i in range(selrange):
nfrac=i*100.0/selrange; # a value between 0-100
nlower=int(math.floor(nfrac)); # 0-100
nhigher=int(math.ceil(nfrac)); # 0-100
if nhigher==nlower: nhigher=nlower+1;
if nhigher<100:
val=posweight[nlower]*(nfrac-nlower)+posweight[nhigher]*(nhigher-nfrac);
else:
val=posweight[99];
kweight+=val;
thistbweight.append(kweight);
for t in range(nreads):
if len(posweight)==0:
tpos=random.choice(range(selrange));
else:
rd=random.random()*kweight;
bsl=bisect.bisect_right(thistbweight,rd);
# for reverse transcripts: flip the position
if direction==-1:
bsl=selrange-1-bsl;
tpos=bsl;
pos=tpos2pos(tpos,cumlen,exonstart);
if genpereads==True:
tpos2=tpos+int(random.normalvariate(pemean-readlen+1,pestd));
pos2=tpos2pos(tpos2,cumlen,exonstart);
# get the segments
if True:
(startrange,lenrange,status)=getSegs(pos,readlen,1,exonstart,exonlen);
if status!=0:
print('Status:',status,', pos:', pos,'out of',len(cumlen),file=sys.stderr);
#pdb.set_trace();
continue;
# generate another pair
if genpereads==True:
(startrange2,lenrange2,status2)=getSegs(pos2,readlen,1,exonstart,exonlen);
if status==1:
print('Status:',status,', pos:', pos,'out of',len(cumlen),file=sys.stderr);
if genpereads==False:
lineid="%s_e_%d_%s_%d" % (bedfield[3],t,bedfield[0],pos);
else:
lineid="%s_e_%d_%s_%d/1" % (bedfield[3],t,bedfield[0],pos);
lineid2="%s_e_%d_%s_%d/2" % (bedfield[3],t,bedfield[0],pos);
# random direction
if stranded==False or direction==0:
thisdir=random.choice([1,-1]);
else:
thisdir=direction;
writeBedline(onfid,lineid,bedfield[0],thisdir,startrange,lenrange);
if genpereads==True:
writeBedline(onfid,lineid2,bedfield[0],thisdir*(-1),startrange2,lenrange2);
else:
print(bedfield[0],file=sys.stdout);
#print('Pospool:');
#for k in sorted(pospool.keys()):
# print(str(k)+":"+str(pospool[k]),end=",");
#print();
print('Total '+str(nlines)+' lines...',file=sys.stderr);
print('Total '+str(totalgenreads)+' reads...',file=sys.stderr);
if onbedfile!="-":
onfid.close();
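# Example invocation (illustrative; the file names are placeholders):
#
#   python gensimreads.py -e transcript_weights.txt -n 100000 -l 75 \
#       -o simulated_reads.bed annotation_sorted.bed
#
# This samples 100000 reads of length 75 from the sorted BED annotation,
# weighting transcripts by the values in transcript_weights.txt. Add
# -p 200,20 for paired-end reads and --stranded for strand-specific reads.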
| agpl-3.0 | 3,342,112,633,374,298,000 | 30.645367 | 292 | 0.651186 | false |
APTrust/EarthDiver | dpnode/dpn/client/management/commands/accept_transfers.py | 1 | 1510 | """
'Contrary to what people may say, there is no upper limit to stupidity.'
- Stephen Colbert
"""
from optparse import make_option
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from dpn.data.models import Node
from dpn.client.tasks import accept_transfers
class Command(BaseCommand):
help = 'Accepts pending transfers from all nodes, or from the named ' \
'node only if specified.'
option_list = BaseCommand.option_list + (
make_option("--node",
dest="namespace",
default=None,
help="Namespace of specific node to pull registry entries from."
),
make_option("--max",
dest="max",
default=settings.DPN_MAX_ACCEPT,
help="Max number of transfer to mark as accepted at once."
)
)
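# Example invocations (illustrative; the node namespace is a placeholder):
#
#   python manage.py accept_transfers
#   python manage.py accept_transfers --node aptrust --max 20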
def handle(self, *args, **options):
nodes = Node.objects.exclude(api_root__isnull=True).exclude(
api_root__exact='').exclude(namespace=settings.DPN_NAMESPACE)
nodes = nodes.filter(replicate_from=True)
if options['namespace']:
# QuerySet.filter returns a new queryset, so keep the result
nodes = nodes.filter(namespace=options['namespace'])
if not nodes:
raise CommandError("No nodes found to query.")
for node in nodes:
accept_transfers(node, options['max'])
self.stdout.write("Done accepting transfers from %s" %
node.namespace)
| apache-2.0 | -3,021,411,262,614,179,300 | 32.577778 | 78 | 0.59404 | false |
eduNEXT/edunext-ecommerce | ecommerce/courses/publishers.py | 1 | 6029 | from __future__ import absolute_import, unicode_literals
import json
import logging
import six
from django.utils.translation import ugettext_lazy as _
from edx_rest_api_client.exceptions import SlumberHttpBaseException
from oscar.core.loading import get_model
from ecommerce.core.constants import ENROLLMENT_CODE_SEAT_TYPES
from ecommerce.courses.utils import mode_for_product
logger = logging.getLogger(__name__)
Product = get_model('catalogue', 'Product')
StockRecord = get_model('partner', 'StockRecord')
class LMSPublisher:
def get_seat_expiration(self, seat):
if not seat.expires or 'professional' in getattr(seat.attr, 'certificate_type', ''):
return None
return seat.expires.isoformat()
def get_course_verification_deadline(self, course):
return course.verification_deadline.isoformat() if course.verification_deadline else None
def serialize_seat_for_commerce_api(self, seat):
""" Serializes a course seat product to a dict that can be further serialized to JSON. """
stock_record = seat.stockrecords.first()
bulk_sku = None
if getattr(seat.attr, 'certificate_type', '') in ENROLLMENT_CODE_SEAT_TYPES:
enrollment_code = seat.course.enrollment_code_product
if enrollment_code:
bulk_sku = enrollment_code.stockrecords.first().partner_sku
return {
'name': mode_for_product(seat),
'currency': stock_record.price_currency,
'price': int(stock_record.price_excl_tax),
'sku': stock_record.partner_sku,
'bulk_sku': bulk_sku,
'expires': self.get_seat_expiration(seat),
}
def publish(self, course):
""" Publish course commerce data to LMS.
Uses the Commerce API to publish course modes, prices, and SKUs to LMS. Uses
CreditCourse API endpoints to publish CreditCourse data to LMS when necessary.
Arguments:
course (Course): Course to be published.
Returns:
None, if publish operation succeeded; otherwise, error message.
"""
site = course.partner.default_site
course_id = course.id
error_message = _('Failed to publish commerce data for {course_id} to LMS.').format(course_id=course_id)
name = course.name
verification_deadline = self.get_course_verification_deadline(course)
modes = [self.serialize_seat_for_commerce_api(seat) for seat in course.seat_products]
has_credit = 'credit' in [mode['name'] for mode in modes]
if has_credit:
try:
data = {
'course_key': course_id,
'enabled': True
}
credit_api_client = site.siteconfiguration.credit_api_client
credit_api_client.courses(course_id).put(data)
logger.info('Successfully published CreditCourse for [%s] to LMS.', course_id)
except SlumberHttpBaseException as e:
# Note that %r is used to log the repr() of the response content, which may sometimes
# contain non-ASCII Unicode. We don't know (or want to guess) the encoding, so using %r will log the
# raw bytes of the message, freeing us from the possibility of encoding errors.
logger.exception(
'Failed to publish CreditCourse for [%s] to LMS. Status was [%d]. Body was [%s].',
course_id,
e.response.status_code,
e.content.decode('utf-8')
)
return error_message
except: # pylint: disable=bare-except
logger.exception('Failed to publish CreditCourse for [%s] to LMS.', course_id)
return error_message
try:
data = {
'id': course_id,
'name': name,
'verification_deadline': verification_deadline,
'modes': modes,
}
commerce_api_client = site.siteconfiguration.commerce_api_client
commerce_api_client.courses(course_id).put(data=data)
logger.info('Successfully published commerce data for [%s].', course_id)
return None
except SlumberHttpBaseException as e: # pylint: disable=bare-except
logger.exception(
'Failed to publish commerce data for [%s] to LMS. Status was [%d]. Body was [%s].',
course_id,
e.response.status_code,
e.content.decode('utf-8')
)
return self._parse_error(e.content.decode('utf-8'), error_message)
except Exception: # pylint: disable=broad-except
logger.exception('Failed to publish commerce data for [%s] to LMS.', course_id)
return error_message
def _parse_error(self, response, default_error_message):
"""When validation errors occur during publication, the LMS is expected
to return an error message.
Arguments:
response (Response): A 'Response' object which contains json error message.
default_error_message (str) : default error message in case of exception.
Returns:
string: Returns the error message extracted from response.content
along with default message. If no message is available in response
then the default message will be returned.
"""
message = None
try:
data = json.loads(response)
if isinstance(data, six.string_types):
message = data
elif isinstance(data, dict) and data:
message = list(data.values())[0]
if isinstance(message, list):
message = message[0]
except Exception: # pylint: disable=broad-except
pass
if message:
return ' '.join([default_error_message, message])
return default_error_message
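# Illustrative usage sketch (not part of the original module): `course` is
# assumed to be a Course object whose partner has a default site configured
# with commerce/credit API clients.
#
#   publisher = LMSPublisher()
#   error_message = publisher.publish(course)
#   if error_message:
#       logger.error(error_message)   # publication failed; message explains why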
| agpl-3.0 | 152,557,083,518,754,620 | 40.294521 | 116 | 0.603914 | false |
Nic30/HWToolkit | hwt/synthesizer/rtlLevel/netlist.py | 1 | 8089 | from typing import List, Optional, Union
from hdlConvertorAst.hdlAst._defs import HdlIdDef
from hdlConvertorAst.hdlAst._expr import HdlValueId
from hdlConvertorAst.hdlAst._structural import HdlModuleDec, HdlModuleDef, \
HdlCompInst
from hwt.code import If
from hwt.hdl.operatorDefs import AllOps
from hwt.hdl.types.defs import BIT
from hwt.hdl.value import HValue
from hwt.serializer.utils import HdlStatement_sort_key, RtlSignal_sort_key
from hwt.synthesizer.dummyPlatform import DummyPlatform
from hwt.synthesizer.exceptions import SigLvlConfErr
from hwt.synthesizer.interfaceLevel.mainBases import InterfaceBase
from hwt.synthesizer.param import Param
from hwt.synthesizer.rtlLevel.mark_visibility_of_signals_and_check_drivers import\
markVisibilityOfSignalsAndCheckDrivers
from hwt.synthesizer.rtlLevel.remove_unconnected_signals import removeUnconnectedSignals
from hwt.synthesizer.rtlLevel.rtlSignal import RtlSignal, NOT_SPECIFIED
from hwt.synthesizer.rtlLevel.rtlSyncSignal import RtlSyncSignal
from hwt.synthesizer.rtlLevel.statements_to_HdlStmCodeBlockContainers import\
statements_to_HdlStmCodeBlockContainers
from hwt.doc_markers import internal
class RtlNetlist():
"""
Hierarchical container for signals
:ivar ~.parent: optional parent for debug and late component inspection
:ivar ~.signals: set of all signals in this context
:ivar ~.statements: list of all statements which are connected to signals in this context
:ivar ~.subUnits: is set of all units in this context
:type ~.interfaces: Dict[RtlSignal, DIRECTION]
:ivar ~.interfaces: initialized in create_HdlModuleDef
:type ~.ent: HdlModuleDec
:ivar ~.ent: initialized in create_HdlModuleDec
:type ~.arch: HdlModuleDef
:ivar ~.arch: initialized in create_HdlModuleDef
:ivar ~.hdl_objs: The list of HDL objects which were produced by this instance
usually contains HdlModudeleDef but may contain imports/globals etc.
"""
def __init__(self, parent: Optional["Unit"]=None):
self.parent = parent
self.signals = set()
self.statements = set()
self.subUnits = set()
self.interfaces = {}
self.hdl_objs = []
self.ent = None
self.arch = None
self._port_items = []
def sig(self, name, dtype=BIT, clk=None, syncRst=None,
def_val=None, nop_val=NOT_SPECIFIED) -> Union[RtlSignal, RtlSyncSignal]:
"""
Create new signal in this context
:param clk: clk signal, if specified signal is synthesized
as SyncSignal
:param syncRst: synchronous reset signal
:param def_val: a default value used for reset and initialization
:param nop_val: a value which is used to drive the signal if there is no other drive
(used to prevent latches and to specify default values for unconnected signals)
"""
_def_val = _try_cast_any_to_HValue(def_val, dtype, True)
if nop_val is not NOT_SPECIFIED:
nop_val = _try_cast_any_to_HValue(nop_val, dtype, False)
if clk is not None:
s = RtlSyncSignal(self, name, dtype, _def_val, nop_val)
if syncRst is not None and def_val is None:
raise SigLvlConfErr(
"Probably forgotten default value on sync signal %s", name)
# dst_resolve_fn is overridden because default assign would assign to the "next" signal
if syncRst is not None:
r = If(syncRst._isOn(),
s(_def_val, dst_resolve_fn=lambda x: x)
).Else(
s(s.next, dst_resolve_fn=lambda x: x)
)
else:
r = [
s(s.next, dst_resolve_fn=lambda x: x)
]
if isinstance(clk, (InterfaceBase, RtlSignal)):
clk_trigger = clk._onRisingEdge()
else:
# has to be tuple of (clk_sig, AllOps.RISING/FALLING_EDGE)
clk, clk_edge = clk
if clk_edge is AllOps.RISING_EDGE:
clk_trigger = clk._onRisingEdge()
elif clk_edge is AllOps.FALLING_EDGE:
clk_trigger = clk._onFallingEdge()
else:
raise ValueError(
"Invalid clock edge specification", clk_edge)
If(clk_trigger,
r
)
else:
if syncRst:
raise SigLvlConfErr(
f"Signal {name:s} has reset but has no clk")
s = RtlSignal(self, name, dtype, def_val=_def_val, nop_val=nop_val)
return s
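# Illustrative sketch of typical ``sig`` usage (not part of the original
# module; the signal names are placeholders and the assignment idiom assumes
# hwt's callable-signal syntax):
#
#   ctx = RtlNetlist()
#   clk = ctx.sig("clk")
#   rst = ctx.sig("rst")
#   # a 1-bit register cleared to 0 on synchronous reset
#   reg = ctx.sig("reg", BIT, clk=clk, syncRst=rst, def_val=0)
#   reg(~reg)   # drive the register's "next" value each clock cycle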
def create_HdlModuleDec(self, name: str,
store_manager: "StoreManager",
params: List[Param]):
"""
Generate a module header (entity) for this module
"""
self.ent = ent = HdlModuleDec()
ent.name = store_manager.name_scope.checked_name(name, ent)
ns = store_manager.hierarchy_push(ent)
# create generics
for p in sorted(params, key=lambda x: x._name):
hdl_val = p.get_hdl_value()
v = HdlIdDef()
v.origin = p
v.name = p.hdl_name = ns.checked_name(p._name, p)
v.type = hdl_val._dtype
v.value = hdl_val
ent.params.append(v)
return ent
def create_HdlModuleDef(self,
target_platform: DummyPlatform,
store_manager: "StoreManager"):
"""
Generate a module body (architecture) for this module
* Resolve name collisions
* Convert netlist representation to HdlProcesses
* Remove unconnected
* Mark visibility of signals
"""
for proc in target_platform.beforeHdlArchGeneration:
proc(self)
ns = store_manager.name_scope
mdef = HdlModuleDef()
mdef.dec = self.ent
mdef.module_name = HdlValueId(self.ent.name, obj=self.ent)
mdef.name = "rtl"
processes = sorted(self.statements, key=HdlStatement_sort_key)
processes = sorted(statements_to_HdlStmCodeBlockContainers(processes), key=HdlStatement_sort_key)
# add signals, variables etc. in architecture
for s in sorted((s for s in self.signals
if not s.hidden and
s not in self.interfaces.keys()),
key=RtlSignal_sort_key):
v = HdlIdDef()
v.origin = s
s.name = v.name = ns.checked_name(s.name, s)
v.type = s._dtype
v.value = s.def_val
v.is_const = s._const
mdef.objs.append(v)
for p in processes:
p.name = ns.checked_name(p.name, p)
mdef.objs.extend(processes)
# instantiate subUnits in architecture
for u in self.subUnits:
ci = HdlCompInst()
ci.origin = u
ci.module_name = HdlValueId(u._ctx.ent.name, obj=u._ctx.ent)
ci.name = HdlValueId(ns.checked_name(u._name + "_inst", ci), obj=u)
e = u._ctx.ent
ci.param_map.extend(e.params)
ci.port_map.extend(e.ports)
mdef.objs.append(ci)
self.arch = mdef
return mdef
def getDebugScopeName(self):
scope = []
p = self.parent
while p is not None:
scope.append(p._name)
try:
p = p._parent
except AttributeError:
break
return ".".join(reversed(scope))
@internal
def _try_cast_any_to_HValue(v, dtype, require_const):
if isinstance(v, RtlSignal):
assert not require_const or v._const, \
"Initial value of signal has to be a constant"
return v._auto_cast(dtype)
elif isinstance(v, HValue):
return v._auto_cast(dtype)
elif isinstance(v, InterfaceBase):
return v._sig
else:
return dtype.from_py(v)
| mit | 2,040,947,391,428,766,200 | 37.160377 | 105 | 0.593769 | false |
EdinburghNLP/nematus | nematus/exponential_smoothing.py | 1 | 3493 | import tensorflow as tf
# How often to update smoothed variables (in terms of training steps).
DEFAULT_UPDATE_FREQUENCY = 5
class ExponentialSmoothing(object):
"""Defines TensorFlow variables and operations for exponential smoothing.
Following Marian [1], we maintain smoothed versions of all trainable
variables. This class creates the smoothed variables (assuming that the
model has already been initialized) and provides operations that can be
run to update the variables and to interchange the values of the raw and
the smoothed variables (which can be used to swap-in the smoothed versions
for validation, for instance).
Ideally, the smoothed variables would be updated after every training step,
but in practice that introduces a noticeable overhead (around 20%)
due to the need to transfer tensor values from GPU memory into CPU memory.
Instead we allow updating after every N steps by increasing the smoothing
factor accordingly. The default N=5 seems to be a good compromise.
[1]
"Marian: Fast Neural Machine Translation in C++",
Junczys-Dowmunt et al., in Proceedings of ACL 2018, System Demonstrations.
"""
def __init__(self, smoothing_factor,
update_frequency=DEFAULT_UPDATE_FREQUENCY):
"""Creates TF variables and operations.
Args:
smoothing_factor: float controlling weight of past vs new values.
update_frequency: integer indicating how often updates will occur.
"""
self._update_frequency = update_frequency
adjusted_smoothing_factor = smoothing_factor * update_frequency
# Smoothed variables are stored in CPU memory to avoid eating into
# valuable GPU memory.
device_spec = tf.DeviceSpec(device_type="CPU", device_index=0)
with tf.device(device_spec):
# Create variables to hold the smoothed versions of all trainable
# variables.
smooth_vars = {}
for v in tf.compat.v1.trainable_variables():
assert v.name[-2:] == ":0"
name = v.name[:-2] + "_smooth"
s = tf.compat.v1.get_variable(name=name,
initializer=tf.zeros_like(v),
trainable=False,
use_resource=True)
smooth_vars[v.name] = s
# Define the ops to update the smoothed variables.
self._update_ops = []
for v in tf.compat.v1.trainable_variables():
s = smooth_vars[v.name]
updated_s = (1 - adjusted_smoothing_factor) * s \
+ adjusted_smoothing_factor * v
self._update_ops += [tf.compat.v1.assign(s, updated_s)]
# Define the ops to swap the raw and smoothed variables.
self._swap_ops = []
for v in tf.compat.v1.trainable_variables():
s = smooth_vars[v.name]
v_value = v.read_value()
s_value = s.read_value()
with tf.control_dependencies([v_value, s_value]):
self._swap_ops += [v.assign(s_value)]
self._swap_ops += [s.assign(v_value)]
@property
def update_ops(self):
return self._update_ops
@property
def swap_ops(self):
return self._swap_ops
@property
def update_frequency(self):
return self._update_frequency
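# Illustrative training-loop sketch (not part of the original module): it
# assumes a TF1-style session `sess`, that the model's trainable variables
# already exist, and that `train_op`/`validate()` are placeholders.
#
#   smoother = ExponentialSmoothing(smoothing_factor=1e-4)
#   for step in range(num_steps):
#       sess.run(train_op)
#       if (step + 1) % smoother.update_frequency == 0:
#           sess.run(smoother.update_ops)    # refresh the smoothed copies
#   sess.run(smoother.swap_ops)              # swap in the smoothed weights
#   validate()
#   sess.run(smoother.swap_ops)              # swap the raw weights back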
| bsd-3-clause | 2,266,908,589,127,385,000 | 42.123457 | 79 | 0.608932 | false |
thakadu/Abraxas | scripts/load_feeds.py | 1 | 1986 | """Loads Feed data from a csv file into the feed table of the database"""
import logging
import csv
from optparse import OptionParser
from paste.deploy import appconfig
#from pylons import app_globals
from abraxas.config.environment import load_environment
from sqlalchemy import create_engine, MetaData, select
from sqlalchemy.exc import IntegrityError
import sqlalchemy as sa
"""
The format of the input file should be csv with these fields
Title, Web Url, Feed Url
"""
log = logging.getLogger(__name__)
class DataFormatException(Exception):
"""Raise when the csv file does not have the correct number of fields"""
pass
if __name__ == '__main__':
parser = OptionParser()
parser.add_option('--ini',
help='INI file to use for application settings',
type='str',
default='development.ini')
parser.add_option('--filename',
help='File containing the feed data.',
type='str',
default='data/feeds.csv')
(options, args) = parser.parse_args()
conf = appconfig('config:' + options.ini, relative_to='.')
load_environment(conf.global_conf, conf.local_conf)
engine = create_engine(conf['sqlalchemy.url'], echo=True)
meta = MetaData()
conn = engine.connect()
print conn
fh = open(options.filename)
reader = csv.reader(fh)
feed_table = sa.Table('feed', meta, autoload=True, autoload_with=engine)
for line in reader:
if len(line) != 3:
raise DataFormatException
title = line[0]
weburl = line[1]
url = line[2]
insert = feed_table.insert().values(
title=title,
url=url,
weburl=weburl)
try:
conn.execute(insert)
except IntegrityError:
# Most likely loading a duplicate feed row
log.debug("Duplicate row, skipping this line...")
continue
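# Example invocation (illustrative; the paths shown are the script defaults):
#
#   python load_feeds.py --ini development.ini --filename data/feeds.csv
#
# where data/feeds.csv holds one feed per line in the format described above,
# for example:
#
#   Example Blog,http://example.com,http://example.com/atom.xml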
| bsd-3-clause | 1,864,919,659,832,746,000 | 27.782609 | 76 | 0.613797 | false |
axiom-data-science/pyaxiom | pyaxiom/netcdf/sensors/dsg/timeseries/ir.py | 1 | 1602 | #!python
# coding=utf-8
from pyaxiom.netcdf import CFDataset
from pyaxiom import logger
class IndexedRaggedTimeseries(CFDataset):
@classmethod
def is_mine(cls, dsg):
try:
rvars = dsg.get_variables_by_attributes(cf_role='timeseries_id')
assert len(rvars) == 1
assert dsg.featureType.lower() == 'timeseries'
assert len(dsg.t_axes()) >= 1
assert len(dsg.x_axes()) >= 1
assert len(dsg.y_axes()) >= 1
r_index_vars = dsg.get_variables_by_attributes(
instance_dimension=lambda x: x is not None
)
assert len(r_index_vars) == 1
assert r_index_vars[0].instance_dimension in dsg.dimensions # Station dimension
# Allow for string variables
rvar = rvars[0]
# 0 = single
# 1 = array of strings/ints/bytes/etc
# 2 = array of character arrays
assert 0 <= len(rvar.dimensions) <= 2
except AssertionError:
return False
return True
def from_dataframe(self, df, variable_attributes=None, global_attributes=None):
variable_attributes = variable_attributes or {}
global_attributes = global_attributes or {}
raise NotImplementedError
def calculated_metadata(self, df=None, geometries=True, clean_cols=True, clean_rows=True):
# if df is None:
# df = self.to_dataframe(clean_cols=clean_cols, clean_rows=clean_rows)
raise NotImplementedError
def to_dataframe(self):
raise NotImplementedError
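# Illustrative check (not part of the original module): `dsg` is assumed to be
# an already-opened CFDataset pointing at a CF discrete-sampling-geometry
# netCDF file.
#
#   if IndexedRaggedTimeseries.is_mine(dsg):
#       # the file is an indexed ragged timeseries and can be handled here
#       ...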
| mit | 3,502,373,414,636,524,500 | 31.693878 | 94 | 0.601124 | false |
andrewyoung1991/scons | QMTest/TestCommon.py | 1 | 27429 | """
TestCommon.py: a testing framework for commands and scripts
with commonly useful error handling
The TestCommon module provides a simple, high-level interface for writing
tests of executable commands and scripts, especially commands and scripts
that interact with the file system. All methods throw exceptions and
exit on failure, with useful error messages. This makes a number of
explicit checks unnecessary, making the test scripts themselves simpler
to write and easier to read.
The TestCommon class is a subclass of the TestCmd class. In essence,
TestCommon is a wrapper that handles common TestCmd error conditions in
useful ways. You can use TestCommon directly, or subclass it for your
program and add additional (or override) methods to tailor it to your
program's specific needs. Alternatively, the TestCommon class serves
as a useful example of how to define your own TestCmd subclass.
As a subclass of TestCmd, TestCommon provides access to all of the
variables and methods from the TestCmd module. Consequently, you can
use any variable or method documented in the TestCmd module without
having to explicitly import TestCmd.
A TestCommon environment object is created via the usual invocation:
import TestCommon
test = TestCommon.TestCommon()
You can use all of the TestCmd keyword arguments when instantiating a
TestCommon object; see the TestCmd documentation for details.
Here is an overview of the methods and keyword arguments that are
provided by the TestCommon class:
test.must_be_writable('file1', ['file2', ...])
test.must_contain('file', 'required text\n')
test.must_contain_all_lines(output, lines, ['title', find])
test.must_contain_any_line(output, lines, ['title', find])
test.must_contain_exactly_lines(output, lines, ['title', find])
test.must_exist('file1', ['file2', ...])
test.must_match('file', "expected contents\n")
test.must_not_be_writable('file1', ['file2', ...])
test.must_not_contain('file', 'banned text\n')
test.must_not_contain_any_line(output, lines, ['title', find])
test.must_not_exist('file1', ['file2', ...])
test.run(options = "options to be prepended to arguments",
stdout = "expected standard output from the program",
stderr = "expected error output from the program",
status = expected_status,
match = match_function)
The TestCommon module also provides the following variables
TestCommon.python
TestCommon._python_
TestCommon.exe_suffix
TestCommon.obj_suffix
TestCommon.shobj_prefix
TestCommon.shobj_suffix
TestCommon.lib_prefix
TestCommon.lib_suffix
TestCommon.dll_prefix
TestCommon.dll_suffix
"""
# Copyright 2000-2010 Steven Knight
# This module is free software, and you may redistribute it and/or modify
# it under the same terms as Python itself, so long as this copyright message
# and disclaimer are retained in their original form.
#
# IN NO EVENT SHALL THE AUTHOR BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT,
# SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OF
# THIS CODE, EVEN IF THE AUTHOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
#
# THE AUTHOR SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE. THE CODE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS,
# AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE,
# SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
__author__ = "Steven Knight <knight at baldmt dot com>"
__revision__ = "TestCommon.py 1.3.D001 2010/06/03 12:58:27 knight"
__version__ = "1.3"
import copy
import os
import stat
import sys
import glob
try:
from collections import UserList
except ImportError:
# no 'collections' module or no UserList in collections
exec('from UserList import UserList')
from TestCmd import *
from TestCmd import __all__
__all__.extend([ 'TestCommon',
'exe_suffix',
'obj_suffix',
'shobj_prefix',
'shobj_suffix',
'lib_prefix',
'lib_suffix',
'dll_prefix',
'dll_suffix',
])
try:
sorted
except NameError:
# Pre-2.4 Python has no sorted() function.
#
# The pre-2.4 Python list.sort() method does not support
# list.sort(key=) nor list.sort(reverse=) keyword arguments, so
# we must implement the functionality of those keyword arguments
# by hand instead of passing them to list.sort().
def sorted(iterable, cmp=None, key=None, reverse=False):
if key is not None:
result = [(key(x), x) for x in iterable]
else:
result = iterable[:]
if cmp is None:
# Pre-2.3 Python does not support list.sort(None).
result.sort()
else:
result.sort(cmp)
if key is not None:
result = [t1 for t0,t1 in result]
if reverse:
result.reverse()
return result
# Variables that describe the prefixes and suffixes on this system.
if sys.platform == 'win32':
exe_suffix = '.exe'
obj_suffix = '.obj'
shobj_suffix = '.obj'
shobj_prefix = ''
lib_prefix = ''
lib_suffix = '.lib'
dll_prefix = ''
dll_suffix = '.dll'
elif sys.platform == 'cygwin':
exe_suffix = '.exe'
obj_suffix = '.o'
shobj_suffix = '.os'
shobj_prefix = ''
lib_prefix = 'lib'
lib_suffix = '.a'
dll_prefix = 'cyg'
dll_suffix = '.dll'
elif sys.platform.find('irix') != -1:
exe_suffix = ''
obj_suffix = '.o'
shobj_suffix = '.o'
shobj_prefix = ''
lib_prefix = 'lib'
lib_suffix = '.a'
dll_prefix = 'lib'
dll_suffix = '.so'
elif sys.platform.find('darwin') != -1:
exe_suffix = ''
obj_suffix = '.o'
shobj_suffix = '.os'
shobj_prefix = ''
lib_prefix = 'lib'
lib_suffix = '.a'
dll_prefix = 'lib'
dll_suffix = '.dylib'
elif sys.platform.find('sunos') != -1:
exe_suffix = ''
obj_suffix = '.o'
shobj_suffix = '.o'
shobj_prefix = 'so_'
lib_prefix = 'lib'
lib_suffix = '.a'
dll_prefix = 'lib'
dll_suffix = '.so'
else:
exe_suffix = ''
obj_suffix = '.o'
shobj_suffix = '.os'
shobj_prefix = ''
lib_prefix = 'lib'
lib_suffix = '.a'
dll_prefix = 'lib'
dll_suffix = '.so'
def is_List(e):
return isinstance(e, (list, UserList))
def is_Tuple(e):
return isinstance(e, tuple)
def is_Sequence(e):
return (not hasattr(e, "strip") and
hasattr(e, "__getitem__") or
hasattr(e, "__iter__"))
def is_writable(f):
mode = os.stat(f)[stat.ST_MODE]
return mode & stat.S_IWUSR
def separate_files(flist):
existing = []
missing = []
for f in flist:
if os.path.exists(f):
existing.append(f)
else:
missing.append(f)
return existing, missing
if os.name == 'posix':
def _failed(self, status = 0):
if self.status is None or status is None:
return None
return _status(self) != status
def _status(self):
return self.status
elif os.name == 'nt':
def _failed(self, status = 0):
return not (self.status is None or status is None) and \
self.status != status
def _status(self):
return self.status
class TestCommon(TestCmd):
# Additional methods from the Perl Test::Cmd::Common module
# that we may wish to add in the future:
#
# $test->subdir('subdir', ...);
#
# $test->copy('src_file', 'dst_file');
def __init__(self, **kw):
"""Initialize a new TestCommon instance. This involves just
calling the base class initialization, and then changing directory
to the workdir.
"""
TestCmd.__init__(self, **kw)
os.chdir(self.workdir)
def options_arguments(self, options, arguments):
"""Merges the "options" keyword argument with the arguments."""
if options:
if arguments is None:
return options
if isinstance(options, str):
options = [options]
if isinstance(arguments, str):
arguments = [arguments]
arguments = ' '.join(options + arguments)
return arguments
def must_be_writable(self, *files):
"""Ensures that the specified file(s) exist and are writable.
An individual file can be specified as a list of directory names,
in which case the pathname will be constructed by concatenating
them. Exits FAILED if any of the files does not exist or is
not writable.
"""
files = [is_List(x) and os.path.join(*x) or x for x in files]
existing, missing = separate_files(files)
unwritable = [x for x in existing if not is_writable(x)]
if missing:
print "Missing files: `%s'" % "', `".join(missing)
if unwritable:
print "Unwritable files: `%s'" % "', `".join(unwritable)
self.fail_test(missing + unwritable)
def must_contain(self, file, required, mode = 'rb', find = None):
"""Ensures that the specified file contains the required text.
"""
file_contents = self.read(file, mode)
if find is None:
def find(o, l):
try:
return o.index(l)
except ValueError:
return None
contains = find(file_contents, required)
# a match at offset 0 is still a match, so compare against None explicitly
if contains is None:
print "File `%s' does not contain required string." % file
print self.banner('Required string ')
print required
print self.banner('%s contents ' % file)
print file_contents
self.fail_test(contains is None)
def must_contain_all_lines(self, output, lines, title=None, find=None):
"""Ensures that the specified output string (first argument)
contains all of the specified lines (second argument).
An optional third argument can be used to describe the type
of output being searched, and only shows up in failure output.
An optional fourth argument can be used to supply a different
function, of the form "find(line, output), to use when searching
for lines in the output.
"""
if find is None:
def find(o, l):
try:
return o.index(l)
except ValueError:
return None
missing = []
if is_List(output):
output = '\n'.join(output)
for line in lines:
if find(output, line) is None:
missing.append(line)
if missing:
if title is None:
title = 'output'
sys.stdout.write("Missing expected lines from %s:\n" % title)
for line in missing:
sys.stdout.write(' ' + repr(line) + '\n')
sys.stdout.write(self.banner(title + ' ') + '\n')
sys.stdout.write(output)
self.fail_test()
def must_contain_any_line(self, output, lines, title=None, find=None):
"""Ensures that the specified output string (first argument)
contains at least one of the specified lines (second argument).
An optional third argument can be used to describe the type
of output being searched, and only shows up in failure output.
An optional fourth argument can be used to supply a different
function, of the form "find(line, output), to use when searching
for lines in the output.
"""
if find is None:
def find(o, l):
try:
return o.index(l)
except ValueError:
return None
for line in lines:
if find(output, line) is not None:
return
if title is None:
title = 'output'
sys.stdout.write("Missing any expected line from %s:\n" % title)
for line in lines:
sys.stdout.write(' ' + repr(line) + '\n')
sys.stdout.write(self.banner(title + ' ') + '\n')
sys.stdout.write(output)
self.fail_test()
def must_contain_exactly_lines(self, output, expect, title=None, find=None):
"""Ensures that the specified output string (first argument)
contains all of the lines in the expected string (second argument)
with none left over.
An optional third argument can be used to describe the type
of output being searched, and only shows up in failure output.
An optional fourth argument can be used to supply a different
function, of the form "find(line, output), to use when searching
for lines in the output. The function must return the index
of the found line in the output, or None if the line is not found.
"""
out = output.splitlines()
if is_List(expect):
exp = [ e.rstrip('\n') for e in expect ]
else:
exp = expect.splitlines()
if sorted(out) == sorted(exp):
# early out for exact match
return
if find is None:
def find(o, l):
try:
return o.index(l)
except ValueError:
return None
missing = []
for line in exp:
found = find(out, line)
if found is None:
missing.append(line)
else:
out.pop(found)
if not missing and not out:
# all lines were matched
return
if title is None:
title = 'output'
if missing:
sys.stdout.write("Missing expected lines from %s:\n" % title)
for line in missing:
sys.stdout.write(' ' + repr(line) + '\n')
sys.stdout.write(self.banner('Missing %s ' % title) + '\n')
if out:
sys.stdout.write("Extra unexpected lines from %s:\n" % title)
for line in out:
sys.stdout.write(' ' + repr(line) + '\n')
sys.stdout.write(self.banner('Extra %s ' % title) + '\n')
sys.stdout.flush()
self.fail_test()
def must_contain_lines(self, lines, output, title=None, find = None):
# Deprecated; retain for backwards compatibility.
return self.must_contain_all_lines(output, lines, title, find)
def must_exist(self, *files):
"""Ensures that the specified file(s) must exist. An individual
file be specified as a list of directory names, in which case the
pathname will be constructed by concatenating them. Exits FAILED
if any of the files does not exist.
"""
files = [is_List(x) and os.path.join(*x) or x for x in files]
missing = [x for x in files if not os.path.exists(x) and not os.path.islink(x) ]
if missing:
print "Missing files: `%s'" % "', `".join(missing)
self.fail_test(missing)
def must_exist_one_of(self, files):
"""Ensures that at least one of the specified file(s) exists.
The filenames can be given as a list, where each entry may be
a single path string, or a tuple of folder names and the final
filename that get concatenated.
Supports wildcard names like 'foo-1.2.3-*.rpm'.
Exits FAILED if none of the files exists.
"""
missing = []
for x in files:
if is_List(x) or is_Tuple(x):
xpath = os.path.join(*x)
else:
xpath = is_Sequence(x) and os.path.join(x) or x
if glob.glob(xpath):
return
missing.append(xpath)
print "Missing one of: `%s'" % "', `".join(missing)
self.fail_test(missing)
def must_match(self, file, expect, mode = 'rb', match=None):
"""Matches the contents of the specified file (first argument)
against the expected contents (second argument). The expected
contents are a list of lines or a string which will be split
on newlines.
"""
file_contents = self.read(file, mode)
if not match:
match = self.match
try:
self.fail_test(not match(file_contents, expect))
except KeyboardInterrupt:
raise
except:
print "Unexpected contents of `%s'" % file
self.diff(expect, file_contents, 'contents ')
raise
def must_not_contain(self, file, banned, mode = 'rb', find = None):
"""Ensures that the specified file doesn't contain the banned text.
"""
file_contents = self.read(file, mode)
if find is None:
def find(o, l):
try:
return o.index(l)
except ValueError:
return None
contains = find(file_contents, banned)
# an offset of 0 is a real match, so compare against None explicitly
if contains is not None:
print "File `%s' contains banned string." % file
print self.banner('Banned string ')
print banned
print self.banner('%s contents ' % file)
print file_contents
self.fail_test(contains is not None)
def must_not_contain_any_line(self, output, lines, title=None, find=None):
"""Ensures that the specified output string (first argument)
does not contain any of the specified lines (second argument).
An optional third argument can be used to describe the type
of output being searched, and only shows up in failure output.
An optional fourth argument can be used to supply a different
function, of the form "find(line, output), to use when searching
for lines in the output.
"""
if find is None:
def find(o, l):
try:
return o.index(l)
except ValueError:
return None
unexpected = []
for line in lines:
if find(output, line) is not None:
unexpected.append(line)
if unexpected:
if title is None:
title = 'output'
sys.stdout.write("Unexpected lines in %s:\n" % title)
for line in unexpected:
sys.stdout.write(' ' + repr(line) + '\n')
sys.stdout.write(self.banner(title + ' ') + '\n')
sys.stdout.write(output)
self.fail_test()
def must_not_contain_lines(self, lines, output, title=None, find=None):
return self.must_not_contain_any_line(output, lines, title, find)
def must_not_exist(self, *files):
"""Ensures that the specified file(s) must not exist.
        An individual file may be specified as a list of directory names, in
which case the pathname will be constructed by concatenating them.
Exits FAILED if any of the files exists.
"""
files = [is_List(x) and os.path.join(*x) or x for x in files]
existing = [x for x in files if os.path.exists(x) or os.path.islink(x)]
if existing:
print "Unexpected files exist: `%s'" % "', `".join(existing)
self.fail_test(existing)
def must_not_exist_any_of(self, files):
"""Ensures that none of the specified file(s) exists.
The filenames can be given as a list, where each entry may be
a single path string, or a tuple of folder names and the final
filename that get concatenated.
Supports wildcard names like 'foo-1.2.3-*.rpm'.
Exits FAILED if any of the files exists.
"""
existing = []
for x in files:
if is_List(x) or is_Tuple(x):
xpath = os.path.join(*x)
else:
xpath = is_Sequence(x) and os.path.join(x) or x
if glob.glob(xpath):
existing.append(xpath)
if existing:
print "Unexpected files exist: `%s'" % "', `".join(existing)
self.fail_test(existing)
def must_not_be_writable(self, *files):
"""Ensures that the specified file(s) exist and are not writable.
An individual file can be specified as a list of directory names,
in which case the pathname will be constructed by concatenating
them. Exits FAILED if any of the files does not exist or is
writable.
"""
files = [is_List(x) and os.path.join(*x) or x for x in files]
existing, missing = separate_files(files)
writable = list(filter(is_writable, existing))
if missing:
print "Missing files: `%s'" % "', `".join(missing)
if writable:
print "Writable files: `%s'" % "', `".join(writable)
self.fail_test(missing + writable)
def _complete(self, actual_stdout, expected_stdout,
actual_stderr, expected_stderr, status, match):
"""
Post-processes running a subcommand, checking for failure
status and displaying output appropriately.
"""
if _failed(self, status):
expect = ''
if status != 0:
expect = " (expected %s)" % str(status)
print "%s returned %s%s" % (self.program, _status(self), expect)
print self.banner('STDOUT ')
print actual_stdout
print self.banner('STDERR ')
print actual_stderr
self.fail_test()
if (expected_stdout is not None
and not match(actual_stdout, expected_stdout)):
self.diff(expected_stdout, actual_stdout, 'STDOUT ')
if actual_stderr:
print self.banner('STDERR ')
print actual_stderr
self.fail_test()
if (expected_stderr is not None
and not match(actual_stderr, expected_stderr)):
print self.banner('STDOUT ')
print actual_stdout
self.diff(expected_stderr, actual_stderr, 'STDERR ')
self.fail_test()
def start(self, program = None,
interpreter = None,
options = None,
arguments = None,
universal_newlines = None,
**kw):
"""
Starts a program or script for the test environment, handling
any exceptions.
"""
arguments = self.options_arguments(options, arguments)
try:
return TestCmd.start(self, program, interpreter, arguments,
universal_newlines, **kw)
except KeyboardInterrupt:
raise
except Exception, e:
print self.banner('STDOUT ')
try:
print self.stdout()
except IndexError:
pass
print self.banner('STDERR ')
try:
print self.stderr()
except IndexError:
pass
cmd_args = self.command_args(program, interpreter, arguments)
sys.stderr.write('Exception trying to execute: %s\n' % cmd_args)
raise e
def finish(self, popen, stdout = None, stderr = '', status = 0, **kw):
"""
Finishes and waits for the process being run under control of
the specified popen argument. Additional arguments are similar
to those of the run() method:
stdout The expected standard output from
the command. A value of None means
don't test standard output.
stderr The expected error output from
the command. A value of None means
don't test error output.
status The expected exit status from the
command. A value of None means don't
test exit status.
"""
TestCmd.finish(self, popen, **kw)
match = kw.get('match', self.match)
self._complete(self.stdout(), stdout,
self.stderr(), stderr, status, match)
def run(self, options = None, arguments = None,
stdout = None, stderr = '', status = 0, **kw):
"""Runs the program under test, checking that the test succeeded.
The parameters are the same as the base TestCmd.run() method,
with the addition of:
options Extra options that get appended to the beginning
of the arguments.
stdout The expected standard output from
the command. A value of None means
don't test standard output.
stderr The expected error output from
the command. A value of None means
don't test error output.
status The expected exit status from the
command. A value of None means don't
test exit status.
By default, this expects a successful exit (status = 0), does
not test standard output (stdout = None), and expects that error
output is empty (stderr = "").
"""
kw['arguments'] = self.options_arguments(options, arguments)
try:
match = kw['match']
del kw['match']
except KeyError:
match = self.match
TestCmd.run(self, **kw)
self._complete(self.stdout(), stdout,
self.stderr(), stderr, status, match)
def skip_test(self, message="Skipping test.\n"):
"""Skips a test.
Proper test-skipping behavior is dependent on the external
TESTCOMMON_PASS_SKIPS environment variable. If set, we treat
the skip as a PASS (exit 0), and otherwise treat it as NO RESULT.
In either case, we print the specified message as an indication
that the substance of the test was skipped.
(This was originally added to support development under Aegis.
Technically, skipping a test is a NO RESULT, but Aegis would
treat that as a test failure and prevent the change from going to
        the next step. Since we didn't want to force anyone using Aegis
to have to install absolutely every tool used by the tests, we
would actually report to Aegis that a skipped test has PASSED
so that the workflow isn't held up.)
"""
if message:
sys.stdout.write(message)
sys.stdout.flush()
pass_skips = os.environ.get('TESTCOMMON_PASS_SKIPS')
if pass_skips in [None, 0, '0']:
# skip=1 means skip this function when showing where this
# result came from. They only care about the line where the
# script called test.skip_test(), not the line number where
# we call test.no_result().
self.no_result(skip=1)
else:
# We're under the development directory for this change,
# so this is an Aegis invocation; pass the test (exit 0).
self.pass_test()
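# Illustrative sketch (an editorial addition, not part of the original
# module): roughly how a test script drives the assertion helpers defined
# above.  The enclosing class is assumed to be TestCommon (as suggested by
# the TESTCOMMON_PASS_SKIPS variable), and the program/workdir keyword
# arguments are assumed from typical TestCmd-style usage; adjust both to
# your environment.  The function is never called at import time.
def _example_test_script():
    test = TestCommon(program='my_tool', workdir='')
    # run() defaults to expecting exit status 0 and empty error output.
    test.run(arguments='--version', stdout=None)
    test.must_contain_all_lines(test.stdout(), ['my_tool'])
    # must_exist() accepts plain paths or lists of path components.
    test.must_exist(['output', 'result.txt'])
    test.must_not_contain('result.txt', 'Traceback')
    test.pass_test()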
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| mit | 3,280,582,583,667,174,400 | 36.116373 | 88 | 0.580298 | false |
Pikecillo/genna | external/PyXML-0.8.4/test/dom/test_entity.py | 1 | 1296 | from TestSuite import EMPTY_NAMESPACE
def test(tester):
tester.startGroup('Entity')
tester.startTest('Testing syntax')
try:
from xml.dom import Entity
from xml.dom.Entity import Entity
except:
tester.error('Error in syntax', 1)
tester.testDone()
tester.startTest('Creating test environment')
from xml.dom import implementation
dt = implementation.createDocumentType('','','')
doc = implementation.createDocument(EMPTY_NAMESPACE,'ROOT',dt)
ent = doc._4dom_createEntity("-//FOURTHOUGHT//EN", "/tmp/entity", "")
tester.testDone()
tester.startTest('Testing attributes')
if ent.publicId != '-//FOURTHOUGHT//EN':
tester.error('publicId is incorrect')
if ent.systemId != '/tmp/entity':
tester.error('systemId is incorrect')
tester.testDone()
tester.startTest('Test cloneNode()')
ent1 = ent.cloneNode(1)
if ent1.publicId != ent.publicId:
tester.error("cloneNode fails on publicId")
if ent1.systemId != ent.systemId:
tester.error("cloneNode fails on systemId")
tester.testDone()
return tester.groupDone()
if __name__ == '__main__':
import sys
import TestSuite
tester = TestSuite.TestSuite()
retVal = test(tester)
sys.exit(retVal)
| gpl-2.0 | 7,661,269,185,216,156,000 | 24.411765 | 73 | 0.652778 | false |
opennode/waldur-mastermind | src/waldur_core/core/tests/test_admin.py | 1 | 4743 | from django.contrib.admin.sites import AdminSite
from django.contrib.contenttypes.models import ContentType
from django.test import TestCase, override_settings
from reversion.models import Version
from waldur_core.core.admin import UserAdmin
from waldur_core.core.models import User
from waldur_core.core.tests.helpers import override_waldur_core_settings
from waldur_core.structure.admin import CustomerAdmin
from waldur_core.structure.models import Customer
from waldur_core.structure.tests.factories import CustomerFactory, UserFactory
class MockRequest:
pass
class MockSuperUser:
def has_perm(self, perm):
return True
request = MockRequest()
request.user = MockSuperUser()
class UserAdminTest(TestCase):
def change_user(self, **kwargs):
user = UserFactory()
ma = UserAdmin(User, AdminSite())
UserChangeForm = ma.get_form(request, user, change=True)
form_for_data = UserChangeForm(instance=user)
post_data = form_for_data.initial
post_data.update(kwargs)
form = UserChangeForm(instance=user, data=post_data)
form.save()
user.refresh_from_db()
return user
def test_civil_number_is_stripped(self):
user = self.change_user(civil_number=' NEW_CIVIL_NUMBER ')
self.assertEqual(user.civil_number, 'NEW_CIVIL_NUMBER')
def test_whitespace_civil_number_converts_to_none(self):
user = self.change_user(civil_number=' ')
self.assertEqual(user.civil_number, None)
def test_empty_civil_number_converts_to_none(self):
user = self.change_user(civil_number='')
self.assertEqual(user.civil_number, None)
class NativeNameAdminTest(TestCase):
@override_waldur_core_settings(NATIVE_NAME_ENABLED=False)
def test_native_name_is_omitted_in_user_admin_if_feature_is_not_enabled(self):
user = UserFactory()
ma = UserAdmin(User, AdminSite())
self.assertFalse('native_name' in ma.get_list_display(request))
self.assertFalse('native_name' in ma.get_search_fields(request))
self.assertTrue(
all(
'native_name' not in fieldset[1]['fields']
for fieldset in ma.get_fieldsets(request, user)
)
)
@override_waldur_core_settings(NATIVE_NAME_ENABLED=True)
def test_native_name_is_rendered_in_user_admin_if_feature_is_enabled(self):
user = UserFactory()
ma = UserAdmin(User, AdminSite())
self.assertTrue('native_name' in ma.get_list_display(request))
self.assertTrue('native_name' in ma.get_search_fields(request))
self.assertTrue(
any(
'native_name' in fieldset[1]['fields']
for fieldset in ma.get_fieldsets(request, user)
)
)
@override_waldur_core_settings(NATIVE_NAME_ENABLED=False)
def test_native_name_is_omitted_in_customer_admin_if_feature_is_disabled(self):
customer = CustomerFactory()
ma = CustomerAdmin(Customer, AdminSite())
self.assertFalse('native_name' in ma.get_fields(request, customer))
@override_waldur_core_settings(NATIVE_NAME_ENABLED=True)
def test_native_name_is_rendered_in_customer_admin_if_feature_is_enabled(self):
customer = CustomerFactory()
ma = CustomerAdmin(Customer, AdminSite())
self.assertTrue('native_name' in ma.get_fields(request, customer))
class UserReversionTest(TestCase):
@override_settings(
AUTHENTICATION_BACKENDS=('django.contrib.auth.backends.ModelBackend',)
)
def test_new_revisions_are_not_created_on_each_authentication(self):
staff = UserFactory(is_staff=True, is_superuser=True)
staff_password = User.objects.make_random_password()
staff.set_password(staff_password)
staff.save()
self.assertTrue(
self.client.login(username=staff.username, password=staff_password)
)
url = '/admin/core/user/add/'
user_password = User.objects.make_random_password()
self.client.post(
url,
{
'username': 'test',
'password1': user_password,
'password2': user_password,
},
)
user = User.objects.get(username='test')
ct = ContentType.objects.get_for_model(user)
self.assertEqual(
Version.objects.filter(object_id=user.id, content_type=ct).count(), 1
)
user.is_staff = True
user.save()
self.assertTrue(
self.client.login(username=user.username, password=user_password)
)
self.assertEqual(
Version.objects.filter(object_id=user.id, content_type=ct).count(), 1
)
| mit | 5,806,270,913,715,895,000 | 34.931818 | 83 | 0.654649 | false |
yro/veda_worker | veda_worker/generate_delivery.py | 1 | 5380 |
import boto
import boto.s3
from boto.s3.key import Key
import hashlib
import os
from os.path import expanduser
import sys
import shutil
from global_vars import *
from reporting import ErrorObject
from config import WorkerSetup
homedir = expanduser("~")
"""
Gets the specified Video and Encode objects, delivers the output file from
VEDA_WORK_DIR to the delivery endpoint, then retrieves and checks the URL
and passes the resulting info back to those objects
"""
WS = WorkerSetup()
if os.path.exists(WS.instance_yaml):
WS.run()
settings = WS.settings_dict
class Deliverable():
def __init__(self, VideoObject, encode_profile, output_file, **kwargs):
self.VideoObject = VideoObject
self.encode_profile = encode_profile
self.output_file = output_file
self.jobid = kwargs.get('jobid', None)
self.workdir = kwargs.get('workdir', None)
self.endpoint_url = None
self.hash_sum = 0
self.upload_filesize = 0
self.delivered = False
def run(self):
"""
Get file particulars, upload to s3
"""
if self.workdir is None:
if self.jobid is None:
self.workdir = os.path.join(
homedir,
'ENCODE_WORKDIR'
)
else:
self.workdir = os.path.join(
homedir,
'ENCODE_WORKDIR',
self.jobid
)
# file size
self.upload_filesize = os.stat(
os.path.join(self.workdir, self.output_file)
).st_size
# hash sum
self.hash_sum = hashlib.md5(
open(
os.path.join(
self.workdir,
self.output_file
), 'rb'
).read()
).hexdigest()
if self.upload_filesize < MULTI_UPLOAD_BARRIER:
"""
Upload single part
"""
self.delivered = self._s3_upload()
else:
"""
Upload multipart
"""
self.delivered = self._boto_multipart()
if self.delivered is False:
return None
self.endpoint_url = '/'.join((
'https://s3.amazonaws.com',
settings['aws_deliver_bucket'],
self.output_file
))
return True
def _s3_upload(self):
"""
        Upload the file as a single part (its size is under the
        MULTI_UPLOAD_BARRIER threshold defined in node_config)
"""
try:
conn = boto.connect_s3(
settings['aws_deliver_access_key'],
settings['aws_deliver_secret_key']
)
delv_bucket = conn.get_bucket(settings['aws_deliver_bucket'])
except:
ErrorObject().print_error(
message='Deliverable Fail: s3 Connection Error - Singleton'
)
return False
upload_key = Key(delv_bucket)
upload_key.key = self.output_file
upload_key.set_contents_from_filename(
os.path.join(self.workdir, self.output_file)
)
return True
def _boto_multipart(self):
"""
Split file into chunks, upload chunks
NOTE: this should never happen, as your files should be much
smaller than this, but one never knows
"""
if not os.path.exists(
os.path.join(
self.workdir,
self.output_file.split('.')[0]
)
):
os.mkdir(os.path.join(
self.workdir,
self.output_file.split('.')[0]
))
os.chdir(
os.path.join(self.workdir, self.output_file.split('.')[0])
)
# Split File into chunks
        split_command = 'split -b10m -a5'  # 10MB chunks, 5-character suffixes
sys.stdout.write('%s : %s\n' % (self.output_file, 'Generating Multipart'))
os.system(' '.join((split_command, os.path.join(self.workdir, self.output_file))))
sys.stdout.flush()
# Connect to s3
try:
c = boto.connect_s3(
settings['aws_deliver_access_key'],
settings['aws_deliver_secret_key']
)
b = c.lookup(settings['aws_deliver_bucket'])
except:
ErrorObject().print_error(
message='Deliverable Fail: s3 Connection Error - Multipart'
)
return False
if b is None:
ErrorObject().print_error(
message='Deliverable Fail: s3 Bucket Connection Error'
)
return False
"""
Upload and stitch parts
"""
mp = b.initiate_multipart_upload(self.output_file)
x = 1
for fle in sorted(os.listdir(
os.path.join(
self.workdir,
self.output_file.split('.')[0]
)
)):
sys.stdout.write('%s : %s\r' % (fle, 'uploading part'))
fp = open(fle, 'rb')
mp.upload_part_from_file(fp, x)
fp.close()
sys.stdout.flush()
x += 1
sys.stdout.write('\n')
mp.complete_upload()
# Clean up multipart
shutil.rmtree(os.path.join(self.workdir, self.output_file.split('.')[0]))
return True
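# Illustrative sketch (an editorial addition, not part of the original
# module): how a caller might hand a finished encode to Deliverable.  The
# `video` argument stands in for the VideoObject produced elsewhere in the
# pipeline, and the profile/file/job names below are placeholders.
def _example_delivery(video):
    deliverable = Deliverable(
        VideoObject=video,
        encode_profile='desktop_mp4',
        output_file='example_encode.mp4',
        jobid='0123abcd',
    )
    if deliverable.run() is True:
        # On success the object exposes the delivered URL plus file metadata.
        return {
            'url': deliverable.endpoint_url,
            'md5': deliverable.hash_sum,
            'bytes': deliverable.upload_filesize,
        }
    return None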
def main():
pass
if __name__ == '__main__':
sys.exit(main())
| gpl-3.0 | 7,279,386,653,945,648,000 | 26.589744 | 90 | 0.512639 | false |
yeleman/ramed-desktop | ramed/static.py | 1 | 1980 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# vim: ai ts=4 sts=4 et sw=4 nu
import os
import sys
import platform
import json
import logging
IS_FROZEN = hasattr(sys, 'frozen')
WORKING_DIR = os.path.dirname(os.path.abspath(sys.executable
if IS_FROZEN
else os.path.dirname(__file__)))
print(WORKING_DIR)
try:
with open(os.path.join(WORKING_DIR, 'ramed.config'),
encoding='UTF-8', mode='r') as f:
CONFIG = json.load(f)
except Exception as ex:
if not isinstance(ex, IOError):
print(repr(ex))
CONFIG = {}
def get_log_level(value):
level = value.upper()
    if level in ('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'):
return getattr(logging, level)
return logging.DEBUG
class Constants(object):
SYSTEM = platform.system()
IS_FROZEN = IS_FROZEN
WORKING_DIR = WORKING_DIR
MAIN_SCRIPT = "ramed.py"
APP_NAME = "RAMED Desktop"
APP_TITLE = "Export des fichiers de collecte RAMED"
APP_VERSION = "1.3"
APP_DATE = "juillet 2016"
IMAGES_FOLDER = os.path.join("media", "img")
PNG_ICON = os.path.join(IMAGES_FOLDER, "logo.png")
ICO_ICON = os.path.join(IMAGES_FOLDER, "logo.ico")
DATE_DISPLAY_FORMAT = "dd/MM/yyyy"
AUTHOR = "yɛlɛman s.à.r.l"
AUTHOR_EMAIL = "[email protected]"
AUTHOR_PHONE = "(223) 76 33 30 05"
AUTHOR_COPY = "© RAMED/UNICEF/YELEMAN"
VERBOSE = CONFIG.get('VERBOSE', False)
LOG_LEVEL = get_log_level(CONFIG.get('LOG_LEVEL', 'DEBUG'))
AGGREGATE_URL = CONFIG.get('AGGREGATE_URL', "http://192.168.0.10")
DEFAULT_FOLDER_NAME = CONFIG.get('DEFAULT_FOLDER_NAME', "Données Collecte")
ODK_TIMEOUT = CONFIG.get('ODK_TIMEOUT', 1)
@classmethod
def intpath(cls, path):
# assume path is either a str path or a list of components
if not isinstance(path, str):
path = os.path.join(*path)
return os.path.join(cls.WORKING_DIR, path)
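# Illustrative sketch (an editorial addition, not part of the original
# file): typical read-only use of the Constants holder defined above.
# Values fall back to the built-in defaults whenever ramed.config is
# missing or unreadable; the "exports" folder name below is just an example.
def _example_constants_usage():
    icon_path = Constants.intpath(Constants.PNG_ICON)       # single path string
    export_dir = Constants.intpath(["exports", "2016"])     # list of components
    window_title = "{} ({})".format(Constants.APP_TITLE, Constants.APP_VERSION)
    return icon_path, export_dir, window_title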
| mit | 2,534,540,856,373,804,000 | 26.774648 | 79 | 0.616633 | false |
SpaceGroupUCL/qgisSpaceSyntaxToolkit | esstoolkit/external/networkx/algorithms/isomorphism/ismags.py | 1 | 42775 | """
****************
ISMAGS Algorithm
****************
Provides a Python implementation of the ISMAGS algorithm. [1]_
It is capable of finding (subgraph) isomorphisms between two graphs, taking the
symmetry of the subgraph into account. In most cases the VF2 algorithm is
faster (at least on small graphs) than this implementation, but in some cases
there is an exponential number of isomorphisms that are symmetrically
equivalent. In that case, the ISMAGS algorithm will provide only one solution
per symmetry group.
>>> petersen = nx.petersen_graph()
>>> ismags = nx.isomorphism.ISMAGS(petersen, petersen)
>>> isomorphisms = list(ismags.isomorphisms_iter(symmetry=False))
>>> len(isomorphisms)
120
>>> isomorphisms = list(ismags.isomorphisms_iter(symmetry=True))
>>> answer = [{0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7:7, 8: 8, 9: 9}]
>>> answer == isomorphisms
True
In addition, this implementation also provides an interface to find the
largest common induced subgraph [2]_ between any two graphs, again taking
symmetry into account. Given `graph` and `subgraph` the algorithm will remove
nodes from the `subgraph` until `subgraph` is isomorphic to a subgraph of
`graph`. Since only the symmetry of `subgraph` is taken into account it is
worth thinking about how you provide your graphs:
>>> graph1 = nx.path_graph(4)
>>> graph2 = nx.star_graph(3)
>>> ismags = nx.isomorphism.ISMAGS(graph1, graph2)
>>> ismags.is_isomorphic()
False
>>> largest_common_subgraph = list(ismags.largest_common_subgraph())
>>> answer = [
... {1: 0, 0: 1, 2: 2},
... {2: 0, 1: 1, 3: 2}
... ]
>>> answer == largest_common_subgraph
True
>>> ismags2 = nx.isomorphism.ISMAGS(graph2, graph1)
>>> largest_common_subgraph = list(ismags2.largest_common_subgraph())
>>> answer = [
... {1: 0, 0: 1, 2: 2},
... {1: 0, 0: 1, 3: 2},
... {2: 0, 0: 1, 1: 2},
... {2: 0, 0: 1, 3: 2},
... {3: 0, 0: 1, 1: 2},
... {3: 0, 0: 1, 2: 2}
... ]
>>> answer == largest_common_subgraph
True
However, when not taking symmetry into account, it doesn't matter:
>>> largest_common_subgraph = list(ismags.largest_common_subgraph(symmetry=False))
>>> answer = [
... {1: 0, 0: 1, 2: 2},
... {1: 0, 2: 1, 0: 2},
... {2: 0, 1: 1, 3: 2},
... {2: 0, 3: 1, 1: 2},
... {1: 0, 0: 1, 2: 3},
... {1: 0, 2: 1, 0: 3},
... {2: 0, 1: 1, 3: 3},
... {2: 0, 3: 1, 1: 3},
... {1: 0, 0: 2, 2: 3},
... {1: 0, 2: 2, 0: 3},
... {2: 0, 1: 2, 3: 3},
... {2: 0, 3: 2, 1: 3}
... ]
>>> answer == largest_common_subgraph
True
>>> largest_common_subgraph = list(ismags2.largest_common_subgraph(symmetry=False))
>>> answer = [
... {1: 0, 0: 1, 2: 2},
... {1: 0, 0: 1, 3: 2},
... {2: 0, 0: 1, 1: 2},
... {2: 0, 0: 1, 3: 2},
... {3: 0, 0: 1, 1: 2},
... {3: 0, 0: 1, 2: 2},
... {1: 1, 0: 2, 2: 3},
... {1: 1, 0: 2, 3: 3},
... {2: 1, 0: 2, 1: 3},
... {2: 1, 0: 2, 3: 3},
... {3: 1, 0: 2, 1: 3},
... {3: 1, 0: 2, 2: 3}
... ]
>>> answer == largest_common_subgraph
True
Notes
-----
- The current implementation works for undirected graphs only. The algorithm
in general should work for directed graphs as well though.
- Node keys for both provided graphs need to be fully orderable as well as
hashable.
- Node and edge equality is assumed to be transitive: if A is equal to B, and
B is equal to C, then A is equal to C.
References
----------
.. [1] M. Houbraken, S. Demeyer, T. Michoel, P. Audenaert, D. Colle,
M. Pickavet, "The Index-Based Subgraph Matching Algorithm with General
Symmetries (ISMAGS): Exploiting Symmetry for Faster Subgraph
Enumeration", PLoS One 9(5): e97896, 2014.
https://doi.org/10.1371/journal.pone.0097896
.. [2] https://en.wikipedia.org/wiki/Maximum_common_induced_subgraph
"""
__all__ = ["ISMAGS"]
from collections import defaultdict, Counter
from functools import reduce, wraps
import itertools
def are_all_equal(iterable):
"""
Returns ``True`` if and only if all elements in `iterable` are equal; and
``False`` otherwise.
Parameters
----------
iterable: collections.abc.Iterable
The container whose elements will be checked.
Returns
-------
bool
``True`` iff all elements in `iterable` compare equal, ``False``
otherwise.
"""
try:
shape = iterable.shape
except AttributeError:
pass
else:
if len(shape) > 1:
message = "The function does not works on multidimension arrays."
raise NotImplementedError(message) from None
iterator = iter(iterable)
first = next(iterator, None)
return all(item == first for item in iterator)
def make_partitions(items, test):
"""
Partitions items into sets based on the outcome of ``test(item1, item2)``.
Pairs of items for which `test` returns `True` end up in the same set.
Parameters
----------
items : collections.abc.Iterable[collections.abc.Hashable]
Items to partition
test : collections.abc.Callable[collections.abc.Hashable, collections.abc.Hashable]
A function that will be called with 2 arguments, taken from items.
Should return `True` if those 2 items need to end up in the same
partition, and `False` otherwise.
Returns
-------
list[set]
A list of sets, with each set containing part of the items in `items`,
such that ``all(test(*pair) for pair in itertools.combinations(set, 2))
== True``
Notes
-----
The function `test` is assumed to be transitive: if ``test(a, b)`` and
``test(b, c)`` return ``True``, then ``test(a, c)`` must also be ``True``.
"""
partitions = []
for item in items:
for partition in partitions:
p_item = next(iter(partition))
if test(item, p_item):
partition.add(item)
break
else: # No break
partitions.append({item})
return partitions
def partition_to_color(partitions):
"""
    Creates a dictionary that maps each item of every partition in
    `partitions` to the index of that partition in `partitions`.
Parameters
----------
partitions: collections.abc.Sequence[collections.abc.Iterable]
As returned by :func:`make_partitions`.
Returns
-------
dict
"""
colors = dict()
for color, keys in enumerate(partitions):
for key in keys:
colors[key] = color
return colors
def intersect(collection_of_sets):
"""
Given an collection of sets, returns the intersection of those sets.
Parameters
----------
collection_of_sets: collections.abc.Collection[set]
A collection of sets.
Returns
-------
set
An intersection of all sets in `collection_of_sets`. Will have the same
type as the item initially taken from `collection_of_sets`.
"""
collection_of_sets = list(collection_of_sets)
first = collection_of_sets.pop()
out = reduce(set.intersection, collection_of_sets, set(first))
return type(first)(out)
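# Tiny worked example of the three helpers above (an editorial addition,
# not part of the original module), using integer parity as the test.
def _partition_helpers_example():
    """
    Partition, color and intersect small integer sets.
    >>> parts = make_partitions([1, 2, 3, 4], lambda a, b: a % 2 == b % 2)
    >>> parts == [{1, 3}, {2, 4}]
    True
    >>> partition_to_color(parts) == {1: 0, 3: 0, 2: 1, 4: 1}
    True
    >>> intersect([{1, 2, 3}, {2, 3, 4}]) == {2, 3}
    True
    """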
class ISMAGS:
"""
    Implements the ISMAGS subgraph matching algorithm. [1]_ ISMAGS stands for
"Index-based Subgraph Matching Algorithm with General Symmetries". As the
name implies, it is symmetry aware and will only generate non-symmetric
isomorphisms.
Notes
-----
The implementation imposes additional conditions compared to the VF2
algorithm on the graphs provided and the comparison functions
(:attr:`node_equality` and :attr:`edge_equality`):
- Node keys in both graphs must be orderable as well as hashable.
- Equality must be transitive: if A is equal to B, and B is equal to C,
then A must be equal to C.
Attributes
----------
graph: networkx.Graph
subgraph: networkx.Graph
node_equality: collections.abc.Callable
The function called to see if two nodes should be considered equal.
        Its signature looks like this:
``f(graph1: networkx.Graph, node1, graph2: networkx.Graph, node2) -> bool``.
`node1` is a node in `graph1`, and `node2` a node in `graph2`.
Constructed from the argument `node_match`.
edge_equality: collections.abc.Callable
The function called to see if two edges should be considered equal.
        Its signature looks like this:
``f(graph1: networkx.Graph, edge1, graph2: networkx.Graph, edge2) -> bool``.
`edge1` is an edge in `graph1`, and `edge2` an edge in `graph2`.
Constructed from the argument `edge_match`.
References
----------
.. [1] M. Houbraken, S. Demeyer, T. Michoel, P. Audenaert, D. Colle,
M. Pickavet, "The Index-Based Subgraph Matching Algorithm with General
Symmetries (ISMAGS): Exploiting Symmetry for Faster Subgraph
Enumeration", PLoS One 9(5): e97896, 2014.
https://doi.org/10.1371/journal.pone.0097896
"""
def __init__(self, graph, subgraph, node_match=None, edge_match=None, cache=None):
"""
Parameters
----------
graph: networkx.Graph
subgraph: networkx.Graph
node_match: collections.abc.Callable or None
Function used to determine whether two nodes are equivalent. Its
signature should look like ``f(n1: dict, n2: dict) -> bool``, with
`n1` and `n2` node property dicts. See also
:func:`~networkx.algorithms.isomorphism.categorical_node_match` and
friends.
If `None`, all nodes are considered equal.
edge_match: collections.abc.Callable or None
Function used to determine whether two edges are equivalent. Its
signature should look like ``f(e1: dict, e2: dict) -> bool``, with
`e1` and `e2` edge property dicts. See also
:func:`~networkx.algorithms.isomorphism.categorical_edge_match` and
friends.
If `None`, all edges are considered equal.
cache: collections.abc.Mapping
A cache used for caching graph symmetries.
"""
# TODO: graph and subgraph setter methods that invalidate the caches.
# TODO: allow for precomputed partitions and colors
self.graph = graph
self.subgraph = subgraph
self._symmetry_cache = cache
# Naming conventions are taken from the original paper. For your
# sanity:
# sg: subgraph
# g: graph
# e: edge(s)
# n: node(s)
# So: sgn means "subgraph nodes".
self._sgn_partitions_ = None
self._sge_partitions_ = None
self._sgn_colors_ = None
self._sge_colors_ = None
self._gn_partitions_ = None
self._ge_partitions_ = None
self._gn_colors_ = None
self._ge_colors_ = None
self._node_compat_ = None
self._edge_compat_ = None
if node_match is None:
self.node_equality = self._node_match_maker(lambda n1, n2: True)
self._sgn_partitions_ = [set(self.subgraph.nodes)]
self._gn_partitions_ = [set(self.graph.nodes)]
self._node_compat_ = {0: 0}
else:
self.node_equality = self._node_match_maker(node_match)
if edge_match is None:
self.edge_equality = self._edge_match_maker(lambda e1, e2: True)
self._sge_partitions_ = [set(self.subgraph.edges)]
self._ge_partitions_ = [set(self.graph.edges)]
self._edge_compat_ = {0: 0}
else:
self.edge_equality = self._edge_match_maker(edge_match)
@property
def _sgn_partitions(self):
if self._sgn_partitions_ is None:
def nodematch(node1, node2):
return self.node_equality(self.subgraph, node1, self.subgraph, node2)
self._sgn_partitions_ = make_partitions(self.subgraph.nodes, nodematch)
return self._sgn_partitions_
@property
def _sge_partitions(self):
if self._sge_partitions_ is None:
def edgematch(edge1, edge2):
return self.edge_equality(self.subgraph, edge1, self.subgraph, edge2)
self._sge_partitions_ = make_partitions(self.subgraph.edges, edgematch)
return self._sge_partitions_
@property
def _gn_partitions(self):
if self._gn_partitions_ is None:
def nodematch(node1, node2):
return self.node_equality(self.graph, node1, self.graph, node2)
self._gn_partitions_ = make_partitions(self.graph.nodes, nodematch)
return self._gn_partitions_
@property
def _ge_partitions(self):
if self._ge_partitions_ is None:
def edgematch(edge1, edge2):
return self.edge_equality(self.graph, edge1, self.graph, edge2)
self._ge_partitions_ = make_partitions(self.graph.edges, edgematch)
return self._ge_partitions_
@property
def _sgn_colors(self):
if self._sgn_colors_ is None:
self._sgn_colors_ = partition_to_color(self._sgn_partitions)
return self._sgn_colors_
@property
def _sge_colors(self):
if self._sge_colors_ is None:
self._sge_colors_ = partition_to_color(self._sge_partitions)
return self._sge_colors_
@property
def _gn_colors(self):
if self._gn_colors_ is None:
self._gn_colors_ = partition_to_color(self._gn_partitions)
return self._gn_colors_
@property
def _ge_colors(self):
if self._ge_colors_ is None:
self._ge_colors_ = partition_to_color(self._ge_partitions)
return self._ge_colors_
@property
def _node_compatibility(self):
if self._node_compat_ is not None:
return self._node_compat_
self._node_compat_ = {}
for sgn_part_color, gn_part_color in itertools.product(
range(len(self._sgn_partitions)), range(len(self._gn_partitions))
):
sgn = next(iter(self._sgn_partitions[sgn_part_color]))
gn = next(iter(self._gn_partitions[gn_part_color]))
if self.node_equality(self.subgraph, sgn, self.graph, gn):
self._node_compat_[sgn_part_color] = gn_part_color
return self._node_compat_
@property
def _edge_compatibility(self):
if self._edge_compat_ is not None:
return self._edge_compat_
self._edge_compat_ = {}
for sge_part_color, ge_part_color in itertools.product(
range(len(self._sge_partitions)), range(len(self._ge_partitions))
):
sge = next(iter(self._sge_partitions[sge_part_color]))
ge = next(iter(self._ge_partitions[ge_part_color]))
if self.edge_equality(self.subgraph, sge, self.graph, ge):
self._edge_compat_[sge_part_color] = ge_part_color
return self._edge_compat_
@staticmethod
def _node_match_maker(cmp):
@wraps(cmp)
def comparer(graph1, node1, graph2, node2):
return cmp(graph1.nodes[node1], graph2.nodes[node2])
return comparer
@staticmethod
def _edge_match_maker(cmp):
@wraps(cmp)
def comparer(graph1, edge1, graph2, edge2):
return cmp(graph1.edges[edge1], graph2.edges[edge2])
return comparer
def find_isomorphisms(self, symmetry=True):
"""Find all subgraph isomorphisms between subgraph and graph
Finds isomorphisms where :attr:`subgraph` <= :attr:`graph`.
Parameters
----------
symmetry: bool
Whether symmetry should be taken into account. If False, found
isomorphisms may be symmetrically equivalent.
Yields
------
dict
The found isomorphism mappings of {graph_node: subgraph_node}.
"""
# The networkx VF2 algorithm is slightly funny in when it yields an
# empty dict and when not.
if not self.subgraph:
yield {}
return
elif not self.graph:
return
elif len(self.graph) < len(self.subgraph):
return
if symmetry:
_, cosets = self.analyze_symmetry(
self.subgraph, self._sgn_partitions, self._sge_colors
)
constraints = self._make_constraints(cosets)
else:
constraints = []
candidates = self._find_nodecolor_candidates()
la_candidates = self._get_lookahead_candidates()
for sgn in self.subgraph:
extra_candidates = la_candidates[sgn]
if extra_candidates:
candidates[sgn] = candidates[sgn] | {frozenset(extra_candidates)}
if any(candidates.values()):
start_sgn = min(candidates, key=lambda n: min(candidates[n], key=len))
candidates[start_sgn] = (intersect(candidates[start_sgn]),)
yield from self._map_nodes(start_sgn, candidates, constraints)
else:
return
@staticmethod
def _find_neighbor_color_count(graph, node, node_color, edge_color):
"""
For `node` in `graph`, count the number of edges of a specific color
it has to nodes of a specific color.
"""
counts = Counter()
neighbors = graph[node]
for neighbor in neighbors:
n_color = node_color[neighbor]
if (node, neighbor) in edge_color:
e_color = edge_color[node, neighbor]
else:
e_color = edge_color[neighbor, node]
counts[e_color, n_color] += 1
return counts
def _get_lookahead_candidates(self):
"""
Returns a mapping of {subgraph node: collection of graph nodes} for
which the graph nodes are feasible candidates for the subgraph node, as
determined by looking ahead one edge.
"""
g_counts = {}
for gn in self.graph:
g_counts[gn] = self._find_neighbor_color_count(
self.graph, gn, self._gn_colors, self._ge_colors
)
candidates = defaultdict(set)
for sgn in self.subgraph:
sg_count = self._find_neighbor_color_count(
self.subgraph, sgn, self._sgn_colors, self._sge_colors
)
new_sg_count = Counter()
for (sge_color, sgn_color), count in sg_count.items():
try:
ge_color = self._edge_compatibility[sge_color]
gn_color = self._node_compatibility[sgn_color]
except KeyError:
pass
else:
new_sg_count[ge_color, gn_color] = count
for gn, g_count in g_counts.items():
if all(new_sg_count[x] <= g_count[x] for x in new_sg_count):
# Valid candidate
candidates[sgn].add(gn)
return candidates
def largest_common_subgraph(self, symmetry=True):
"""
Find the largest common induced subgraphs between :attr:`subgraph` and
:attr:`graph`.
Parameters
----------
symmetry: bool
Whether symmetry should be taken into account. If False, found
largest common subgraphs may be symmetrically equivalent.
Yields
------
dict
The found isomorphism mappings of {graph_node: subgraph_node}.
"""
# The networkx VF2 algorithm is slightly funny in when it yields an
# empty dict and when not.
if not self.subgraph:
yield {}
return
elif not self.graph:
return
if symmetry:
_, cosets = self.analyze_symmetry(
self.subgraph, self._sgn_partitions, self._sge_colors
)
constraints = self._make_constraints(cosets)
else:
constraints = []
candidates = self._find_nodecolor_candidates()
if any(candidates.values()):
yield from self._largest_common_subgraph(candidates, constraints)
else:
return
def analyze_symmetry(self, graph, node_partitions, edge_colors):
"""
Find a minimal set of permutations and corresponding co-sets that
describe the symmetry of :attr:`subgraph`.
Returns
-------
set[frozenset]
The found permutations. This is a set of frozenset of pairs of node
keys which can be exchanged without changing :attr:`subgraph`.
dict[collections.abc.Hashable, set[collections.abc.Hashable]]
The found co-sets. The co-sets is a dictionary of {node key:
set of node keys}. Every key-value pair describes which `values`
can be interchanged without changing nodes less than `key`.
"""
if self._symmetry_cache is not None:
key = hash(
(
tuple(graph.nodes),
tuple(graph.edges),
tuple(map(tuple, node_partitions)),
tuple(edge_colors.items()),
)
)
if key in self._symmetry_cache:
return self._symmetry_cache[key]
node_partitions = list(
self._refine_node_partitions(graph, node_partitions, edge_colors)
)
assert len(node_partitions) == 1
node_partitions = node_partitions[0]
permutations, cosets = self._process_ordered_pair_partitions(
graph, node_partitions, node_partitions, edge_colors
)
if self._symmetry_cache is not None:
self._symmetry_cache[key] = permutations, cosets
return permutations, cosets
def is_isomorphic(self, symmetry=False):
"""
Returns True if :attr:`graph` is isomorphic to :attr:`subgraph` and
False otherwise.
Returns
-------
bool
"""
return len(self.subgraph) == len(self.graph) and self.subgraph_is_isomorphic(
symmetry
)
def subgraph_is_isomorphic(self, symmetry=False):
"""
Returns True if a subgraph of :attr:`graph` is isomorphic to
:attr:`subgraph` and False otherwise.
Returns
-------
bool
"""
# symmetry=False, since we only need to know whether there is any
# example; figuring out all symmetry elements probably costs more time
# than it gains.
isom = next(self.subgraph_isomorphisms_iter(symmetry=symmetry), None)
return isom is not None
def isomorphisms_iter(self, symmetry=True):
"""
Does the same as :meth:`find_isomorphisms` if :attr:`graph` and
:attr:`subgraph` have the same number of nodes.
"""
if len(self.graph) == len(self.subgraph):
yield from self.subgraph_isomorphisms_iter(symmetry=symmetry)
def subgraph_isomorphisms_iter(self, symmetry=True):
"""Alternative name for :meth:`find_isomorphisms`."""
return self.find_isomorphisms(symmetry)
def _find_nodecolor_candidates(self):
"""
Per node in subgraph find all nodes in graph that have the same color.
"""
candidates = defaultdict(set)
for sgn in self.subgraph.nodes:
sgn_color = self._sgn_colors[sgn]
if sgn_color in self._node_compatibility:
gn_color = self._node_compatibility[sgn_color]
candidates[sgn].add(frozenset(self._gn_partitions[gn_color]))
else:
candidates[sgn].add(frozenset())
candidates = dict(candidates)
for sgn, options in candidates.items():
candidates[sgn] = frozenset(options)
return candidates
@staticmethod
def _make_constraints(cosets):
"""
Turn cosets into constraints.
"""
constraints = []
for node_i, node_ts in cosets.items():
for node_t in node_ts:
if node_i != node_t:
# Node i must be smaller than node t.
constraints.append((node_i, node_t))
return constraints
@staticmethod
def _find_node_edge_color(graph, node_colors, edge_colors):
"""
For every node in graph, come up with a color that combines 1) the
color of the node, and 2) the number of edges of a color to each type
of node.
"""
counts = defaultdict(lambda: defaultdict(int))
for node1, node2 in graph.edges:
if (node1, node2) in edge_colors:
# FIXME directed graphs
ecolor = edge_colors[node1, node2]
else:
ecolor = edge_colors[node2, node1]
# Count per node how many edges it has of what color to nodes of
# what color
counts[node1][ecolor, node_colors[node2]] += 1
counts[node2][ecolor, node_colors[node1]] += 1
node_edge_colors = dict()
for node in graph.nodes:
node_edge_colors[node] = node_colors[node], set(counts[node].items())
return node_edge_colors
@staticmethod
def _get_permutations_by_length(items):
"""
Get all permutations of items, but only permute items with the same
length.
>>> found = list(ISMAGS._get_permutations_by_length([[1], [2], [3, 4], [4, 5]]))
>>> answer = [
... (([1], [2]), ([3, 4], [4, 5])),
... (([1], [2]), ([4, 5], [3, 4])),
... (([2], [1]), ([3, 4], [4, 5])),
... (([2], [1]), ([4, 5], [3, 4])),
... ]
>>> found == answer
True
"""
by_len = defaultdict(list)
for item in items:
by_len[len(item)].append(item)
yield from itertools.product(
*(itertools.permutations(by_len[l]) for l in sorted(by_len))
)
@classmethod
def _refine_node_partitions(cls, graph, node_partitions, edge_colors, branch=False):
"""
Given a partition of nodes in graph, make the partitions smaller such
that all nodes in a partition have 1) the same color, and 2) the same
number of edges to specific other partitions.
"""
def equal_color(node1, node2):
return node_edge_colors[node1] == node_edge_colors[node2]
node_partitions = list(node_partitions)
node_colors = partition_to_color(node_partitions)
node_edge_colors = cls._find_node_edge_color(graph, node_colors, edge_colors)
if all(
are_all_equal(node_edge_colors[node] for node in partition)
for partition in node_partitions
):
yield node_partitions
return
new_partitions = []
output = [new_partitions]
for partition in node_partitions:
if not are_all_equal(node_edge_colors[node] for node in partition):
refined = make_partitions(partition, equal_color)
if (
branch
and len(refined) != 1
and len({len(r) for r in refined}) != len([len(r) for r in refined])
):
# This is where it breaks. There are multiple new cells
# in refined with the same length, and their order
# matters.
# So option 1) Hit it with a big hammer and simply make all
# orderings.
permutations = cls._get_permutations_by_length(refined)
new_output = []
for n_p in output:
for permutation in permutations:
new_output.append(n_p + list(permutation[0]))
output = new_output
else:
for n_p in output:
n_p.extend(sorted(refined, key=len))
else:
for n_p in output:
n_p.append(partition)
for n_p in output:
yield from cls._refine_node_partitions(graph, n_p, edge_colors, branch)
def _edges_of_same_color(self, sgn1, sgn2):
"""
Returns all edges in :attr:`graph` that have the same colour as the
edge between sgn1 and sgn2 in :attr:`subgraph`.
"""
if (sgn1, sgn2) in self._sge_colors:
# FIXME directed graphs
sge_color = self._sge_colors[sgn1, sgn2]
else:
sge_color = self._sge_colors[sgn2, sgn1]
if sge_color in self._edge_compatibility:
ge_color = self._edge_compatibility[sge_color]
g_edges = self._ge_partitions[ge_color]
else:
g_edges = []
return g_edges
def _map_nodes(self, sgn, candidates, constraints, mapping=None, to_be_mapped=None):
"""
Find all subgraph isomorphisms honoring constraints.
"""
if mapping is None:
mapping = {}
else:
mapping = mapping.copy()
if to_be_mapped is None:
to_be_mapped = set(self.subgraph.nodes)
# Note, we modify candidates here. Doesn't seem to affect results, but
# remember this.
# candidates = candidates.copy()
sgn_candidates = intersect(candidates[sgn])
candidates[sgn] = frozenset([sgn_candidates])
for gn in sgn_candidates:
# We're going to try to map sgn to gn.
if gn in mapping.values() or sgn not in to_be_mapped:
# gn is already mapped to something
continue # pragma: no cover
# REDUCTION and COMBINATION
mapping[sgn] = gn
# BASECASE
if to_be_mapped == set(mapping.keys()):
yield {v: k for k, v in mapping.items()}
continue
left_to_map = to_be_mapped - set(mapping.keys())
new_candidates = candidates.copy()
sgn_neighbours = set(self.subgraph[sgn])
not_gn_neighbours = set(self.graph.nodes) - set(self.graph[gn])
for sgn2 in left_to_map:
if sgn2 not in sgn_neighbours:
gn2_options = not_gn_neighbours
else:
# Get all edges to gn of the right color:
g_edges = self._edges_of_same_color(sgn, sgn2)
# FIXME directed graphs
# And all nodes involved in those which are connected to gn
gn2_options = {n for e in g_edges for n in e if gn in e}
# Node color compatibility should be taken care of by the
                # initial candidate lists made by _find_nodecolor_candidates
# Add gn2_options to the right collection. Since new_candidates
# is a dict of frozensets of frozensets of node indices it's
# a bit clunky. We can't do .add, and + also doesn't work. We
# could do |, but I deem union to be clearer.
new_candidates[sgn2] = new_candidates[sgn2].union(
[frozenset(gn2_options)]
)
if (sgn, sgn2) in constraints:
gn2_options = {gn2 for gn2 in self.graph if gn2 > gn}
elif (sgn2, sgn) in constraints:
gn2_options = {gn2 for gn2 in self.graph if gn2 < gn}
else:
continue # pragma: no cover
new_candidates[sgn2] = new_candidates[sgn2].union(
[frozenset(gn2_options)]
)
# The next node is the one that is unmapped and has fewest
# candidates
# Pylint disables because it's a one-shot function.
next_sgn = min(
left_to_map, key=lambda n: min(new_candidates[n], key=len)
) # pylint: disable=cell-var-from-loop
yield from self._map_nodes(
next_sgn,
new_candidates,
constraints,
mapping=mapping,
to_be_mapped=to_be_mapped,
)
# Unmap sgn-gn. Strictly not necessary since it'd get overwritten
# when making a new mapping for sgn.
# del mapping[sgn]
def _largest_common_subgraph(self, candidates, constraints, to_be_mapped=None):
"""
Find all largest common subgraphs honoring constraints.
"""
if to_be_mapped is None:
to_be_mapped = {frozenset(self.subgraph.nodes)}
# The LCS problem is basically a repeated subgraph isomorphism problem
# with smaller and smaller subgraphs. We store the nodes that are
# "part of" the subgraph in to_be_mapped, and we make it a little
# smaller every iteration.
        # pylint disable because it's guarded against by default value
current_size = len(
next(iter(to_be_mapped), [])
) # pylint: disable=stop-iteration-return
found_iso = False
if current_size <= len(self.graph):
# There's no point in trying to find isomorphisms of
# graph >= subgraph if subgraph has more nodes than graph.
# Try the isomorphism first with the nodes with lowest ID. So sort
# them. Those are more likely to be part of the final
# correspondence. This makes finding the first answer(s) faster. In
# theory.
for nodes in sorted(to_be_mapped, key=sorted):
# Find the isomorphism between subgraph[to_be_mapped] <= graph
next_sgn = min(nodes, key=lambda n: min(candidates[n], key=len))
isomorphs = self._map_nodes(
next_sgn, candidates, constraints, to_be_mapped=nodes
)
# This is effectively `yield from isomorphs`, except that we look
# whether an item was yielded.
try:
item = next(isomorphs)
except StopIteration:
pass
else:
yield item
yield from isomorphs
found_iso = True
# BASECASE
if found_iso or current_size == 1:
# Shrinking has no point because either 1) we end up with a smaller
# common subgraph (and we want the largest), or 2) there'll be no
# more subgraph.
return
left_to_be_mapped = set()
for nodes in to_be_mapped:
for sgn in nodes:
# We're going to remove sgn from to_be_mapped, but subject to
# symmetry constraints. We know that for every constraint we
# have those subgraph nodes are equal. So whenever we would
# remove the lower part of a constraint, remove the higher
                # instead. This is all dealt with by _remove_node. And because
# left_to_be_mapped is a set, we don't do double work.
# And finally, make the subgraph one node smaller.
# REDUCTION
new_nodes = self._remove_node(sgn, nodes, constraints)
left_to_be_mapped.add(new_nodes)
# COMBINATION
yield from self._largest_common_subgraph(
candidates, constraints, to_be_mapped=left_to_be_mapped
)
@staticmethod
def _remove_node(node, nodes, constraints):
"""
Returns a new set where node has been removed from nodes, subject to
symmetry constraints. We know, that for every constraint we have
those subgraph nodes are equal. So whenever we would remove the
lower part of a constraint, remove the higher instead.
"""
while True:
for low, high in constraints:
if low == node and high in nodes:
node = high
break
else: # no break, couldn't find node in constraints
break
return frozenset(nodes - {node})
@staticmethod
def _find_permutations(top_partitions, bottom_partitions):
"""
Return the pairs of top/bottom partitions where the partitions are
different. Ensures that all partitions in both top and bottom
partitions have size 1.
"""
# Find permutations
permutations = set()
for top, bot in zip(top_partitions, bottom_partitions):
# top and bot have only one element
if len(top) != 1 or len(bot) != 1:
raise IndexError(
"Not all nodes are coupled. This is"
f" impossible: {top_partitions}, {bottom_partitions}"
)
if top != bot:
permutations.add(frozenset((next(iter(top)), next(iter(bot)))))
return permutations
@staticmethod
def _update_orbits(orbits, permutations):
"""
Update orbits based on permutations. Orbits is modified in place.
For every pair of items in permutations their respective orbits are
merged.
"""
for permutation in permutations:
node, node2 = permutation
# Find the orbits that contain node and node2, and replace the
# orbit containing node with the union
first = second = None
for idx, orbit in enumerate(orbits):
if first is not None and second is not None:
break
if node in orbit:
first = idx
if node2 in orbit:
second = idx
if first != second:
orbits[first].update(orbits[second])
del orbits[second]
def _couple_nodes(
self,
top_partitions,
bottom_partitions,
pair_idx,
t_node,
b_node,
graph,
edge_colors,
):
"""
Generate new partitions from top and bottom_partitions where t_node is
coupled to b_node. pair_idx is the index of the partitions where t_ and
b_node can be found.
"""
t_partition = top_partitions[pair_idx]
b_partition = bottom_partitions[pair_idx]
assert t_node in t_partition and b_node in b_partition
# Couple node to node2. This means they get their own partition
new_top_partitions = [top.copy() for top in top_partitions]
new_bottom_partitions = [bot.copy() for bot in bottom_partitions]
new_t_groups = {t_node}, t_partition - {t_node}
new_b_groups = {b_node}, b_partition - {b_node}
# Replace the old partitions with the coupled ones
del new_top_partitions[pair_idx]
del new_bottom_partitions[pair_idx]
new_top_partitions[pair_idx:pair_idx] = new_t_groups
new_bottom_partitions[pair_idx:pair_idx] = new_b_groups
new_top_partitions = self._refine_node_partitions(
graph, new_top_partitions, edge_colors
)
new_bottom_partitions = self._refine_node_partitions(
graph, new_bottom_partitions, edge_colors, branch=True
)
new_top_partitions = list(new_top_partitions)
assert len(new_top_partitions) == 1
new_top_partitions = new_top_partitions[0]
for bot in new_bottom_partitions:
yield list(new_top_partitions), bot
def _process_ordered_pair_partitions(
self,
graph,
top_partitions,
bottom_partitions,
edge_colors,
orbits=None,
cosets=None,
):
"""
Processes ordered pair partitions as per the reference paper. Finds and
returns all permutations and cosets that leave the graph unchanged.
"""
if orbits is None:
orbits = [{node} for node in graph.nodes]
else:
# Note that we don't copy orbits when we are given one. This means
# we leak information between the recursive branches. This is
# intentional!
orbits = orbits
if cosets is None:
cosets = {}
else:
cosets = cosets.copy()
assert all(
len(t_p) == len(b_p) for t_p, b_p in zip(top_partitions, bottom_partitions)
)
# BASECASE
if all(len(top) == 1 for top in top_partitions):
# All nodes are mapped
permutations = self._find_permutations(top_partitions, bottom_partitions)
self._update_orbits(orbits, permutations)
if permutations:
return [permutations], cosets
else:
return [], cosets
permutations = []
unmapped_nodes = {
(node, idx)
for idx, t_partition in enumerate(top_partitions)
for node in t_partition
if len(t_partition) > 1
}
node, pair_idx = min(unmapped_nodes)
b_partition = bottom_partitions[pair_idx]
for node2 in sorted(b_partition):
if len(b_partition) == 1:
# Can never result in symmetry
continue
if node != node2 and any(
node in orbit and node2 in orbit for orbit in orbits
):
# Orbit prune branch
continue
# REDUCTION
# Couple node to node2
partitions = self._couple_nodes(
top_partitions,
bottom_partitions,
pair_idx,
node,
node2,
graph,
edge_colors,
)
for opp in partitions:
new_top_partitions, new_bottom_partitions = opp
new_perms, new_cosets = self._process_ordered_pair_partitions(
graph,
new_top_partitions,
new_bottom_partitions,
edge_colors,
orbits,
cosets,
)
# COMBINATION
permutations += new_perms
cosets.update(new_cosets)
mapped = {
k
for top, bottom in zip(top_partitions, bottom_partitions)
for k in top
if len(top) == 1 and top == bottom
}
ks = {k for k in graph.nodes if k < node}
# Have all nodes with ID < node been mapped?
find_coset = ks <= mapped and node not in cosets
if find_coset:
# Find the orbit that contains node
for orbit in orbits:
if node in orbit:
cosets[node] = orbit.copy()
return permutations, cosets
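# Illustrative sketch (an editorial addition, not part of the original
# module): calling the symmetry analysis directly, using the same private
# partition/color attributes that ``find_isomorphisms`` passes internally.
def _example_symmetry_analysis():
    import networkx as nx
    cycle = nx.cycle_graph(4)
    ismags = ISMAGS(cycle, cycle)
    permutations, cosets = ismags.analyze_symmetry(
        ismags.subgraph, ismags._sgn_partitions, ismags._sge_colors
    )
    # ``permutations`` collects frozenset node pairs that can be swapped
    # without changing the subgraph; ``cosets`` maps a node to the nodes it
    # may be interchanged with once all smaller nodes are fixed.
    return permutations, cosets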
| gpl-3.0 | -174,510,596,902,812,900 | 36.002595 | 88 | 0.56678 | false |
ngoix/OCRF | sklearn/datasets/fetch_ml_mieux.py | 1 | 13778 | from zipfile import ZipFile
from io import BytesIO
import logging
from os.path import exists, join
try:
from urllib2 import urlopen
except ImportError:
from urllib.request import urlopen
import numpy as np
from .base import get_data_home
from .base import Bunch
from .base import _pkl_filepath
from ..utils.fixes import makedirs
from ..externals import joblib
from ..utils import check_random_state
logger = logging.getLogger()
def fetch_spambase(data_home=None, download_if_missing=True,
random_state=None, shuffle=False):
"""Load the spambase dataset, downloading it if necessary.
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
data_home : string, optional
Specify another download and cache folder for the datasets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
download_if_missing : boolean, default=True
If False, raise a IOError if the data is not locally available
instead of trying to download the data from the source site.
random_state : int, RandomState instance or None, optional (default=None)
Random state for shuffling the dataset.
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
shuffle : bool, default=False
Whether to shuffle dataset.
Returns
-------
dataset : dict-like object with the following attributes:
dataset.data : numpy array of shape (581012, 54)
Each row corresponds to the 54 features in the dataset.
dataset.target : numpy array of shape (581012,)
Each value corresponds to one of the 7 forest spambases with values
ranging between 1 to 7.
dataset.DESCR : string
Description of the forest spambase dataset.
"""
URL = ('http://archive.ics.uci.edu/ml/'
'machine-learning-databases/spambase/spambase.zip')
data_home = get_data_home(data_home=data_home)
spambase_dir = join(data_home, "spambase")
samples_path = _pkl_filepath(spambase_dir, "samples")
targets_path = _pkl_filepath(spambase_dir, "targets")
available = exists(samples_path)
if download_if_missing and not available:
makedirs(spambase_dir, exist_ok=True)
logger.warning("Downloading %s" % URL)
f = BytesIO(urlopen(URL).read())
file_ = ZipFile(f, mode='r').open('spambase.data')
Xy = np.genfromtxt(file_, delimiter=',')
X = Xy[:, :-1]
y = Xy[:, -1].astype(np.int32)
joblib.dump(X, samples_path, compress=9)
joblib.dump(y, targets_path, compress=9)
try:
X, y
except NameError:
X = joblib.load(samples_path)
y = joblib.load(targets_path)
if shuffle:
ind = np.arange(X.shape[0])
rng = check_random_state(random_state)
rng.shuffle(ind)
X = X[ind]
y = y[ind]
return Bunch(data=X, target=y, DESCR=__doc__)
def fetch_annthyroid(data_home=None, download_if_missing=True,
random_state=None, shuffle=False):
"""Load the annthyroid dataset, downloading it if necessary.
"""
URL1 = ('http://archive.ics.uci.edu/ml/'
'machine-learning-databases/thyroid-disease/ann-train.data')
URL2 = ('http://archive.ics.uci.edu/ml/'
'machine-learning-databases/thyroid-disease/ann-test.data')
data_home = get_data_home(data_home=data_home)
annthyroid_dir = join(data_home, "annthyroid")
samples_path = _pkl_filepath(annthyroid_dir, "samples")
targets_path = _pkl_filepath(annthyroid_dir, "targets")
available = exists(samples_path)
if download_if_missing and not available:
makedirs(annthyroid_dir, exist_ok=True)
logger.warning("Downloading %s" % URL1)
f = BytesIO(urlopen(URL1).read())
# ou X = np.load(f)
Xy1 = np.genfromtxt(f, delimiter=' ')
logger.warning("Downloading %s" % URL2)
f = BytesIO(urlopen(URL2).read())
Xy2 = np.genfromtxt(f, delimiter=' ')
Xy = np.r_[Xy1, Xy2]
X = Xy[:, :-1]
y = Xy[:, -1].astype(np.int32)
joblib.dump(X, samples_path, compress=9)
joblib.dump(y, targets_path, compress=9)
try:
X, y
except NameError:
X = joblib.load(samples_path)
y = joblib.load(targets_path)
if shuffle:
ind = np.arange(X.shape[0])
rng = check_random_state(random_state)
rng.shuffle(ind)
X = X[ind]
y = y[ind]
return Bunch(data=X, target=y, DESCR=__doc__)
def fetch_arrhythmia(data_home=None, download_if_missing=True,
random_state=None, shuffle=False):
"""Load the arrhythmia dataset, downloading it if necessary.
"""
URL = ('http://archive.ics.uci.edu/ml/'
'machine-learning-databases/arrhythmia/arrhythmia.data')
data_home = get_data_home(data_home=data_home)
arrhythmia_dir = join(data_home, "arrhythmia")
samples_path = _pkl_filepath(arrhythmia_dir, "samples")
targets_path = _pkl_filepath(arrhythmia_dir, "targets")
available = exists(samples_path)
if download_if_missing and not available:
makedirs(arrhythmia_dir, exist_ok=True)
logger.warning("Downloading %s" % URL)
f = BytesIO(urlopen(URL).read())
# ou X = np.load(f)
Xy = np.genfromtxt(f, delimiter=',')
X = Xy[:, :-1]
y = Xy[:, -1].astype(np.int32)
joblib.dump(X, samples_path, compress=9)
joblib.dump(y, targets_path, compress=9)
try:
X, y
except NameError:
X = joblib.load(samples_path)
y = joblib.load(targets_path)
if shuffle:
ind = np.arange(X.shape[0])
rng = check_random_state(random_state)
rng.shuffle(ind)
X = X[ind]
y = y[ind]
return Bunch(data=X, target=y, DESCR=__doc__)
def fetch_pendigits(data_home=None, download_if_missing=True,
random_state=None, shuffle=False):
"""Load the pendigits dataset, downloading it if necessary.
"""
URL1 = ('http://archive.ics.uci.edu/ml/'
'machine-learning-databases/pendigits/pendigits.tra')
URL2 = ('http://archive.ics.uci.edu/ml/'
'machine-learning-databases/pendigits/pendigits.tes')
data_home = get_data_home(data_home=data_home)
pendigits_dir = join(data_home, "pendigits")
samples_path = _pkl_filepath(pendigits_dir, "samples")
targets_path = _pkl_filepath(pendigits_dir, "targets")
available = exists(samples_path)
if download_if_missing and not available:
makedirs(pendigits_dir, exist_ok=True)
logger.warning("Downloading %s" % URL1)
f = BytesIO(urlopen(URL1).read())
        # or X = np.load(f)
Xy1 = np.genfromtxt(f, delimiter=',')
logger.warning("Downloading %s" % URL2)
f = BytesIO(urlopen(URL2).read())
Xy2 = np.genfromtxt(f, delimiter=',')
Xy = np.r_[Xy1, Xy2]
X = Xy[:, :-1]
y = Xy[:, -1].astype(np.int32)
joblib.dump(X, samples_path, compress=9)
joblib.dump(y, targets_path, compress=9)
try:
X, y
except NameError:
X = joblib.load(samples_path)
y = joblib.load(targets_path)
if shuffle:
ind = np.arange(X.shape[0])
rng = check_random_state(random_state)
rng.shuffle(ind)
X = X[ind]
y = y[ind]
return Bunch(data=X, target=y, DESCR=__doc__)
def fetch_pima(data_home=None, download_if_missing=True,
random_state=None, shuffle=False):
"""Load the pima-indians-diabetes dataset, downloading it if necessary.
"""
URL = ('http://archive.ics.uci.edu/ml/machine-learning-databases/'
'pima-indians-diabetes/pima-indians-diabetes.data')
data_home = get_data_home(data_home=data_home)
pima_dir = join(data_home, "pima")
samples_path = _pkl_filepath(pima_dir, "samples")
targets_path = _pkl_filepath(pima_dir, "targets")
available = exists(samples_path)
if download_if_missing and not available:
makedirs(pima_dir, exist_ok=True)
logger.warning("Downloading %s" % URL)
f = BytesIO(urlopen(URL).read())
        # or X = np.load(f)
Xy = np.genfromtxt(f, delimiter=',')
X = Xy[:, :-1]
y = Xy[:, -1].astype(np.int32)
joblib.dump(X, samples_path, compress=9)
joblib.dump(y, targets_path, compress=9)
try:
X, y
except NameError:
X = joblib.load(samples_path)
y = joblib.load(targets_path)
if shuffle:
ind = np.arange(X.shape[0])
rng = check_random_state(random_state)
rng.shuffle(ind)
X = X[ind]
y = y[ind]
return Bunch(data=X, target=y, DESCR=__doc__)
def fetch_wilt(data_home=None, download_if_missing=True,
random_state=None, shuffle=False):
"""Load the wilt dataset, downloading it if necessary.
"""
URL = ('http://archive.ics.uci.edu/ml/'
'machine-learning-databases/00285/wilt.zip')
data_home = get_data_home(data_home=data_home)
wilt_dir = join(data_home, "wilt")
samples_path = _pkl_filepath(wilt_dir, "samples")
targets_path = _pkl_filepath(wilt_dir, "targets")
available = exists(samples_path)
if download_if_missing and not available:
makedirs(wilt_dir, exist_ok=True)
logger.warning("Downloading %s" % URL)
f = BytesIO(urlopen(URL).read())
        # or X = np.load(f)
ff = ZipFile(f, mode='r')
file1 = ff.open('training.csv')
Xy1 = np.genfromtxt(file1, delimiter=',', dtype=object)
file2 = ff.open('testing.csv')
Xy2 = np.genfromtxt(file2, delimiter=',', dtype=object)
# the first row is nan:
Xy1 = Xy1[1:, :]
Xy2 = Xy2[1:, :]
Xy = np.r_[Xy1, Xy2]
X = Xy[:, 1:].astype(float)
y = Xy[:, 0]
joblib.dump(X, samples_path, compress=9)
joblib.dump(y, targets_path, compress=9)
try:
X, y
except NameError:
X = joblib.load(samples_path)
y = joblib.load(targets_path)
if shuffle:
ind = np.arange(X.shape[0])
rng = check_random_state(random_state)
rng.shuffle(ind)
X = X[ind]
y = y[ind]
return Bunch(data=X, target=y, DESCR=__doc__)
def fetch_internet_ads(data_home=None, download_if_missing=True,
random_state=None, shuffle=False):
"""Load the internet_ads dataset, downloading it if necessary.
"""
URL = ('http://archive.ics.uci.edu/ml/machine-learning-databases/'
'internet_ads/ad.data')
data_home = get_data_home(data_home=data_home)
internet_ads_dir = join(data_home, "internet_ads")
samples_path = _pkl_filepath(internet_ads_dir, "samples")
targets_path = _pkl_filepath(internet_ads_dir, "targets")
available = exists(samples_path)
if download_if_missing and not available:
makedirs(internet_ads_dir, exist_ok=True)
logger.warning("Downloading %s" % URL)
f = BytesIO(urlopen(URL).read())
Xy = np.genfromtxt(f, delimiter=',', dtype=object)
X = Xy[:, :-1].astype(object)
X = np.delete(X, [0, 1, 2, 3], axis=1)
# remaining features are not continuous: not adapted to OneClassRF
X = X.astype(float)
y = Xy[:, -1]
joblib.dump(X, samples_path, compress=9)
joblib.dump(y, targets_path, compress=9)
try:
X, y
except NameError:
X = joblib.load(samples_path)
y = joblib.load(targets_path)
if shuffle:
ind = np.arange(X.shape[0])
rng = check_random_state(random_state)
rng.shuffle(ind)
X = X[ind]
y = y[ind]
return Bunch(data=X, target=y, DESCR=__doc__)
def fetch_adult(data_home=None, download_if_missing=True,
random_state=None, shuffle=False):
"""Load the adult dataset, downloading it if necessary.
"""
URL1 = ('http://archive.ics.uci.edu/ml/'
'machine-learning-databases/adult/adult.data')
URL2 = ('http://archive.ics.uci.edu/ml/'
'machine-learning-databases/adult/adult.test')
data_home = get_data_home(data_home=data_home)
adult_dir = join(data_home, "adult")
samples_path = _pkl_filepath(adult_dir, "samples")
targets_path = _pkl_filepath(adult_dir, "targets")
available = exists(samples_path)
if download_if_missing and not available:
makedirs(adult_dir, exist_ok=True)
logger.warning("Downloading %s" % URL1)
f = BytesIO(urlopen(URL1).read())
        # or X = np.load(f)
Xy1 = np.genfromtxt(f, delimiter=',', dtype=object)
# select continuous features:
Xy1 = Xy1[:, [0, 2, 4, 10, 11, 12, -1]]
logger.warning("Downloading %s" % URL2)
f = BytesIO(urlopen(URL2).read())
        # same as Xy1, but skip the first line, which contains instructions:
Xy2 = np.genfromtxt(f, delimiter=',', skip_header=1, dtype=object)
Xy2 = Xy2[:, [0, 2, 4, 10, 11, 12, -1]]
Xy = np.r_[Xy1, Xy2]
X = Xy[:, :-1].astype(float)
y = Xy[:, -1]
joblib.dump(X, samples_path, compress=9)
joblib.dump(y, targets_path, compress=9)
try:
X, y
except NameError:
X = joblib.load(samples_path)
y = joblib.load(targets_path)
if shuffle:
ind = np.arange(X.shape[0])
rng = check_random_state(random_state)
rng.shuffle(ind)
X = X[ind]
y = y[ind]
return Bunch(data=X, target=y, DESCR=__doc__)
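# Illustrative usage sketch (not part of the original module); all fetchers
# above follow the same download/cache/shuffle pattern, so any of them can be
# called the same way:
if __name__ == '__main__':
    adult = fetch_adult(shuffle=True, random_state=0)
    print(adult.data.shape, adult.target.shape)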
| bsd-3-clause | -8,359,151,599,805,482,000 | 31.495283 | 78 | 0.604369 | false |
capitalone/cloud-custodian | c7n/filters/vpc.py | 1 | 10368 | # Copyright 2016-2017 Capital One Services, LLC
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
from c7n.exceptions import PolicyValidationError
from c7n.utils import local_session, type_schema
from .core import Filter, ValueFilter
from .related import RelatedResourceFilter
class MatchResourceValidator:
def validate(self):
if self.data.get('match-resource'):
            self.required_keys = {'key'}
return super(MatchResourceValidator, self).validate()
class SecurityGroupFilter(MatchResourceValidator, RelatedResourceFilter):
"""Filter a resource by its associated security groups."""
schema = type_schema(
'security-group', rinherit=ValueFilter.schema,
**{'match-resource': {'type': 'boolean'},
'operator': {'enum': ['and', 'or']}})
schema_alias = True
RelatedResource = "c7n.resources.vpc.SecurityGroup"
AnnotationKey = "matched-security-groups"
class SubnetFilter(MatchResourceValidator, RelatedResourceFilter):
"""Filter a resource by its associated subnets."""
schema = type_schema(
'subnet', rinherit=ValueFilter.schema,
**{'match-resource': {'type': 'boolean'},
'operator': {'enum': ['and', 'or']}})
schema_alias = True
RelatedResource = "c7n.resources.vpc.Subnet"
AnnotationKey = "matched-subnets"
class VpcFilter(MatchResourceValidator, RelatedResourceFilter):
"""Filter a resource by its associated vpc."""
schema = type_schema(
'vpc', rinherit=ValueFilter.schema,
**{'match-resource': {'type': 'boolean'},
'operator': {'enum': ['and', 'or']}})
schema_alias = True
RelatedResource = "c7n.resources.vpc.Vpc"
AnnotationKey = "matched-vpcs"
class DefaultVpcBase(Filter):
"""Filter to resources in a default vpc."""
vpcs = None
default_vpc = None
permissions = ('ec2:DescribeVpcs',)
def match(self, vpc_id):
if self.default_vpc is None:
self.log.debug("querying default vpc %s" % vpc_id)
client = local_session(self.manager.session_factory).client('ec2')
vpcs = [v['VpcId'] for v
in client.describe_vpcs()['Vpcs']
if v['IsDefault']]
if vpcs:
self.default_vpc = vpcs.pop()
return vpc_id == self.default_vpc and True or False
class NetworkLocation(Filter):
"""On a network attached resource, determine intersection of
security-group attributes, subnet attributes, and resource attributes.
    The use case is a bit specialized; for most use cases the `subnet`
    and `security-group` filters suffice. But say, for example, you wanted to
    verify that an ec2 instance was only using subnets and security groups
    with a given tag value, and that tag was not present on the resource.
:Example:
.. code-block:: yaml
policies:
- name: ec2-mismatched-sg-remove
resource: ec2
filters:
- type: network-location
compare: ["resource","security-group"]
key: "tag:TEAM_NAME"
ignore:
- "tag:TEAM_NAME": Enterprise
actions:
- type: modify-security-groups
remove: network-location
isolation-group: sg-xxxxxxxx
"""
schema = type_schema(
'network-location',
**{'missing-ok': {
'type': 'boolean',
'default': False,
'description': (
"How to handle missing keys on elements, by default this causes"
"resources to be considered not-equal")},
'match': {'type': 'string', 'enum': ['equal', 'not-equal'],
                     'default': 'not-equal'},
'compare': {
'type': 'array',
'description': (
'Which elements of network location should be considered when'
' matching.'),
'default': ['resource', 'subnet', 'security-group'],
'items': {
'enum': ['resource', 'subnet', 'security-group']}},
'key': {
'type': 'string',
'description': 'The attribute expression that should be matched on'},
'max-cardinality': {
'type': 'integer', 'default': 1,
'title': ''},
'ignore': {'type': 'array', 'items': {'type': 'object'}},
'required': ['key'],
})
schema_alias = True
permissions = ('ec2:DescribeSecurityGroups', 'ec2:DescribeSubnets')
def validate(self):
rfilters = self.manager.filter_registry.keys()
if 'subnet' not in rfilters:
raise PolicyValidationError(
"network-location requires resource subnet filter availability on %s" % (
self.manager.data))
if 'security-group' not in rfilters:
raise PolicyValidationError(
"network-location requires resource security-group filter availability on %s" % (
self.manager.data))
return self
def process(self, resources, event=None):
self.sg = self.manager.filter_registry.get('security-group')({}, self.manager)
related_sg = self.sg.get_related(resources)
self.subnet = self.manager.filter_registry.get('subnet')({}, self.manager)
related_subnet = self.subnet.get_related(resources)
self.sg_model = self.manager.get_resource_manager('security-group').get_model()
self.subnet_model = self.manager.get_resource_manager('subnet').get_model()
self.vf = self.manager.filter_registry.get('value')({}, self.manager)
# filter options
key = self.data.get('key')
self.compare = self.data.get('compare', ['subnet', 'security-group', 'resource'])
self.max_cardinality = self.data.get('max-cardinality', 1)
self.match = self.data.get('match', 'not-equal')
self.missing_ok = self.data.get('missing-ok', False)
results = []
for r in resources:
resource_sgs = self.filter_ignored(
[related_sg[sid] for sid in self.sg.get_related_ids([r])])
resource_subnets = self.filter_ignored([
related_subnet[sid] for sid in self.subnet.get_related_ids([r])])
found = self.process_resource(r, resource_sgs, resource_subnets, key)
if found:
results.append(found)
return results
def filter_ignored(self, resources):
ignores = self.data.get('ignore', ())
results = []
for r in resources:
found = False
for i in ignores:
for k, v in i.items():
if self.vf.get_resource_value(k, r) == v:
found = True
if found is True:
break
if found is True:
continue
results.append(r)
return results
def process_resource(self, r, resource_sgs, resource_subnets, key):
evaluation = []
sg_space = set()
subnet_space = set()
if 'subnet' in self.compare:
subnet_values = {
rsub[self.subnet_model.id]: self.subnet.get_resource_value(key, rsub)
for rsub in resource_subnets}
if not self.missing_ok and None in subnet_values.values():
evaluation.append({
'reason': 'SubnetLocationAbsent',
'subnets': subnet_values})
subnet_space = set(filter(None, subnet_values.values()))
if len(subnet_space) > self.max_cardinality:
evaluation.append({
'reason': 'SubnetLocationCardinality',
'subnets': subnet_values})
if 'security-group' in self.compare:
sg_values = {
rsg[self.sg_model.id]: self.sg.get_resource_value(key, rsg)
for rsg in resource_sgs}
if not self.missing_ok and None in sg_values.values():
evaluation.append({
'reason': 'SecurityGroupLocationAbsent',
'security-groups': sg_values})
sg_space = set(filter(None, sg_values.values()))
if len(sg_space) > self.max_cardinality:
evaluation.append({
'reason': 'SecurityGroupLocationCardinality',
'security-groups': sg_values})
if ('subnet' in self.compare and
'security-group' in self.compare and
sg_space != subnet_space):
evaluation.append({
'reason': 'LocationMismatch',
'subnets': subnet_values,
'security-groups': sg_values})
if 'resource' in self.compare:
r_value = self.vf.get_resource_value(key, r)
if not self.missing_ok and r_value is None:
evaluation.append({
'reason': 'ResourceLocationAbsent',
'resource': r_value})
elif 'security-group' in self.compare and resource_sgs and r_value not in sg_space:
evaluation.append({
'reason': 'ResourceLocationMismatch',
'resource': r_value,
'security-groups': sg_values})
elif 'subnet' in self.compare and resource_subnets and r_value not in subnet_space:
evaluation.append({
'reason': 'ResourceLocationMismatch',
'resource': r_value,
'subnet': subnet_values})
if 'security-group' in self.compare and resource_sgs:
mismatched_sgs = {sg_id: sg_value
for sg_id, sg_value in sg_values.items()
if sg_value != r_value}
if mismatched_sgs:
evaluation.append({
'reason': 'SecurityGroupMismatch',
'resource': r_value,
'security-groups': mismatched_sgs})
if evaluation and self.match == 'not-equal':
r['c7n:NetworkLocation'] = evaluation
return r
elif not evaluation and self.match == 'equal':
return r
| apache-2.0 | -2,030,043,321,442,465,000 | 37.831461 | 97 | 0.558738 | false |
vmagamedov/pi | fixers/fix_imports.py | 1 | 2965 | import os.path
from lib2to3.pygram import python_symbols
from lib2to3.fixer_util import Name
from lib2to3.fixes.fix_imports import FixImports as BaseFixImports, alternates
PATH = '{}/../pi/_requires'.format(os.path.dirname(__file__))
LIBS = [i[:-3] if i.endswith('.py') else i
        for i in os.listdir(PATH)
        if not i.startswith('_') and not i.endswith(('.pyc', '.pyo'))]
def build_pattern(mapping):
mod_list = ' | '.join(["module_name='%s'" % key for key in mapping])
dotted_mod_list = ' | '.join(["module_name=dotted_name<'{}' ('.' NAME)*>"
.format(key)
for key in mapping])
bare_names = alternates(mapping.keys())
yield """name_import=import_name< 'import' ((%s) |
multiple_imports=dotted_as_names< any* (%s) any* >) >
""" % (dotted_mod_list, dotted_mod_list)
yield """import_from< 'from' (%s) 'import' ['(']
( any | import_as_name< any 'as' any > |
import_as_names< any* >) [')'] >
""" % dotted_mod_list
yield """import_name< 'import' (dotted_as_name< (%s) 'as' any > |
multiple_imports=dotted_as_names<
any* dotted_as_name< (%s) 'as' any > any* >) >
""" % (dotted_mod_list, dotted_mod_list)
yield """name_import=import_name< 'import' ((%s) |
multiple_imports=dotted_as_names< any* (%s) any* >) >
""" % (mod_list, mod_list)
yield """import_from< 'from' (%s) 'import' ['(']
( any | import_as_name< any 'as' any > |
import_as_names< any* >) [')'] >
""" % mod_list
yield """import_name< 'import' (dotted_as_name< (%s) 'as' any > |
multiple_imports=dotted_as_names<
any* dotted_as_name< (%s) 'as' any > any* >) >
""" % (mod_list, mod_list)
# Find usages of module members in code e.g. thread.foo(bar)
yield "power< bare_with_attr=(%s) trailer<'.' any > any* >" % bare_names
class FixImports(BaseFixImports):
mapping = {"{}".format(lib): 'pi._requires.{}'.format(lib)
for lib in LIBS}
def build_pattern(self):
return "|".join(build_pattern(self.mapping))
def transform(self, node, results):
import_mod = results.get("module_name")
if import_mod and import_mod.type == python_symbols.dotted_name:
mod_name = import_mod.children[0].value
new_name = self.mapping[mod_name]
tail = ''.join(child.value for child in import_mod.children[1:])
import_mod.replace(Name(new_name + tail, prefix=import_mod.prefix))
if "name_import" in results:
self.replace[mod_name] = new_name
if "multiple_imports" in results:
results = self.match(node)
if results:
self.transform(node, results)
else:
return super().transform(node, results)
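# Illustrative usage sketch (not part of the original module), assuming this
# fixer is importable as the module 'fixers.fix_imports':
if __name__ == '__main__':
    from lib2to3.refactor import RefactoringTool
    tool = RefactoringTool(['fixers.fix_imports'])
    # Rewrites vendored imports, e.g. `import requests` -> `import pi._requires.requests`.
    print(tool.refactor_string('import requests\n', '<example>'))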
| bsd-3-clause | -102,682,145,929,447,380 | 41.357143 | 79 | 0.536931 | false |
swapnilsm/redis-rw-lock | setup.py | 1 | 1796 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
import io
from os.path import dirname
from os.path import join
from setuptools import setup
def read(*names, **kwargs):
return io.open(
join(dirname(__file__), *names),
encoding=kwargs.get('encoding', 'utf8')
).read()
setup(
name='redis-rw-lock',
version='1.0.6',
license='MIT',
description="Redis based Reader-Writer lock with Writer's priority.",
long_description='',
author='Swapnil S. Mahajan',
author_email='[email protected]',
url='https://github.com/swapnilsm/redis-rw-lock',
packages=['redis_rw_lock', ],
include_package_data=True,
zip_safe=False,
classifiers=[
# complete classifier list: http://pypi.python.org/pypi?%3Aaction=list_classifiers
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: Unix',
'Operating System :: POSIX',
'Operating System :: Microsoft :: Windows',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Utilities',
],
keywords=[
'redis', 'lock', 'rwlock'
],
install_requires=[
'redis>=2.10.0',
'python-redis-lock>=3.2.0'
]
)
| mit | 9,055,668,874,280,245,000 | 29.965517 | 90 | 0.603563 | false |
rwatson/chromium-capsicum | webkit/tools/layout_tests/layout_package/platform_utils_mac.py | 1 | 5197 | # Copyright (c) 2008-2009 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This is the Mac implementation of the layout_package.platform_utils
package. This file should only be imported by that package."""
import os
import platform
import signal
import subprocess
import path_utils
def PlatformName():
"""Returns the name of the platform we're currently running on."""
# At the moment all chromium mac results are version-independent. At some
# point we may need to return 'chromium-mac' + PlatformVersion()
return 'chromium-mac'
def PlatformVersion():
"""Returns the version string for the platform, e.g. '-vista' or
'-snowleopard'. If the platform does not distinguish between
minor versions, it returns ''."""
os_version_string = platform.mac_ver()[0] # e.g. "10.5.6"
if not os_version_string:
return '-leopard'
release_version = int(os_version_string.split('.')[1])
# we don't support 'tiger' or earlier releases
if release_version == 5:
return '-leopard'
elif release_version == 6:
return '-snowleopard'
return ''
def GetNumCores():
"""Returns the number of cores on the machine. For hyperthreaded machines,
this will be double the number of actual processors."""
return int(os.popen2("sysctl -n hw.ncpu")[1].read())
# TODO: We should add leopard and snowleopard to the list of paths to check
# once we start running the tests from snowleopard.
def BaselineSearchPath(all_versions=False):
"""Returns the list of directories to search for baselines/results, in
order of preference. Paths are relative to the top of the source tree."""
return [path_utils.ChromiumBaselinePath(PlatformName()),
path_utils.WebKitBaselinePath('mac' + PlatformVersion()),
path_utils.WebKitBaselinePath('mac')]
def WDiffPath():
"""Path to the WDiff executable, which we assume is already installed and
in the user's $PATH."""
return 'wdiff'
def ImageDiffPath(target):
"""Path to the image_diff executable
Args:
target: build type - 'Debug','Release',etc."""
return path_utils.PathFromBase('xcodebuild', target, 'image_diff')
def LayoutTestHelperPath(target):
"""Path to the layout_test_helper executable, if needed, empty otherwise
Args:
target: build type - 'Debug','Release',etc."""
return path_utils.PathFromBase('xcodebuild', target, 'layout_test_helper')
def TestShellPath(target):
"""Path to the test_shell executable.
Args:
target: build type - 'Debug','Release',etc."""
# TODO(pinkerton): make |target| happy with case-sensitive file systems.
return path_utils.PathFromBase('xcodebuild', target, 'TestShell.app',
'Contents', 'MacOS','TestShell')
def ApacheExecutablePath():
"""Returns the executable path to start Apache"""
return os.path.join("/usr", "sbin", "httpd")
def ApacheConfigFilePath():
"""Returns the path to Apache config file"""
return path_utils.PathFromBase("third_party", "WebKit", "LayoutTests", "http",
"conf", "apache2-httpd.conf")
def LigHTTPdExecutablePath():
"""Returns the executable path to start LigHTTPd"""
return path_utils.PathFromBase('third_party', 'lighttpd', 'mac',
'bin', 'lighttpd')
def LigHTTPdModulePath():
"""Returns the library module path for LigHTTPd"""
return path_utils.PathFromBase('third_party', 'lighttpd', 'mac', 'lib')
def LigHTTPdPHPPath():
"""Returns the PHP executable path for LigHTTPd"""
return path_utils.PathFromBase('third_party', 'lighttpd', 'mac', 'bin',
'php-cgi')
def ShutDownHTTPServer(server_pid):
"""Shut down the lighttpd web server. Blocks until it's fully shut down.
Args:
server_pid: The process ID of the running server.
"""
# server_pid is not set when "http_server.py stop" is run manually.
if server_pid is None:
# TODO(mmoss) This isn't ideal, since it could conflict with lighttpd
# processes not started by http_server.py, but good enough for now.
KillAllProcess('lighttpd')
KillAllProcess('httpd')
else:
try:
os.kill(server_pid, signal.SIGTERM)
#TODO(mmoss) Maybe throw in a SIGKILL just to be sure?
except OSError:
# Sometimes we get a bad PID (e.g. from a stale httpd.pid file), so if
# kill fails on the given PID, just try to 'killall' web servers.
ShutDownHTTPServer(None)
def KillProcess(pid):
"""Forcefully kill the process.
Args:
pid: The id of the process to be killed.
"""
os.kill(pid, signal.SIGKILL)
def KillAllProcess(process_name):
# On Mac OS X 10.6, killall has a new constraint: -SIGNALNAME or
# -SIGNALNUMBER must come first. Example problem:
# $ killall -u $USER -TERM lighttpd
# killall: illegal option -- T
# Use of the earlier -TERM placement is just fine on 10.5.
null = open("/dev/null");
subprocess.call(['killall', '-TERM', '-u', os.getenv('USER'), process_name],
stderr=null)
null.close()
def KillAllTestShells():
"""Kills all instances of the test_shell binary currently running."""
KillAllProcess('TestShell')
| bsd-3-clause | -4,367,166,175,675,160,000 | 34.59589 | 80 | 0.689051 | false |
mareknetusil/twist | demo/dynamic/flap.py | 1 | 1495 | from __future__ import print_function
__author__ = "Harish Narayanan"
__copyright__ = "Copyright (C) 2010 Simula Research Laboratory and %s" % __author__
__license__ = "GNU GPL Version 3 or any later version"
from cbc.twist import *
class Obstruction(Hyperelasticity):
def mesh(self):
n = 4
        return RectangleMesh(Point(0, 0), Point(0.2, 0.5), n, 5 * n // 2)
def end_time(self):
return 4.0
def time_step(self):
return 0.001
def is_dynamic(self):
return True
def neumann_conditions(self):
fluid_force = Expression(("magnitude*t", "0.0"), magnitude=1.5, t=0, degree=0)
return [fluid_force]
def neumann_boundaries(self):
fluid_interface = "x[1] > 0.0 && x[0] == 0"
return [fluid_interface]
def dirichlet_values(self):
fix = Constant((0.0, 0.0))
return [fix]
def dirichlet_boundaries(self):
bottom = "x[1] == 0.0"
return [bottom]
def material_model(self):
mu = 60
lmbda = 90
#material = StVenantKirchhoff([mu, lmbda])
material = neoHookean({'half_nkT':mu, 'bulk':lmbda})
return material
def reference_density(self):
return 1.0
def time_stepping(self):
return "CG1"
def __str__(self):
return "An obstruction being deformed by an ambient flow"
# Setup problem
problem = Obstruction()
problem.parameters['element_degree'] = 1
# Solve problem
print(problem)
problem.solve()
| gpl-3.0 | -6,887,895,058,826,794,000 | 23.508197 | 86 | 0.599331 | false |
jinankjain/zamboni | mkt/api/paginator.py | 1 | 4202 | import urlparse
from django.core.paginator import EmptyPage, Page, PageNotAnInteger, Paginator
from django.http import QueryDict
from django.utils.http import urlencode
from rest_framework import pagination, serializers
class ESPaginator(Paginator):
"""
A better paginator for search results
The normal Paginator does a .count() query and then a slice. Since ES
results contain the total number of results, we can take an optimistic
slice and then adjust the count.
"""
def validate_number(self, number):
"""
Validates the given 1-based page number.
This class overrides the default behavior and ignores the upper bound.
"""
try:
number = int(number)
except (TypeError, ValueError):
raise PageNotAnInteger('That page number is not an integer')
if number < 1:
raise EmptyPage('That page number is less than 1')
return number
def page(self, number):
"""
Returns a page object.
This class overrides the default behavior and ignores "orphans" and
assigns the count from the ES result to the Paginator.
"""
number = self.validate_number(number)
bottom = (number - 1) * self.per_page
top = bottom + self.per_page
page = Page(self.object_list[bottom:top], number, self)
# Force the search to evaluate and then attach the count. We want to
# avoid an extra useless query even if there are no results, so we
# directly fetch the count from _results_cache instead of calling
# page.object_list.count().
# FIXME: replace by simply calling page.object_list.count() when
# https://github.com/mozilla/elasticutils/pull/212 is merged and
# released.
page.object_list.execute()
self._count = page.object_list._results_cache.count
return page
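# Illustrative usage sketch (not part of the original module). `search` is
# assumed to be a lazy elasticutils-style query object; slicing it does not
# execute the query, which is what lets ESPaginator skip the separate
# .count() round-trip that the stock Paginator would issue.
def paginate_search(search, page_number, per_page=20):
    paginator = ESPaginator(search, per_page)
    page = paginator.page(page_number)
    # page.object_list is the executed slice; the paginator count now reflects
    # the total hit count reported by Elasticsearch.
    return page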
class MetaSerializer(serializers.Serializer):
"""
    Serializer for the 'meta' dict holding pagination info. It keeps the API
    backwards-compatible with the way tastypie does pagination (using offsets
    instead of page numbers), while still using a "standard" Paginator class.
"""
next = serializers.SerializerMethodField('get_next')
previous = serializers.SerializerMethodField('get_previous')
total_count = serializers.SerializerMethodField('get_total_count')
offset = serializers.SerializerMethodField('get_offset')
limit = serializers.SerializerMethodField('get_limit')
def replace_query_params(self, url, params):
(scheme, netloc, path, query, fragment) = urlparse.urlsplit(url)
query_dict = QueryDict(query).dict()
query_dict.update(params)
query = urlencode(query_dict)
return urlparse.urlunsplit((scheme, netloc, path, query, fragment))
def get_offset_link_for_page(self, page, number):
request = self.context.get('request')
url = request and request.get_full_path() or ''
number = number - 1 # Pages are 1-based, but offsets are 0-based.
per_page = page.paginator.per_page
return self.replace_query_params(url, {'offset': number * per_page,
'limit': per_page})
def get_next(self, page):
if not page.has_next():
return None
return self.get_offset_link_for_page(page, page.next_page_number())
def get_previous(self, page):
if not page.has_previous():
return None
return self.get_offset_link_for_page(page, page.previous_page_number())
def get_total_count(self, page):
return page.paginator.count
def get_offset(self, page):
index = page.start_index()
if index > 0:
# start_index() is 1-based, and we want a 0-based offset, so we
# need to remove 1, unless it's already 0.
return index - 1
return index
def get_limit(self, page):
return page.paginator.per_page
class CustomPaginationSerializer(pagination.BasePaginationSerializer):
meta = MetaSerializer(source='*') # Takes the page object as the source
results_field = 'objects'
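# Illustrative usage sketch (not part of the original module), assuming the
# DRF 2.x pagination-serializer API this class is written against: wrap a page
# from a plain Django Paginator and pass the request through the context so
# MetaSerializer can rebuild next/previous links from the current URL.
def serialize_page(queryset, request, offset=0, limit=25):
    paginator = Paginator(queryset, limit)
    page = paginator.page(offset // limit + 1)  # offsets are 0-based, pages are 1-based
    serializer = CustomPaginationSerializer(instance=page,
                                            context={'request': request})
    return serializer.data  # {'meta': {...}, 'objects': [...]}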
| bsd-3-clause | 899,639,658,611,445,600 | 36.185841 | 79 | 0.651832 | false |
hwen3/410-lab5 | todolist.py | 1 | 2612 | import sqlite3
from flask import Flask, render_template, g, request, session, flash, redirect, url_for, abort
DATABASE = 'test.db'
USERNAME = 'admin'
PASSWORD = 'admin'
SECRET_KEY = 'he who shall not be named'
app = Flask(__name__)
app.config.from_object(__name__)
@app.route('/')
def welcome():
return '<h1>Welcome to COMPUT 410 - Jinja Lab!</h1>'
@app.route('/login', methods=['GET', 'POST'])
def login():
error = None
if request.method == 'POST':
if request.form['username'] != app.config['USERNAME']:
error = 'invalid username'
elif request.form['password'] != app.config['PASSWORD']:
error = 'invalid password'
else:
session['logged_in'] = True
flash("You are logged in :-)")
return redirect(url_for('task'))
return render_template('login.html', error=error)
@app.route('/logout')
def logout():
session.pop('logged_in')
flash("You are logged out!")
return redirect(url_for('task'))
@app.route('/delete', methods=['GET', 'POST'])
def delete():
if not session.get('logged_in'):
abort(401)
removetask(request.form['id'])
flash('Task was deleted successfully!')
return redirect(url_for('task'))
@app.route('/task', methods=['GET', 'POST'])
def task():
if request.method == 'POST':
if not session.get('logged_in'):
abort(401)
category = request.form['category']
priority = request.form['priority']
description = request.form['description']
addtask(category, priority, description)
flash("New task added successfully")
return redirect(url_for('task'))
return render_template('show_entries.html', tasks=query_db('select * from tasks'))
def query_db(query, args=(), one=False):
cur = get_db().cursor()
cur.execute(query, args)
rv = cur.fetchall()
cur.close()
return (rv[0] if rv else None) if one else rv
def get_db():
db = getattr(g, '_database', None)
if db is None:
db = g._database = sqlite3.connect(DATABASE)
db.row_factory = sqlite3.Row
return db
def removetask(id):
query_db('delete from tasks where id = ?', [id], True)
get_db().commit()
def addtask(category, priority, description):
query_db('insert into tasks values (null, ?, ?, ?)', [category, priority, description], True)
get_db().commit()
@app.teardown_appcontext
def close_connection(exception):
db = getattr(g, '_database', None)
if db is not None:
db.close()
db = None
if __name__ == '__main__':
app.debug = True
app.run() | apache-2.0 | 7,709,553,751,303,551,000 | 27.402174 | 97 | 0.609495 | false |
Azure/azure-storage-python | samples/blob/block_blob_usage.py | 1 | 18393 | # coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import io
import os
import random
import time
import uuid
from azure.storage.blob import (
ContentSettings,
BlobBlock,
BlockListType,
)
class BlockBlobSamples():
def __init__(self, account):
self.account = account
def run_all_samples(self):
self.service = self.account.create_block_blob_service()
self.delete_blob()
self.blob_metadata()
self.blob_properties()
self.blob_exists()
self.copy_blob()
self.snapshot_blob()
self.lease_blob()
self.blob_with_bytes()
self.blob_with_stream()
self.blob_with_path()
self.blob_with_text()
self.blocks()
def _get_resource_reference(self, prefix):
return '{}{}'.format(prefix, str(uuid.uuid4()).replace('-', ''))
def _get_blob_reference(self, prefix='blob'):
return self._get_resource_reference(prefix)
def _create_blob(self, container_name, prefix='blob'):
blob_name = self._get_resource_reference(prefix)
self.service.create_blob_from_text(container_name, blob_name, u'hello world')
return blob_name
def _create_container(self, prefix='container'):
container_name = self._get_resource_reference(prefix)
self.service.create_container(container_name)
return container_name
def _get_random_bytes(self, size):
rand = random.Random()
result = bytearray(size)
for i in range(size):
result[i] = rand.randint(0, 255)
return bytes(result)
def delete_blob(self):
container_name = self._create_container()
blob_name = self._create_blob(container_name)
# Basic
self.service.delete_blob(container_name, blob_name)
self.service.delete_container(container_name)
def blob_metadata(self):
container_name = self._create_container()
blob_name = self._create_blob(container_name)
metadata = {'val1': 'foo', 'val2': 'blah'}
# Basic
self.service.set_blob_metadata(container_name, blob_name, metadata=metadata)
metadata = self.service.get_blob_metadata(container_name, blob_name) # metadata={'val1': 'foo', 'val2': 'blah'}
# Replaces values, does not merge
metadata = {'new': 'val'}
self.service.set_blob_metadata(container_name, blob_name, metadata=metadata)
metadata = self.service.get_blob_metadata(container_name, blob_name) # metadata={'new': 'val'}
# Capital letters
metadata = {'NEW': 'VAL'}
self.service.set_blob_metadata(container_name, blob_name, metadata=metadata)
metadata = self.service.get_blob_metadata(container_name, blob_name) # metadata={'new': 'VAL'}
# Clearing
self.service.set_blob_metadata(container_name, blob_name)
metadata = self.service.get_blob_metadata(container_name, blob_name) # metadata={}
self.service.delete_container(container_name)
def blob_properties(self):
container_name = self._create_container()
blob_name = self._get_blob_reference()
metadata = {'val1': 'foo', 'val2': 'blah'}
self.service.create_blob_from_text(container_name, blob_name, u'hello world', metadata=metadata)
settings = ContentSettings(content_type='html', content_language='fr')
# Basic
self.service.set_blob_properties(container_name, blob_name, content_settings=settings)
blob = self.service.get_blob_properties(container_name, blob_name)
content_language = blob.properties.content_settings.content_language # fr
content_type = blob.properties.content_settings.content_type # html
content_length = blob.properties.content_length # 512
# Metadata
# Can't set metadata, but get will return metadata already on the blob
blob = self.service.get_blob_properties(container_name, blob_name)
metadata = blob.metadata # metadata={'val1': 'foo', 'val2': 'blah'}
# Replaces values, does not merge
settings = ContentSettings(content_encoding='utf-8')
self.service.set_blob_properties(container_name, blob_name, content_settings=settings)
blob = self.service.get_blob_properties(container_name, blob_name)
content_encoding = blob.properties.content_settings.content_encoding # utf-8
content_language = blob.properties.content_settings.content_language # None
self.service.delete_container(container_name)
def blob_exists(self):
container_name = self._create_container()
blob_name = self._get_blob_reference()
# Basic
exists = self.service.exists(container_name, blob_name) # False
self.service.create_blob_from_text(container_name, blob_name, u'hello world')
exists = self.service.exists(container_name, blob_name) # True
self.service.delete_container(container_name)
def copy_blob(self):
container_name = self._create_container()
source_blob_name = self._create_blob(container_name)
# Basic
        # Copy the source blob to a new blob at the root of the container
source = self.service.make_blob_url(container_name, source_blob_name)
copy = self.service.copy_blob(container_name, 'blob1copy', source)
# Poll for copy completion
count = 0
while copy.status != 'success':
count = count + 1
if count > 5:
print('Timed out waiting for async copy to complete.')
time.sleep(30)
copy = self.service.get_blob_properties(container_name, 'blob1copy').properties.copy
# With SAS from a remote account to local blob
# Commented out as remote container, directory, blob, and sas would need to be created
'''
source_blob_url = self.service.make_blob_url(
remote_container_name,
remote_blob_name,
sas_token=remote_sas_token,
)
copy = self.service.copy_blob(destination_containername,
destination_blob_name,
source_blob_url)
'''
# Abort copy
# Commented out as this involves timing the abort to be sent while the copy is still running
# Abort copy is useful to do along with polling
# self.service.abort_copy_blob(container_name, blob_name, copy.id)
# Sync copy
# Set requires_sync=True to indicate that the service should not return a result until the blob is copied.
# This eliminates the need for polling.
self.service.copy_blob(container_name, 'blob1copy', source, requires_sync=True)
self.service.delete_container(container_name)
def snapshot_blob(self):
container_name = self._create_container()
base_blob_name = self._create_blob(container_name)
# Basic
snapshot_blob = self.service.snapshot_blob(container_name, base_blob_name)
snapshot_id = snapshot_blob.snapshot
# Set Metadata (otherwise metadata will be copied from base blob)
metadata = {'val1': 'foo', 'val2': 'blah'}
snapshot_blob = self.service.snapshot_blob(container_name, base_blob_name, metadata=metadata)
snapshot_id = snapshot_blob.snapshot
self.service.delete_container(container_name)
def lease_blob(self):
container_name = self._create_container()
blob_name1 = self._create_blob(container_name)
blob_name2 = self._create_blob(container_name)
blob_name3 = self._create_blob(container_name)
# Acquire
# Defaults to infinite lease
infinite_lease_id = self.service.acquire_blob_lease(container_name, blob_name1)
# Acquire
# Set lease time, may be between 15 and 60 seconds
fixed_lease_id = self.service.acquire_blob_lease(container_name, blob_name2, lease_duration=30)
# Acquire
# Proposed lease id
proposed_lease_id_1 = '55e97f64-73e8-4390-838d-d9e84a374321'
modified_lease_id = self.service.acquire_blob_lease(container_name,
blob_name3,
proposed_lease_id=proposed_lease_id_1,
lease_duration=30)
modified_lease_id # equal to proposed_lease_id_1
# Renew
# Resets the 30 second lease timer
# Note that the lease may be renewed even if it has expired as long as
# the container has not been leased again since the expiration of that lease
self.service.renew_blob_lease(container_name, blob_name3, proposed_lease_id_1)
# Change
# Change the lease ID of an active lease.
proposed_lease_id_2 = '55e97f64-73e8-4390-838d-d9e84a374322'
self.service.change_blob_lease(container_name, blob_name3, modified_lease_id,
proposed_lease_id=proposed_lease_id_2)
# Release
# Releasing the lease allows another client to immediately acquire the
# lease for the container as soon as the release is complete.
self.service.release_blob_lease(container_name, blob_name3, proposed_lease_id_2)
# Break
# A matching lease ID is not required.
# By default, a fixed-duration lease breaks after the remaining lease period
# elapses, and an infinite lease breaks immediately.
infinite_lease_break_time = self.service.break_blob_lease(container_name, blob_name1)
infinite_lease_break_time # 0
# Break
# By default this would leave whatever time remained of the 30 second
# lease period, but a break period can be provided to indicate when the
# break should take affect
lease_break_time = self.service.break_blob_lease(container_name, blob_name2, lease_break_period=10)
lease_break_time # 10
self.service.delete_container(container_name)
def blob_with_bytes(self):
container_name = self._create_container()
# Basic
data = b'hello world'
blob_name = self._get_blob_reference()
self.service.create_blob_from_bytes(container_name, blob_name, data)
blob = self.service.get_blob_to_bytes(container_name, blob_name)
content = blob.content # hello world
# Download range
blob = self.service.get_blob_to_bytes(container_name, blob_name,
start_range=3, end_range=10)
content = blob.content # data from 3-10
# Upload from index in byte array
blob_name = self._get_blob_reference()
self.service.create_blob_from_bytes(container_name, blob_name, data, index=3)
# Content settings, metadata
settings = ContentSettings(content_type='html', content_language='fr')
metadata = {'val1': 'foo', 'val2': 'blah'}
blob_name = self._get_blob_reference()
self.service.create_blob_from_bytes(container_name, blob_name, data, content_settings=settings,
metadata=metadata)
blob = self.service.get_blob_to_bytes(container_name, blob_name)
metadata = blob.metadata # metadata={'val1': 'foo', 'val2': 'blah'}
content_language = blob.properties.content_settings.content_language # fr
content_type = blob.properties.content_settings.content_type # html
# Progress
# Use slightly larger data so the chunking is more visible
data = self._get_random_bytes(8 * 1024 * 1024)
def upload_callback(current, total):
print('({}, {})'.format(current, total))
def download_callback(current, total):
print('({}, {}) '.format(current, total))
blob_name = self._get_blob_reference()
print('upload: ')
self.service.create_blob_from_bytes(container_name, blob_name, data, progress_callback=upload_callback)
print('download: ')
blob = self.service.get_blob_to_bytes(container_name, blob_name,
progress_callback=download_callback)
self.service.delete_container(container_name)
def blob_with_stream(self):
container_name = self._create_container()
# Basic
input_stream = io.BytesIO(self._get_random_bytes(15))
output_stream = io.BytesIO()
blob_name = self._get_blob_reference()
self.service.create_blob_from_stream(container_name, blob_name, input_stream, 15)
blob = self.service.get_blob_to_stream(container_name, blob_name,
output_stream)
content_length = blob.properties.content_length
# Download range
# Content settings, metadata
# Progress
# Parallelism
# See blob_with_bytes for these examples. The code will be very similar.
self.service.delete_container(container_name)
def blob_with_path(self):
container_name = self._create_container()
INPUT_FILE_PATH = 'blob_input.temp.dat'
OUTPUT_FILE_PATH = 'blob_output.temp.dat'
data = self._get_random_bytes(4 * 1024)
with open(INPUT_FILE_PATH, 'wb') as stream:
stream.write(data)
# Basic
blob_name = self._get_blob_reference()
self.service.create_blob_from_path(container_name, blob_name, INPUT_FILE_PATH)
blob = self.service.get_blob_to_path(container_name, blob_name, OUTPUT_FILE_PATH)
content_length = blob.properties.content_length
# Open mode
# Append to the blob instead of starting from the beginning
# Append streams are not seekable and so must be downloaded serially by setting max_connections=1.
blob = self.service.get_blob_to_path(container_name, blob_name, OUTPUT_FILE_PATH, open_mode='ab',
max_connections=1)
        content_length = blob.properties.content_length  # will be the same, but the local file will be longer
# Download range
# Content settings, metadata
# Progress
# Parallelism
# See blob_with_bytes for these examples. The code will be very similar.
self.service.delete_container(container_name)
if os.path.isfile(INPUT_FILE_PATH):
try:
os.remove(INPUT_FILE_PATH)
except:
pass
if os.path.isfile(OUTPUT_FILE_PATH):
try:
os.remove(OUTPUT_FILE_PATH)
except:
pass
def blob_with_text(self):
container_name = self._create_container()
# Basic
data = u'hello world'
blob_name = self._get_blob_reference()
self.service.create_blob_from_text(container_name, blob_name, data)
blob = self.service.get_blob_to_text(container_name, blob_name)
content = blob.content # 'hello world'
# Encoding
text = u'hello 啊齄丂狛狜 world'
data = text.encode('utf-16')
blob_name = self._get_blob_reference()
self.service.create_blob_from_text(container_name, blob_name, text, 'utf-16')
blob = self.service.get_blob_to_text(container_name, blob_name, 'utf-16')
content = blob.content # 'hello 啊齄丂狛狜 world'
# Download range
# Content settings, metadata
# Progress
# Parallelism
# See blob_with_bytes for these examples. The code will be very similar.
self.service.delete_container(container_name)
def blocks(self):
container_name = self._create_container()
blob_name = self._get_blob_reference()
# Put block
# Block id's must be the same length
self.service.put_block(container_name, blob_name, b'AAA', '1')
self.service.put_block(container_name, blob_name, b'BBB', '2')
self.service.put_block(container_name, blob_name, b'CCC', '3')
# Get Block List
# Defaults to committed only, specify all to get committed and uncommitted
block_list = self.service.get_block_list(container_name, blob_name,
block_list_type=BlockListType.All)
uncommitted = len(block_list.uncommitted_blocks) # 3
committed = len(block_list.committed_blocks) # 0
# Note the blob does not yet appears as blocks have not been committed
exists = self.service.exists(container_name, blob_name) # False
# Commit the blocks
# BlockBlock state defaults to Latest meaning the uncommitted and then
# the committed list is searched for the block id to commit
block_list = [BlobBlock(id='1'), BlobBlock(id='2'), BlobBlock(id='3')]
self.service.put_block_list(container_name, blob_name, block_list)
# Get Block List
# Defaults to committed only, specify all to get committed and uncommitted
block_list = self.service.get_block_list(container_name, blob_name,
block_list_type=BlockListType.All)
uncommitted = len(block_list.uncommitted_blocks) # 0
committed = len(block_list.committed_blocks) # 3
# Add a block
# Put the block
self.service.put_block(container_name, blob_name, b'DDD', '4')
# Get the existing blocks
block_list = self.service.get_block_list(container_name, blob_name,
block_list_type=BlockListType.All)
uncommitted = len(block_list.uncommitted_blocks) # 1
committed = len(block_list.committed_blocks) # 3
# Added the new block to the existing list and commit
new_block_list = block_list.committed_blocks
new_block_list.append(block_list.uncommitted_blocks[0])
self.service.put_block_list(container_name, blob_name, new_block_list)
self.service.delete_container(container_name)
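# Illustrative usage sketch (not part of the original sample). The account
# name/key are placeholders; CloudStorageAccount is assumed to come from the
# legacy azure-storage SDK these samples target.
if __name__ == '__main__':
    from azure.storage.common import CloudStorageAccount
    account = CloudStorageAccount(account_name='<account>', account_key='<key>')
    BlockBlobSamples(account).run_all_samples()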
| mit | 8,936,790,025,452,813,000 | 40.474041 | 120 | 0.618625 | false |
mbalazin/cse599c-17sp-projects | spark-advantage/pandasbench.py | 1 | 1488 |
# coding: utf-8
# In[1]:
import pandas as pd
import time
from sys import argv
logfile = argv[1]
filesize = argv[2]
# # Python Pandas Benchmark
# In[3]:
prefix = "file:////Users/tony/Dropbox/Projects/UW/cse599c-17sp-projects/spark-advantage/data/"
if(filesize == 'original'):
tairfname = "Tair_WA_nohead.csv"
tsoilfname = "Tsoil_WA_nohead.csv"
tsurfacefname = "Tsurface_WA_nohead.csv"
elif (filesize == 'medium'):
tairfname = "Tair_WA_nohead.MEDIUM.csv"
tsoilfname = "Tsoil_WA_nohead.MEDIUM.csv"
tsurfacefname = "Tsurface_WA_nohead.MEDIUM.csv"
elif (filesize == "small"):
tairfname = "Tair_WA_nohead.SMALL.csv"
tsoilfname = "Tsoil_WA_nohead.SMALL.csv"
tsurfacefname = "Tsurface_WA_nohead.SMALL.csv"
startTime = time.time()
tair = pd.read_csv(prefix+tairfname)
tsoil = pd.read_csv(prefix+tsoilfname)
tsurface = pd.read_csv(prefix+tsurfacefname)
joined = tair.merge(tsoil, on=["datetime", " lat", " lon"]).merge(tsurface, on=["datetime", " lat", " lon"])
joined.columns = [name.strip() for name in joined.columns]
joined[['lat', 'lon']] = joined[['lat', 'lon']].apply(pd.to_numeric)
seattle = joined[(joined['lon'] > -125.52) & \
(joined['lon'] < -120.2) & \
(joined['lat'] > 49.0) & \
(joined['lat'] < 51.0)]
seattle.groupby(by=['lat', 'lon'])['Tair'].mean()
exptime = time.time() - startTime
with open(logfile, 'a') as log:
log.write(str(exptime)+'\n') | bsd-3-clause | -4,992,153,284,442,744,000 | 24.672414 | 108 | 0.623656 | false |
openai/baselines | baselines/ppo1/run_atari.py | 1 | 1583 | #!/usr/bin/env python3
from mpi4py import MPI
from baselines.common import set_global_seeds
from baselines import bench
import os.path as osp
from baselines import logger
from baselines.common.atari_wrappers import make_atari, wrap_deepmind
from baselines.common.cmd_util import atari_arg_parser
def train(env_id, num_timesteps, seed):
from baselines.ppo1 import pposgd_simple, cnn_policy
import baselines.common.tf_util as U
rank = MPI.COMM_WORLD.Get_rank()
sess = U.single_threaded_session()
sess.__enter__()
if rank == 0:
logger.configure()
else:
logger.configure(format_strs=[])
workerseed = seed + 10000 * MPI.COMM_WORLD.Get_rank() if seed is not None else None
set_global_seeds(workerseed)
env = make_atari(env_id)
def policy_fn(name, ob_space, ac_space): #pylint: disable=W0613
return cnn_policy.CnnPolicy(name=name, ob_space=ob_space, ac_space=ac_space)
env = bench.Monitor(env, logger.get_dir() and
osp.join(logger.get_dir(), str(rank)))
env.seed(workerseed)
env = wrap_deepmind(env)
env.seed(workerseed)
pposgd_simple.learn(env, policy_fn,
max_timesteps=int(num_timesteps * 1.1),
timesteps_per_actorbatch=256,
clip_param=0.2, entcoeff=0.01,
optim_epochs=4, optim_stepsize=1e-3, optim_batchsize=64,
gamma=0.99, lam=0.95,
schedule='linear'
)
env.close()
def main():
args = atari_arg_parser().parse_args()
train(args.env, num_timesteps=args.num_timesteps, seed=args.seed)
if __name__ == '__main__':
main()
| mit | -257,343,598,731,253,380 | 31.979167 | 87 | 0.670246 | false |
bptripp/it-cnn | tuning/clutter.py | 1 | 2870 | __author__ = 'bptripp'
import numpy as np
import matplotlib
matplotlib.rcParams['xtick.labelsize'] = 14
matplotlib.rcParams['ytick.labelsize'] = 14
import matplotlib.pyplot as plt
from cnn_stimuli import get_image_file_list
from alexnet import preprocess, load_net, load_vgg
def get_clutter_responses(remove_level):
model = load_net(weights_path='../weights/alexnet_weights.h5', remove_level=remove_level)
use_vgg = False
# model = load_vgg(weights_path='../weights/vgg16_weights.h5', remove_level=remove_level)
# use_vgg = True
bottom_dir = './images/clutter/bottom/'
bottom_image_files = get_image_file_list(bottom_dir, 'png', with_path=True)
bottom_out = model.predict(preprocess(bottom_image_files, use_vgg=use_vgg))
top_dir = './images/clutter/top/'
top_image_files = get_image_file_list(top_dir, 'png', with_path=True)
top_out = model.predict(preprocess(top_image_files, use_vgg=use_vgg))
pair_dir = './images/clutter/pair/'
pair_image_files = get_image_file_list(pair_dir, 'png', with_path=True)
pair_out = model.predict(preprocess(pair_image_files, use_vgg=use_vgg))
maxima = np.max(pair_out, axis=0)
n = 100
ind = (-maxima).argsort()[:n]
# n = 500
# ind = range(n)
sum_out = np.zeros_like(pair_out)
n_top = len(top_image_files)
n_bottom = len(bottom_image_files)
for i in range(n_top):
for j in range(n_bottom):
sum_out[i*n_bottom+j,:] = top_out[i,:] + bottom_out[j,:]
large_pair_out = pair_out[:,ind]
large_sum_out = sum_out[:,ind]
return large_pair_out, large_sum_out
if False:
remove_level = 1
large_pair_out, large_sum_out = get_clutter_responses(remove_level)
plt.figure(figsize=(4.5,4))
plt.scatter(large_sum_out, large_pair_out, marker='.', c='k')
plt.plot([0, 15], [0, 15], 'k--')
plt.plot([0, 15], [0, 7.5], 'k')
plt.xlim((0,16))
plt.ylim((0,16))
plt.xlabel('Sum of responses to single objects', fontsize=14)
plt.ylabel('Response to object pairs', fontsize=14)
plt.tight_layout()
plt.savefig('../figures/clutter-' + str(remove_level) + '.eps')
plt.show()
if True:
plt.figure(figsize=(6,2))
edges = np.linspace(0, np.pi/2, 20)
for remove_level in range(3):
plt.subplot(1,3,remove_level+1)
large_pair_out, large_sum_out = get_clutter_responses(remove_level)
angle = np.arctan((large_pair_out.flatten() + 1e-6) / (large_sum_out.flatten() + 1e-6))
plt.hist(angle, edges, color=[.5,.5,.5])
# if remove_level == 1:
# plt.xlabel('Angle from horizontal (radians)', fontsize=14)
plt.yticks([])
plt.xticks([0, np.pi/4], ['0', 'pi/4'])
plt.plot([np.arctan(.5), np.arctan(.5)], plt.gca().get_ylim(), 'r')
plt.tight_layout()
plt.savefig('../figures/clutter-angles.eps')
plt.show() | mit | -5,060,137,306,948,743,000 | 35.341772 | 95 | 0.628223 | false |
amenonsen/ansible | lib/ansible/modules/network/fortios/fortios_wireless_controller_hotspot20_h2qp_osu_provider.py | 1 | 13230 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_wireless_controller_hotspot20_h2qp_osu_provider
short_description: Configure online sign up (OSU) provider list in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify wireless_controller_hotspot20 feature and h2qp_osu_provider category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.4
version_added: "2.9"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
state:
description:
- Indicates whether to create or remove the object.
type: str
required: true
choices:
- present
- absent
wireless_controller_hotspot20_h2qp_osu_provider:
description:
- Configure online sign up (OSU) provider list.
default: null
type: dict
suboptions:
friendly_name:
description:
- OSU provider friendly name.
type: list
suboptions:
friendly_name:
description:
- OSU provider friendly name.
type: str
index:
description:
- OSU provider friendly name index.
required: true
type: int
lang:
description:
- Language code.
type: str
icon:
description:
- OSU provider icon. Source wireless-controller.hotspot20.icon.name.
type: str
name:
description:
- OSU provider ID.
required: true
type: str
osu_method:
description:
- OSU method list.
type: str
choices:
- oma-dm
- soap-xml-spp
- reserved
osu_nai:
description:
- OSU NAI.
type: str
server_uri:
description:
- Server URI.
type: str
service_description:
description:
- OSU service name.
type: list
suboptions:
lang:
description:
- Language code.
type: str
service_description:
description:
- Service description.
type: str
service_id:
description:
- OSU service ID.
type: int
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure online sign up (OSU) provider list.
fortios_wireless_controller_hotspot20_h2qp_osu_provider:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
state: "present"
wireless_controller_hotspot20_h2qp_osu_provider:
friendly_name:
-
friendly_name: "<your_own_value>"
index: "5"
lang: "<your_own_value>"
icon: "<your_own_value> (source wireless-controller.hotspot20.icon.name)"
name: "default_name_8"
osu_method: "oma-dm"
osu_nai: "<your_own_value>"
server_uri: "<your_own_value>"
service_description:
-
lang: "<your_own_value>"
service_description: "<your_own_value>"
service_id: "15"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
ssl_verify = data['ssl_verify']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password, verify=ssl_verify)
def filter_wireless_controller_hotspot20_h2qp_osu_provider_data(json):
option_list = ['friendly_name', 'icon', 'name',
'osu_method', 'osu_nai', 'server_uri',
'service_description']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def underscore_to_hyphen(data):
if isinstance(data, list):
        for i, elem in enumerate(data):
            data[i] = underscore_to_hyphen(elem)
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
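# Illustrative behaviour of the helper above (assumed example values):
# underscore_to_hyphen({'osu_method': 'oma-dm'}) returns {'osu-method': 'oma-dm'},
# i.e. keys are rewritten to the hyphenated style expected by the FortiOS API.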
def wireless_controller_hotspot20_h2qp_osu_provider(data, fos):
vdom = data['vdom']
state = data['state']
wireless_controller_hotspot20_h2qp_osu_provider_data = data['wireless_controller_hotspot20_h2qp_osu_provider']
filtered_data = underscore_to_hyphen(filter_wireless_controller_hotspot20_h2qp_osu_provider_data(wireless_controller_hotspot20_h2qp_osu_provider_data))
if state == "present":
return fos.set('wireless-controller.hotspot20',
'h2qp-osu-provider',
data=filtered_data,
vdom=vdom)
elif state == "absent":
return fos.delete('wireless-controller.hotspot20',
'h2qp-osu-provider',
mkey=filtered_data['name'],
vdom=vdom)
def is_successful_status(status):
return status['status'] == "success" or \
status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_wireless_controller_hotspot20(data, fos):
if data['wireless_controller_hotspot20_h2qp_osu_provider']:
resp = wireless_controller_hotspot20_h2qp_osu_provider(data, fos)
return not is_successful_status(resp), \
resp['status'] == "success", \
resp
def main():
fields = {
"host": {"required": False, "type": "str"},
"username": {"required": False, "type": "str"},
"password": {"required": False, "type": "str", "default": "", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"ssl_verify": {"required": False, "type": "bool", "default": True},
"state": {"required": True, "type": "str",
"choices": ["present", "absent"]},
"wireless_controller_hotspot20_h2qp_osu_provider": {
"required": False, "type": "dict", "default": None,
"options": {
"friendly_name": {"required": False, "type": "list",
"options": {
"friendly_name": {"required": False, "type": "str"},
"index": {"required": True, "type": "int"},
"lang": {"required": False, "type": "str"}
}},
"icon": {"required": False, "type": "str"},
"name": {"required": True, "type": "str"},
"osu_method": {"required": False, "type": "str",
"choices": ["oma-dm", "soap-xml-spp", "reserved"]},
"osu_nai": {"required": False, "type": "str"},
"server_uri": {"required": False, "type": "str"},
"service_description": {"required": False, "type": "list",
"options": {
"lang": {"required": False, "type": "str"},
"service_description": {"required": False, "type": "str"},
"service_id": {"required": False, "type": "int"}
}}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
# legacy_mode refers to using fortiosapi instead of HTTPAPI
legacy_mode = 'host' in module.params and module.params['host'] is not None and \
'username' in module.params and module.params['username'] is not None and \
'password' in module.params and module.params['password'] is not None
if not legacy_mode:
if module._socket_path:
connection = Connection(module._socket_path)
fos = FortiOSHandler(connection)
is_error, has_changed, result = fortios_wireless_controller_hotspot20(module.params, fos)
else:
module.fail_json(**FAIL_SOCKET_MSG)
else:
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
fos = FortiOSAPI()
login(module.params, fos)
is_error, has_changed, result = fortios_wireless_controller_hotspot20(module.params, fos)
fos.logout()
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| gpl-3.0 | -5,820,145,933,093,998,000 | 32.324937 | 155 | 0.552079 | false |
icyflame/batman | pywikibot/families/wikibooks_family.py | 1 | 7244 | # -*- coding: utf-8 -*-
"""Family module for Wikibooks."""
from __future__ import absolute_import, unicode_literals
from pywikibot import family
__version__ = '$Id$'
# The Wikimedia family that is known as Wikibooks
class Family(family.SubdomainFamily, family.WikimediaFamily):
"""Family class for Wikibooks."""
name = 'wikibooks'
closed_wikis = [
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Afar_Wikibooks
'aa',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Akan_Wikibooks
'ak',
# https://als.wikipedia.org/wiki/Wikipedia:Stammtisch/Archiv_2008-1#Afterwards.2C_closure_and_deletion_of_Wiktionary.2C_Wikibooks_and_Wikiquote_sites
'als',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Assamese_Wikibooks
'as',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Asturianu_Wikibooks
'ast',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Aymar_Wikibooks
'ay',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Bashkir_Wikibooks
'ba',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Bislama_Wikibooks
'bi',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Bambara_Wikibooks
'bm',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Tibetan_Wikibooks
'bo',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Chamorro_Wikibooks
'ch',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Corsu_Wikibooks
'co',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Gaeilge_Wikibooks
'ga',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Gothic_Wikibooks
'got',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Guarani_Wikibooks
'gn',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Gujarati_Wikibooks
'gu',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Kannada_Wikibooks
'kn',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Kashmiri_Wikibooks
'ks',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_L%C3%ABtzebuergesch_Wikibooks
'lb',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Lingala_Wikibooks
'ln',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Latvian_Wikibooks
'lv',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Maori_Wikibooks
'mi',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Mongolian_Wikibooks
'mn',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Burmese_Wikibooks
'my',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Nauruan_Wikibooks
'na',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Nahuatl_Wikibooks
'nah',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Plattd%C3%BC%C3%BCtsch_Wikibooks
'nds',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Pashto_Wikibooks
'ps',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Quechua_Wikibooks
'qu',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Rumantsch_Wikibooks
'rm',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Sami_Wikibooks
'se',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Simple_English_Wikibooks_(3)
'simple',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Basa_Sunda_Wikibooks_(2)
'su',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Swahili_Wikibooks
'sw',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Turkmen_Wikibooks
'tk',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Uyghur_Wikibooks
'ug',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Volap%C3%BCk_Wikibooks
'vo',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Walon_Wikibooks
'wa',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Xhosa_Wikibooks
'xh',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Yoruba_Wikibooks
'yo',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Zhuang_Wikibooks
'za',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Zulu_Wikibooks
'zu',
]
removed_wikis = [
'tokipona',
]
def __init__(self):
"""Constructor."""
self.languages_by_size = [
'en', 'de', 'fr', 'hu', 'ja', 'it', 'es', 'pt', 'nl', 'pl', 'he',
'vi', 'ca', 'id', 'sq', 'fi', 'ru', 'fa', 'cs', 'zh', 'sv', 'hr',
'tr', 'ro', 'sr', 'ar', 'no', 'th', 'ko', 'gl', 'da', 'ta', 'mk',
'az', 'tl', 'is', 'ka', 'lt', 'tt', 'uk', 'eo', 'bg', 'sk', 'sl',
'el', 'hy', 'ms', 'sa', 'si', 'li', 'la', 'ml', 'ur', 'bn', 'ang',
'ia', 'cv', 'et', 'hi', 'km', 'mr', 'eu', 'oc', 'kk', 'fy', 'ne',
'ie', 'te', 'af', 'tg', 'ky', 'bs', 'pa', 'be', 'mg', 'cy',
'zh-min-nan', 'ku', 'uz',
]
super(Family, self).__init__()
# Global bot allowed languages on
# https://meta.wikimedia.org/wiki/Bot_policy/Implementation#Current_implementation
self.cross_allowed = [
'af', 'ang', 'ca', 'fa', 'fy', 'it', 'nl', 'ru', 'th', 'zh',
]
# Which languages have a special order for putting interlanguage links,
# and what order is it? If a language is not in interwiki_putfirst,
# alphabetical order on language code is used. For languages that are in
# interwiki_putfirst, interwiki_putfirst is checked first, and
# languages are put in the order given there. All other languages are
# put after those, in code-alphabetical order.
self.interwiki_putfirst = {
'en': self.alphabetic,
'fi': self.alphabetic,
'fr': self.alphabetic,
'he': ['en'],
'hu': ['en'],
'pl': self.alphabetic,
'simple': self.alphabetic
}
def shared_data_repository(self, code, transcluded=False):
"""Return the shared data repository for this family."""
return ('wikidata', 'wikidata')
| mit | -3,807,221,727,684,377,000 | 48.278912 | 157 | 0.628934 | false |
mpachas/django-monthfield | month/tests.py | 1 | 5197 | from django.test import TestCase
from month.models import Month
from example.models import Example
import datetime
# Create your tests here.
class TestMonthFunctions(TestCase):
def test_constructors(self):
m = Month(2010, 1)
self.assertEqual(m.year, 2010)
self.assertEqual(m.month, 1)
m = Month.from_string('2010-01')
self.assertEqual(m.year, 2010)
self.assertEqual(m.month, 1)
m = Month.from_date(datetime.date(year=2010, month=1, day=20))
self.assertEqual(m.year, 2010)
self.assertEqual(m.month, 1)
def test_addition(self):
m = Month(2010, 1)
x = m + 5
self.assertEqual(x.year, 2010)
self.assertEqual(x.month, 6)
x = m + 11
self.assertEqual(x.year, 2010)
self.assertEqual(x.month, 12)
x = m + 12
self.assertEqual(x.year, 2011)
self.assertEqual(x.month, 1)
x = m + 13
self.assertEqual(x.year, 2011)
self.assertEqual(x.month, 2)
x = m - 1
self.assertEqual(x.year, 2009)
self.assertEqual(x.month, 12)
x = m + 0
self.assertEqual(x.year, 2010)
self.assertEqual(x.month, 1)
x = m - 12
self.assertEqual(x.year, 2009)
self.assertEqual(x.month, 1)
x = m.next_month()
self.assertEqual(x.year, 2010)
self.assertEqual(x.month, 2)
x = m.prev_month()
self.assertEqual(x.year, 2009)
self.assertEqual(x.month, 12)
def test_firstday(self):
m = Month(2010, 1)
self.assertEqual(m.firstDay(), datetime.date(year=2010, month=1, day=1))
self.assertEqual(m.last_day(), datetime.date(year=2010, month=1, day=31))
m = Month(2010, 2)
self.assertEqual(m.firstDay(), datetime.date(year=2010, month=2, day=1))
self.assertEqual(m.last_day(), datetime.date(year=2010, month=2, day=28))
m = Month(2008, 2)
self.assertEqual(m.firstDay(), datetime.date(year=2008, month=2, day=1))
self.assertEqual(m.last_day(), datetime.date(year=2008, month=2, day=29))
def test_contains(self):
m = Month(2010, 1)
assert datetime.date(year=2010, month=1, day=1) in m
assert datetime.date(year=2010, month=1, day=10) in m
assert datetime.date(year=2010, month=1, day=31) in m
assert datetime.date(year=2010, month=2, day=1) not in m
assert datetime.date(year=2009, month=12, day=31) not in m
assert datetime.date(year=2009, month=1, day=31) not in m
assert datetime.date(year=2010, month=2, day=15) not in m
def test_int_conversion(self):
m = Month(2010, 1)
n = Month.from_int(int(m))
self.assertEqual(n.year, 2010)
self.assertEqual(n.month, 1)
def test_comparisons(self):
m = Month(2010, 1)
assert m == "2010-01-20"
assert m == "2010-01-20"
assert m == "2010-01"
assert m == "2010-01-20"
assert m < "2010-02-01"
assert m > "2009-12"
assert m > "2009-12-31"
p = m.prev_month()
n = m.next_month()
assert m == m
assert m <= m
assert m >= m
assert not m > m
assert not m < m
assert not m != m
assert not m == p
assert m > p
assert m >= p
assert not m <= p
assert not m < p
assert m != p
assert not m == n
assert m != n
assert m < n
assert m <= n
assert not m > n
assert not m >= n
class test_model_field(TestCase):
def test_queries(self):
e = Example(name='2010-01', month=Month(2010, 1))
e.save()
assert isinstance(e.month, Month)
assert e.month.month == 1
assert e.month.year == 2010
pk = e.pk
e = Example.objects.get(pk=pk)
assert isinstance(e.month, Month)
assert e.month.month == 1
assert e.month.year == 2010
e = Example(name='2010-01', month='2010-01')
e.save()
pk = e.pk
e = Example.objects.get(pk=pk)
assert isinstance(e.month, Month)
assert e.month.month == 1
assert e.month.year == 2010
e = Example(name='2010-01', month=datetime.date(year=2010, month=1, day=20))
e.save()
pk = e.pk
e = Example.objects.get(pk=pk)
assert isinstance(e.month, Month)
assert e.month.month == 1
assert e.month.year == 2010
Example.objects.all().delete()
for year in range(2001, 2011):
for month in range(1, 13):
name = "%s - %02d" %(year, month)
Example(name=name, month=Month(year, month)).save()
qs = Example.objects.filter(month='2005-12')
assert qs.exists()
assert qs.count() == 1
qs = Example.objects.filter(month__gte='2005-12')
assert qs.exists()
self.assertEqual(qs.count(), 61)
qs = Example.objects.filter(month__gt='2005-12')
assert qs.exists()
assert qs.count() == 60
def tearDown(self):
Example.objects.all().delete()
| bsd-3-clause | 6,722,813,776,884,100,000 | 28.196629 | 84 | 0.552819 | false |
brianjimenez/lightdock | bin/test/post/test_generate_glowworm_positions.py | 1 | 1125 | """Test for generate_glowworm_positions post script"""
import os
import filecmp
import shutil
from ..regression import RegressionTest
class TestGenerateGlowwormPositions(RegressionTest):
def setup(self):
self.path = os.path.dirname(os.path.realpath(__file__))
self.test_path = self.path + '/scratch_generate_glowworm_positions/'
self.ini_test_path()
self.golden_data_path = os.path.normpath(os.path.dirname(os.path.realpath(__file__))) + \
'/golden_data/generate_glowworm_positions/'
def teardown(self):
self.clean_test_path()
def test_generate_conformations(self):
os.chdir(self.test_path)
shutil.copyfile(os.path.join(self.golden_data_path, 'gso_10.out'),
os.path.join(self.test_path, 'gso_10.out'))
command = "lgd_generate_glowworm_positions.py %s > test.out" % (os.path.join(self.test_path, 'gso_10.out'))
os.system(command)
assert filecmp.cmp(os.path.join(self.golden_data_path, 'gso_10.pdb'),
os.path.join(self.test_path, 'gso_10.pdb'))
| gpl-3.0 | 7,381,194,362,340,665,000 | 37.793103 | 115 | 0.629333 | false |
sh0ked/vmmaster | backend/queue_producer.py | 1 | 4076 | # coding: utf-8
import aioamqp
import uuid
import logging
from core.utils import async_wait_for
log = logging.getLogger(__name__)
class AsyncQueueProducer(object):
messages = {}
connection = None
channel = None
consumer_tag = None
responses_queue = None
commands_queue = None
def __init__(self, app):
self.app = app
async def connect(self):
params = {
'loop': self.app.loop,
'login': self.app.cfg.RABBITMQ_USER,
'password': self.app.cfg.RABBITMQ_PASSWORD,
'host': self.app.cfg.RABBITMQ_HOST,
'port': self.app.cfg.RABBITMQ_PORT
}
self.connection = await self.make_connection(params)
self.channel = await self.connection.channel()
self.responses_queue, self.consumer_tag = await self.create_queue_and_consume()
self.commands_queue = await self.create_queue(self.app.cfg.RABBITMQ_COMMAND_QUEUE)
async def create_queue(self, queue_name=None):
if not queue_name:
result = await self.channel.queue_declare(exclusive=True)
else:
result = await self.channel.queue_declare(queue_name=queue_name)
queue, messages, consumers = result.get('queue'), result.get('message_count'), result.get('consumer_count')
log.info("Queue %s was declared(messages: %s, consumers: %s)" % (queue, messages, consumers))
return queue
async def delete_queue(self, queue_name):
await self.channel.queue_delete(queue_name)
log.info('Queue %s was deleted' % queue_name)
async def create_queue_and_consume(self, queue_name=None):
if not queue_name:
queue_name = await self.create_queue()
else:
await self.create_queue(queue_name)
consumer_tag = await self.queue_consume(queue_name)
return queue_name, consumer_tag
@staticmethod
async def make_connection(params):
transport, connection = await aioamqp.connect(**params)
return connection
async def queue_consume(self, queue_name):
log.info("Start consuming for queue %s" % queue_name)
await self.channel.basic_consume(
callback=self.on_message, queue_name=queue_name, no_ack=False
)
async def on_message(self, channel, body, envelope, properties):
log.debug("Got new message %s" % body)
for correlation_id in list(self.messages.keys()):
if correlation_id == properties.correlation_id:
log.info("Response with corr_id %s from queue %s: %s" % (correlation_id, self.responses_queue, body))
self.messages[correlation_id]["response"] = body
channel.basic_client_ack(delivery_tag=envelope.delivery_tag)
async def add_msg_to_queue(self, queue_name, msg):
correlation_id = str(uuid.uuid4())
await self.channel.basic_publish(
payload=str(msg),
exchange_name='',
routing_key=queue_name,
properties={
"reply_to": self.responses_queue,
"correlation_id": correlation_id
})
log.info("Message(id:%s body: %s) was published to %s" % (correlation_id, msg, queue_name))
self.messages[correlation_id] = {"request": msg, "response": None}
return correlation_id
async def get_message_from_queue(self, correlation_id):
log.info("Waiting response for message with id: %s" % correlation_id)
response = await async_wait_for(
lambda: self.messages.get(correlation_id).get("response"),
self.app.loop,
timeout=self.app.cfg.BACKEND_REQUEST_TIMEOUT
)
del self.messages[correlation_id]
log.info("Got response %s for message with id: %s" % (response, correlation_id))
return response
async def add_msg_to_queue_with_response(self, queue_name, msg):
correlation_id = await self.add_msg_to_queue(queue_name, msg)
response = await self.get_message_from_queue(correlation_id)
return response
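# Illustrative usage sketch (an assumption for clarity, not part of the original
# driver; ``app`` is assumed to be a configured backend application):
#
#     producer = AsyncQueueProducer(app)
#     await producer.connect()
#     reply = await producer.add_msg_to_queue_with_response(
#         producer.commands_queue, '{"command": "ping"}')
#
# The reply is matched to its request through the AMQP correlation_id and the
# exclusive reply queue declared in connect().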
| mit | 1,020,926,549,248,728,000 | 38.192308 | 117 | 0.626349 | false |
fsmMLK/inkscapeCircuitSymbols | 0.9x/drawRLC.py | 1 | 14781 | #!/usr/bin/python
import inkscapeMadeEasy_Base as inkBase
import inkscapeMadeEasy_Draw as inkDraw
class RLC(inkBase.inkscapeMadeEasy):
# ---------------------------------------------
def drawBipoleGeneral(self, parent, position=[0, 0], value='Z', label='Bipole', angleDeg=0, flagVolt=True,
voltName='v', flagCurr=True, currName='i', invertArrows=False, convention='passive'):
""" draws a generic bipole with a rectangle
parent: parent object
position: position [x,y]
        value: string with the bipole value. (default 'Z')
label: label of the object (it can be repeated)
angleDeg: rotation angle in degrees counter-clockwise (default 0)
flagVolt: indicates whether the voltage arrow must be drawn (default: true)
voltName: voltage drop name (default: v)
flagCurr: indicates whether the current arrow must be drawn (default: true)
currName: current drop name (default: i)
invertArrows: invert V/I arrow directions (default: False)
convention: passive/active sign convention. available types: 'passive' (default) , 'active'
"""
group = self.createGroup(parent, label)
elem = self.createGroup(group)
inkDraw.line.relCoords(elem, [[15.5, 0]], position)
inkDraw.line.relCoords(elem, [[19, 0], [0, -6], [-19, 0], [0, 6]], [position[0] + 15.5, position[1] + 3])
inkDraw.line.relCoords(elem, [[15.5, 0]], [position[0] + 34.5, position[1]])
pos_text = [position[0] + 25, position[1] - 3 - self.textOffset]
if inkDraw.useLatex:
value = '$' + value + '$'
inkDraw.text.latex(self, group, value, pos_text, fontSize=self.fontSize, refPoint='bc',
preambleFile=self.preambleFile)
if angleDeg != 0:
self.rotateElement(group, position, angleDeg)
if flagVolt:
if convention == 'passive':
self.drawVoltArrow(group, [position[0] + 25, position[1] + 5], name=voltName, color=self.voltageColor,
angleDeg=angleDeg, invertArrows=not invertArrows)
if convention == 'active':
self.drawVoltArrow(group, [position[0] + 25, position[1] + 5], name=voltName, color=self.voltageColor,
angleDeg=angleDeg, invertArrows=invertArrows)
if flagCurr:
self.drawCurrArrow(group, [position[0] + 40, position[1] - 5], name=currName, color=self.currentColor,
angleDeg=angleDeg, invertArrows=invertArrows)
return group
# ---------------------------------------------
def drawResistor(self, parent, position=[0, 0], value='R', label='Resistor', angleDeg=0, flagVolt=True,
voltName='v', flagCurr=True, currName='i', invertArrows=False, convention='passive'):
""" draws a resistor
parent: parent object
position: position [x,y]
value: string with resistor value. If it ends with 'ohm', 'OHM' or 'Ohm', proper Ohm symbol will be added. (Default 'R')
label: label of the object (it can be repeated)
angleDeg: rotation angle in degrees counter-clockwise (default 0)
flagVolt: indicates whether the voltage arrow must be drawn (default: true)
voltName: voltage drop name (default: v)
flagCurr: indicates whether the current arrow must be drawn (default: true)
currName: current drop name (default: i)
invertArrows: invert V/I arrow directions (default: False)
convention: passive/active sign convention. available types: 'passive' (default) , 'active'
"""
group = self.createGroup(parent, label)
elem = self.createGroup(group)
inkDraw.line.relCoords(elem, [[15.5, 0], [2, 3], [3, -6], [3, 6], [3, -6], [3, 6], [3, -6], [2, 3], [15.5, 0]],
position)
pos_text = [position[0] + 25, position[1] - 3 - self.textOffset]
if inkDraw.useLatex:
value = '$' + value + '$'
inkDraw.text.latex(self, group, value, pos_text, fontSize=self.fontSize, refPoint='bc',
preambleFile=self.preambleFile)
if angleDeg != 0:
self.rotateElement(group, position, angleDeg)
if flagVolt:
if convention == 'passive':
self.drawVoltArrow(group, [position[0] + 25, position[1] + 5], name=voltName, color=self.voltageColor,
angleDeg=angleDeg, invertArrows=not invertArrows)
if convention == 'active':
self.drawVoltArrow(group, [position[0] + 25, position[1] + 5], name=voltName, color=self.voltageColor,
angleDeg=angleDeg, invertArrows=invertArrows)
if flagCurr:
self.drawCurrArrow(group, [position[0] + 40, position[1] - 5], name=currName, color=self.currentColor,
angleDeg=angleDeg, invertArrows=invertArrows)
return group
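    # Illustrative usage sketch (assumptions: ``self`` is an instance of this
    # class inside an Inkscape extension and ``layer`` is an existing group):
    #
    #     self.drawResistor(layer, position=[0, 0], value='10k',
    #                       angleDeg=90, flagVolt=True, flagCurr=True)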
# ---------------------------------------------
def drawPotentiometer(self, parent, position=[0, 0], value='R', label='Potentiometer', angleDeg=0, flagVolt=True,
voltName='v', flagCurr=True, currName='i', invertArrows=False, is3T=False,
convention='passive'):
""" draws a potentiometer
parent: parent object
position: position [x,y]
value: string with resistor value.
label: label of the object (it can be repeated)
angleDeg: rotation angle in degrees counter-clockwise (default 0)
flagVolt: indicates whether the voltage arrow must be drawn (default: true)
voltName: voltage drop name (default: v)
flagCurr: indicates whether the current arrow must be drawn (default: true)
currName: current drop name (default: i)
invertArrows: invert V/I arrow directions (default: False)
        is3T: indicates whether the potentiometer has 3 terminals (default: False)
convention: passive/active sign convention. available types: 'passive' (default) , 'active'
"""
group = self.createGroup(parent, label)
elem = self.createGroup(group)
# build arrow marker
colorBlack = inkDraw.color.defined('black')
L_arrow = 2.5
markerPath = 'M 0,0 l -%f,%f l 0,-%f z' % (L_arrow * 1.2, L_arrow / 2.0, L_arrow)
markerArrow = inkDraw.marker.createMarker(self, 'BJTArrow', markerPath, RenameMode=1, strokeColor=colorBlack,
fillColor=colorBlack, lineWidth=0.6,
markerTransform='translate (1,0)')
lineStyleArrow = inkDraw.lineStyle.set(lineWidth=1, lineColor=colorBlack, markerEnd=markerArrow)
inkDraw.line.relCoords(elem, [[15.5, 0], [2, 3], [3, -6], [3, 6], [3, -6], [3, 6], [3, -6], [2, 3], [15.5, 0]],
position)
        # wiper arrow: vertical for the 3-terminal variant, diagonal across the body for the 2-terminal one
if is3T:
inkDraw.line.relCoords(elem, [[0, -10]], [position[0] + 25, position[1] + 15], lineStyle=lineStyleArrow)
pos_text = [position[0] + 25, position[1] - 3 - self.textOffset]
else:
inkDraw.line.relCoords(elem, [[20, -12]], [position[0] + 15, position[1] + 6], lineStyle=lineStyleArrow)
pos_text = [position[0] + 25, position[1] - 6 - self.textOffset]
if inkDraw.useLatex:
value = '$' + value + '$'
inkDraw.text.latex(self, group, value, pos_text, fontSize=self.fontSize, refPoint='bc',
preambleFile=self.preambleFile)
if angleDeg != 0:
self.rotateElement(group, position, angleDeg)
if flagVolt:
if is3T:
pos = [position[0] + 25, position[1] - 13]
invertCurvature = True
else:
pos = [position[0] + 25, position[1] + 8]
invertCurvature = False
if convention == 'passive':
self.drawVoltArrowSimple(group, pos, name=voltName, color=self.voltageColor, angleDeg=0,
invertArrows=invertArrows, invertCurvatureDirection=invertCurvature)
if convention == 'active':
self.drawVoltArrowSimple(group, pos, name=voltName, color=self.voltageColor, angleDeg=0,
invertArrows=not invertArrows, invertCurvatureDirection=invertCurvature)
if flagCurr:
if is3T:
pos = [position[0] + 40, position[1] - 5]
else:
pos = [position[0] + 42, position[1] - 5]
self.drawCurrArrow(group, pos, name=currName, color=self.currentColor, angleDeg=angleDeg,
invertArrows=invertArrows)
return group
# ---------------------------------------------
def drawCapacitor(self, parent, position=[0, 0], value='C', label='Capacitor', flagPol=False, angleDeg=0,
flagVolt=True, voltName='v', flagCurr=True, currName='i', invertArrows=False,
convention='passive'):
""" draws a capacitor
parent: parent object
position: position [x,y]
value: string with value.
label: label of the object (it can be repeated)
flagPol: draw sign for polarized capacitor
angleDeg: rotation angle in degrees counter-clockwise (default 0)
flagVolt: indicates whether the voltage arrow must be drawn (default: true)
voltName: voltage drop name (default: v)
flagCurr: indicates whether the current arrow must be drawn (default: true)
currName: current drop name (default: i)
invertArrows: invert V/I arrow directions (default: False)
convention: passive/active sign convention. available types: 'passive' (default) , 'active'
"""
group = self.createGroup(parent, label)
elem = self.createGroup(group, label)
inkDraw.line.relCoords(elem, [[23, 0]], position)
inkDraw.line.relCoords(elem, [[-23, 0]], [position[0] + 50, position[1]])
inkDraw.line.relCoords(elem, [[0, -14]], [position[0] + 23, position[1] + 7])
inkDraw.line.relCoords(elem, [[0, -14]], [position[0] + 27, position[1] + 7])
pos_text = [position[0] + 25, position[1] - 8 - self.textOffset]
if inkDraw.useLatex:
value = '$' + value + '$'
inkDraw.text.latex(self, group, value, pos_text, fontSize=self.fontSize, refPoint='bc',
preambleFile=self.preambleFile)
if flagPol:
inkDraw.text.write(self, '+', [position[0] + 31, position[1] - 3], group, self.textStyle, fontSize=5)
if angleDeg != 0:
self.rotateElement(group, position, angleDeg)
if flagVolt:
if convention == 'passive':
self.drawVoltArrow(group, [position[0] + 25, position[1] + 9], name=voltName, color=self.voltageColor,
angleDeg=angleDeg, invertArrows=not invertArrows)
if convention == 'active':
self.drawVoltArrow(group, [position[0] + 25, position[1] + 9], name=voltName, color=self.voltageColor,
angleDeg=angleDeg, invertArrows=invertArrows)
if flagCurr:
self.drawCurrArrow(group, [position[0] + 40, position[1] - 5], name=currName, color=self.currentColor,
angleDeg=angleDeg, invertArrows=invertArrows)
return group
# ---------------------------------------------
    def drawInductor(self, parent, position=[0, 0], value='L', label='Inductor', angleDeg=0, flagVolt=True,
voltName='v', flagCurr=True, currName='i', invertArrows=False, convention='passive'):
""" draws an inductor
parent: parent object
position: position [x,y]
        value: string with the inductor value. (default 'L')
label: label of the object (it can be repeated)
angleDeg: rotation angle in degrees counter-clockwise (default 0)
flagVolt: indicates whether the voltage arrow must be drawn (default: true)
voltName: voltage drop name (default: v)
flagCurr: indicates whether the current arrow must be drawn (default: true)
currName: current drop name (default: i)
invertArrows: invert V/I arrow directions (default: False)
convention: passive/active sign convention. available types: 'passive' (default) , 'active'
"""
group = self.createGroup(parent, label)
elem = self.createGroup(group, label)
inkDraw.line.relCoords(elem, [[13, 0]], position)
inkDraw.line.relCoords(elem, [[-13, 0]], [position[0] + 50, position[1]])
inkDraw.arc.centerAngStartAngEnd(elem, [position[0] + 16, position[1]], 3.0, 0.0, 180.0, [0, 0], flagOpen=True,
largeArc=False)
inkDraw.arc.centerAngStartAngEnd(elem, [position[0] + 22, position[1]], 3.0, 0.0, 180.0, [0, 0], flagOpen=True,
largeArc=False)
inkDraw.arc.centerAngStartAngEnd(elem, [position[0] + 28, position[1]], 3.0, 0.0, 180.0, [0, 0], flagOpen=True,
largeArc=False)
inkDraw.arc.centerAngStartAngEnd(elem, [position[0] + 34, position[1]], 3.0, 0.0, 180.0, [0, 0], flagOpen=True,
largeArc=False)
pos_text = [position[0] + 25, position[1] - self.textOffset]
if inkDraw.useLatex:
value = '$' + value + '$'
inkDraw.text.latex(self, group, value, pos_text, fontSize=self.fontSize, refPoint='bc',
preambleFile=self.preambleFile)
if angleDeg != 0:
self.rotateElement(group, position, angleDeg)
if flagVolt:
if convention == 'passive':
self.drawVoltArrow(group, [position[0] + 25, position[1] + 5], name=voltName, color=self.voltageColor,
angleDeg=angleDeg, invertArrows=not invertArrows)
if convention == 'active':
self.drawVoltArrow(group, [position[0] + 25, position[1] + 5], name=voltName, color=self.voltageColor,
angleDeg=angleDeg, invertArrows=invertArrows)
if flagCurr:
self.drawCurrArrow(group, [position[0] + 40, position[1] - 5], name=currName, color=self.currentColor,
angleDeg=angleDeg, invertArrows=invertArrows)
return group
| gpl-3.0 | -5,235,465,511,251,690,000 | 48.767677 | 128 | 0.577633 | false |
Iconoclasteinc/tgit | testing/drivers/track_list_tab_driver.py | 1 | 3521 | # -*- coding: utf-8 -*-
from PyQt5.QtWidgets import QMenu, QTableWidget
from hamcrest import contains, has_items, equal_to
from cute import gestures
from cute.matchers import named
from cute.widgets import MenuDriver, TableViewDriver
from tgit.ui.pages.track_list_tab import TrackListTab
from ._screen_driver import ScreenDriver
def track_list_tab(parent):
return TrackListTabDriver.find_single(parent, TrackListTab, named("track_list_tab"))
class TrackListTabDriver(ScreenDriver):
def __init__(self, selector, prober, gesture_performer):
super().__init__(selector, prober, gesture_performer)
def shows_column_headers(self, *headers):
self._track_table().has_headers(contains(*headers))
def shows_track_details(self, *details):
return self._track_table().has_row(has_items(*details))
def has_selected_track(self, *cells):
return self._track_table().has_selected_row(has_items(*cells))
def shows_tracks_in_order(self, *tracks):
rows = [has_items(*[column for column in track]) for track in tracks]
return self._track_table().contains_rows(contains(*rows))
def has_track_count(self, count):
self._track_table().has_row_count(equal_to(count))
def add_tracks(self):
self.button(named("_add_tracks_button")).click()
def has_context_menu_item(self, matching):
context_menu = self._from_context_menu()
context_menu.has_menu_item(matching)
context_menu.close()
@property
def remove_button(self):
return self.button(named("_remove_track_button"))
@property
def move_up_button(self):
return self.button(named("_move_track_up_button"))
@property
def move_down_button(self):
return self.button(named("_move_track_down_button"))
def has_disabled_play_context_menu_item(self, title):
self.select_track(title)
self._from_context_menu().menu_item(named("_play_action")).is_disabled()
def _from_context_menu(self):
self.perform(gestures.mouse_right_click())
return MenuDriver.find_single(self, QMenu, named("context_menu"))
def select_track(self, title):
row = self.shows_track_details(title)
self._track_table().click_on_cell(row, 0)
def play_track(self, title):
self.select_track(title)
self._from_context_menu().select_menu_item(named("_play_action"))
def stop_track(self, title):
self.select_track(title)
self._from_context_menu().select_menu_item(named("_stop_action"))
def remove_selected_track(self, using="shortcut"):
if using == "shortcut":
self.perform(gestures.delete_previous())
elif using == "menu":
self._from_context_menu().select_menu_item(named("_remove_action"))
elif using == "button":
self.remove_button.click()
else:
raise AssertionError("Don't know how to remove a track using {}", using)
def remove_track(self, title):
self.select_track(title)
self.remove_selected_track()
def move_track(self, title, to):
from_ = self.shows_track_details(title)
self._track_table().move_row(from_, to)
def move_track_up(self):
self.move_up_button.click()
def move_track_down(self):
self.move_down_button.click()
def _track_table(self):
table = TableViewDriver.find_single(self, QTableWidget, named('_track_table'))
table.is_showing_on_screen()
return table
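# Illustrative usage sketch in a GUI test (assumed names and track titles):
#
#     tab = track_list_tab(main_window)
#     tab.add_tracks()
#     tab.shows_track_details('Chevere!')
#     tab.remove_track('Chevere!')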
| gpl-3.0 | 5,953,029,582,942,851,000 | 33.184466 | 88 | 0.652655 | false |
nict-isp/scn-openflow-driver | src/ncps_openflow/protocols/application/tcp.py | 1 | 13729 | # -*- coding: utf-8 -*-
"""
protocols.application.tcp
~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: Copyright (c) 2015, National Institute of Information and Communications Technology. All rights reserved.
:license: GPL3, see LICENSE for more details.
"""
import logging
from protocols.application.application import Server
from protocols.application.application import Client
from protocols import ipv4 as Ipv4Agent
from protocols import tcp as TcpAgent
from protocols.tcp import TcpSegment
from protocols.tcp import TcpConnection
from pox.core import core
from pox.lib.packet.tcp import tcp
from random import randint
log = logging.getLogger('protocols.application.tcp')
class TcpServer(Server):
"""Implements a basic TCP Server which handles raw TCP packets passed to it."""
protocol = tcp
agent = TcpAgent
def __init__(self, lport, max_active_conns=1000): #was max_active_conns=250
"""port is the port the TCPServer should listen for SYN packets on."""
assert lport>=0 and lport<65536, "Port must be between 0 and 65536 (exclusive)"
self.lport = lport
self.connections = {}
self.max_active_conns = max_active_conns
Server.__init__(self)
def processPacket(self, packet, *args, **kwargs):
tcpPkt = self.agent.extract(packet)
if tcpPkt is None:
return None
if not self.matches(packet, *args, **kwargs):
return None
for f in self.filters:
packet = f.packetIn(packet)
conn = self.getConnection(packet, *args, **kwargs)
if not conn:
return None
conn = self.processTcpPkt(conn, packet, tcpPkt)
if not conn or conn.closed:
return None
conn = self.processTcpData(conn, packet, tcpPkt, *args, **kwargs)
_tcpPkt = self.getReply(packet, *args, **kwargs)
if _tcpPkt is None:
return None
resp = self.agent.buildStandardTcpResponse(packet, _tcpPkt, payload=_tcpPkt.next, ipId=None if not conn.ipId else conn.ipId+1)
if conn.ipId is None:
ipId = Ipv4Agent.extractId(resp)
if ipId == 0:
ipId = randint(0, 2**16-1)
conn.ipId = ipId
conn.ipId += 1
return resp
def matches(self, packet, *args, **kwargs):
dstip, dstport = self.agent.extractDst(packet)
if dstport == self.lport:
return True
return False
def getReply(self, packet, *args, **kwargs):
conn = self.getConnection(packet, *args, **kwargs)
if conn is None:
return None
pkts = conn.get_packets_to_send()
if not pkts:
return
return pkts[0]
def getConnection(self, packet, *args, **kwargs):
socPair = self.agent.extractConnection(packet)
key = self.agent.socPairInt(socPair)
conn = self.connections.get(key)
if not conn:
conn = self.createConnection(packet, socPair, *args, **kwargs)
if conn is None:
return conn
self.connections[key] = conn
log.debug("{TcpServer} adding the %dth connection [%s]" % (len(self.connections), key))
return conn
def createConnection(self, packet, socPair, *args, **kwargs):
if len(self.connections) >= self.max_active_conns:
s = 'Ignoring new connection request:'
s += 'already have %d active connections'
log.warn(s % self.max_active_conns)
return None
if not self.agent.isSyn(packet):
return None
_kwargs = {}
_kwargs.update(kwargs)
_kwargs['connection_over_cb'] = self.connectionClosed
_kwargs['has_data_to_send_cb'] = self.connHasDataToSend
conn = TcpConnection.createFromPacket(packet, **_kwargs)
return conn
def connHasDataToSend(self, conn):
if conn is None:
return None
pkts = conn.get_packets_to_send()
if len(pkts)==0:
return None
tcpPkt = pkts[0]
pkt = self.agent.buildFrameFromConn(conn, tcpPkt)
self.sendConnectionPkt(conn, pkt)
def sendConnectionPkt(self, conn, pkt):
self.sendPkt(pkt)
def processTcpPkt(self, conn, packet, tcpPkt):
seq = self.agent.extractSeq(tcpPkt)
if seq is None:
return None
try:
if len(tcpPkt.next) > 0:
segment = TcpSegment(seq, tcpPkt.next)
conn.add_segment(segment)
except Exception as inst:
log.exception(inst)
conn.close()
return None
if self.agent.isFin(tcpPkt):
conn.fin_received(seq)
window = self.agent.extractWin(tcpPkt)
if window is None:
return None
conn.window = max(1460, window) # ignore requests to shrink the window below an MTU
if self.agent.isAck(tcpPkt):
ack = self.agent.extractAck(tcpPkt)
if ack is None:
return None
conn.set_ack(ack)
return conn
def processTcpData(self, conn, packet, tcpPkt, *args, **kwargs):
if not conn or conn.closed:
return conn
if not conn.has_ready_data():
return conn
data = conn.get_data()
self.payloadReceived(packet, data, *args, **kwargs)
conn.segments = []
return conn
def sendPayload(self, payload, *args, **kwargs):
""" TODO """
def connectionClosed(self, *args, **kwargs):
"""Called when it is ready to be removed. Removes the connection."""
if len(args) == 0:
return
conn = args[0]
if not conn:
return
socPair = conn.get_socket_pair()
socPair = socPair[::-1]
key = self.agent.socPairInt(socPair)
try:
conn = self.connections[key]
core.callDelayed(1, self.delConnection, key)
if not conn.closed:
conn.close()
except KeyError:
log.warn('Tried to remove connection which is not in our dictionary: %s' % str(key))
pass
def delConnection(self, key):
try:
del self.connections[key]
log.debug("Deleting the %dth connection [%s]" % (len(self.connections)+1, key))
except:
log.error("unable to delete this connection [%s]" % key)
pass
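# Illustrative subclass sketch (an assumption, not part of the original code):
# a concrete service would typically override payloadReceived() to consume the
# reassembled application data, e.g.
#
#     class LoggingTcpServer(TcpServer):
#         def payloadReceived(self, packet, data, *args, **kwargs):
#             log.debug('received %d bytes from the peer' % len(data))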
class OF_TcpServer(TcpServer):
def sendConnectionPkt(self, conn, pkt):
self.sendPkt(pkt, conn.dpid, conn.port)
def getConnection(self, packet, dpid, port, *args, **kwargs):
socPair = self.agent.extractConnection(packet)
key = self.agent.socPairInt(socPair)
conn = self.connections.get(key)
if not conn:
kwargs['dpid'] = dpid
kwargs['port'] = port
conn = self.createConnection(packet, socPair, *args, **kwargs)
if conn is None:
return conn
self.connections[key] = conn
log.debug("{OF_TcpServer} Adding the %dth connection [%s]" % (len(self.connections), key))
return conn
class TcpClient(Client, TcpConnection):
protocol = tcp
agent = TcpAgent
def __init__(self, src, dst, payload=''):
kwargs = {}
kwargs['my_mac'] = src[0]
kwargs['my_ip'] = src[1]
if len(src) == 3:
srcport = src[2]
kwargs['my_port'] = srcport
kwargs['other_mac'] = dst[0]
kwargs['other_ip'] = dst[1]
kwargs['other_port'] = dst[2]
kwargs['connection_over_cb'] = self.connectionClosed
kwargs['has_data_to_send_cb'] = self.connHasDataToSend
kwargs['payload'] = payload
self.d = None # deferred
TcpConnection.__init__(self, **kwargs)
Client.__init__(self)
def start(self):
tcpPkt = self.createSyn()
self.firstSYN = tcpPkt
packet = self.agent.buildFrameFromConn(self, tcpPkt)
self.sendPkt(packet)
def processPacket(self, packet, *args, **kwargs):
tcpPkt = self.agent.extract(packet)
if tcpPkt is None:
return None
if not self.matches(packet, *args, **kwargs):
return None
for f in self.filters:
packet = f.packetIn(packet)
if self.agent.isRst(tcpPkt) and not self.my_first_syn_acked:
self.doConnectionFailure()
return
self.processTcpPkt(packet, tcpPkt)
self.processTcpData(packet, tcpPkt, *args, **kwargs)
_tcpPkt = self.getReply(packet, *args, **kwargs)
if _tcpPkt is None:
return None
resp = self.agent.buildStandardTcpResponse(packet, _tcpPkt, payload=_tcpPkt.next, ipId=None if not self.ipId else self.ipId)
if self.ipId is None:
ipId = Ipv4Agent.extractId(resp)
if ipId == 0:
                ipId = randint(0, 2**16-1)
            self.ipId = ipId
self.ipId += 1
return resp
def matches(self, packet, *args, **kwargs):
src = (self.my_mac, self.my_ip, self.my_port)
dst = (self.other_mac, self.other_ip, self.other_port)
socPair = self.agent.extractConnection(packet)
if src != socPair[1]:
return False
if dst != socPair[0]:
return False
return True
def processTcpPkt(self, packet, tcpPkt):
ethPkt = packet
ipPkt = ethPkt.find('ipv4')
if tcpPkt.payload_len > 0 and not (ipPkt.iplen==tcpPkt.hdr_len+len(ipPkt.hdr(''))):
self.add_segment(TcpSegment(tcpPkt.seq, tcpPkt.next))
if self.agent.isFin(tcpPkt):
if not self.closed:
self.fin_received(tcpPkt.seq)
# remember window and latest ACK
self.window = max(1460, tcpPkt.win) # ignore requests to shrink the window below an MTU
if not self.agent.isAck(tcpPkt):
return
if not self.my_first_syn_acked:
self.my_first_syn_acked = True
self.my_syn_acked = True
self.need_to_send_ack = True
self.first_unacked_seq = tcpPkt.ack
self.next_seq_needed = tcpPkt.seq + 1
if self.agent.isFin(tcpPkt) and self.closed:
# it means we already sent a fin, ack and we just received a fin, ack
self.need_to_send_ack = True
self.last_seq_sent += 1
self.next_seq_needed += 1
self.set_ack(tcpPkt.ack)
else:
if self.my_first_syn_acked and not self.connected:
self.connected = True
core.callDelayed(0.01, self.connectionEstablished)
self.set_ack(tcpPkt.ack)
def processTcpData(self, packet, tcpPkt, *args, **kwargs):
if not self.has_ready_data():
return self
data = self.get_data()
self.payloadReceived(packet, data, *args, **kwargs)
self.segments = []
def getReply(self, packet, *args, **kwargs):
if not self.my_first_syn_acked:
return self.firstSYN
pkts = self.get_packets_to_send()
if not pkts:
return
return pkts[0]
def connHasDataToSend(self, conn):
if self != conn:
return None
pkts = self.get_packets_to_send()
if len(pkts) == 0:
return None
tcpPkt = pkts[0]
pkt = self.agent.buildFrameFromConn(self, tcpPkt)
self.sendPkt(pkt)
def connectionEstablished(self):
""" to be overriden """
# when syn, syn_ack, ack is finished
def connectionLost(self):
""" to be overriden """
def doConnectionFailure(self):
self.dead = True
self.connectionFailure()
def connectionFailure(self):
""" to be overriden """
def sendPayload(self, payload, *args, **kwargs):
i = 0
        payloadLength = len(payload)
        while i < payloadLength:
            # TODO: replace the fixed 1000-byte chunk with the real MSS for this connection (typically 1460)
            endOfSegment = min(i + 1000, payloadLength)
dataToSend = payload[i:endOfSegment]
i = endOfSegment
tcpPkt = self.buildDataTransmissionAck(dataToSend)
packet = self.agent.buildFrameFromConn(self, tcpPkt)
self.sendPkt(packet)
self.last_seq_sent += len(dataToSend)
self.all_data_sent = True
tcpPkt = self.buildFin(self.last_seq_sent+1)
packet = self.agent.buildFrameFromConn(self, tcpPkt)
self.sendPkt(packet)
self.my_fin_sent = True
self.last_seq_sent += 1
self.payloadSent(payload, *args, **kwargs)
def connectionClosed(self, *args, **kwargs):
core.callDelayed(0.01, self.finished)
class OF_TcpClient(TcpClient):
def __init__(self, dpid, port, src, dst, payload=''):
self.dpid = dpid
self.port = port
TcpClient.__init__(self, src, dst, payload)
def matches(self, packet, dpid, port):
_dpid = getattr(self, 'dpid', None)
if _dpid is None:
self.dpid = dpid
if self.dpid != dpid:
return False
_port = getattr(self, 'port', None)
if _port is None:
self.port = port
if self.port != port:
return False
return TcpClient.matches(self, packet, dpid, port)
def sendPkt(self, pkt, *args, **kwargs):
if not self.sendCb:
return
TcpClient.sendPkt(self, pkt, self.dpid, self.port)
| gpl-3.0 | 1,546,076,728,354,864,000 | 27.365702 | 134 | 0.571127 | false |
csningli/MultiAgent | examples/avoid_static_obstacle/static_sim.py | 1 | 2287 |
# MultiAgent 2.0
# (c) 2017-2018, NiL, [email protected]
import sys, math
sys.path.append("../..")
from mas.multiagent import *
from mas.extension import ShowLabelObject
POS_ERROR = 5
SPIN_SPEED = math.pi / 6.0
class SpinModule(ObjectModule) :
def act(self, resp) :
resp.add_msg(Message(key = "avel", value = SPIN_SPEED))
super(SpinModule, self).act(resp)
class AvoidObstacleAgent(Agent) :
def __init__(self, name) :
super(AvoidObstacleAgent, self).__init__(name)
self.mods = [RadarModule(), SpinModule()]
def get_focus(self) :
focus_info = super(AvoidObstacleAgent, self).get_focus()
pos = self.mem.read("pos", None)
detect = self.mem.read("radar_detect", None)
if detect is not None :
for i, block in enumerate(detect) :
if pos is None or abs(block[2] - pos[0]) > POS_ERROR or abs(block[3] - pos[1]) > POS_ERROR :
focus_info["block_%d" % i] = "(%.1f, %.1f)" % (block[2], block[3])
else :
focus_info["detect"] = "none"
return focus_info
def run_sim(filename = None) :
'''
run_sim(filename = None)
------------------------
filename : the name of the file to save the data; None by default.
'''
# create the oracle space
oracle = OracleSpace()
# create the context
context = Context(oracle = oracle)
# create the schedule for adding agents in the running
schedule = Schedule()
# add objects and agents to the context
obt = Obstacle(name ="0", a = (50.0, -50.0), b = (50.0, 50.0), radius = 2.0)
context.add_obt(obt)
obj = ShowLabelObject(name = "0")
obj.pos = (0, 0)
context.add_obj(obj)
agent = AvoidObstacleAgent(name = "0")
agent.mem.reg("radar_dist", 100.0)
schedule.add_agent(agent)
# create the driver
driver = Driver(context = context, schedule = schedule)
# create the inspector
# inspector = Inspector(delay = 10)
# create the simulator
sim = Simulator(driver = driver)
print("Simulating")
sim.simulate(graphics = True, filename = filename)
if __name__ == '__main__' :
filename = None
if (len(sys.argv) > 1) :
filename = sys.argv[1]
run_sim(filename = filename)
| apache-2.0 | -4,195,351,703,851,307,000 | 23.591398 | 108 | 0.589418 | false |
olivierverdier/sfepy | sfepy/mechanics/matcoefs.py | 1 | 12857 | # -*- coding: utf-8 -*-
from sfepy.base.base import *
##
# c: 22.07.2008
def youngpoisson_to_lame( young, poisson, plane = 'strain' ):
r"""
The relationship between Lame parameters and Young's modulus, Poisson's
ratio (see [1],[2]):
.. math::
\lambda = {\nu E \over (1+\nu)(1-2\nu)},\qquad \mu = {E \over 2(1+\nu)}
The plain stress hypothesis:
.. math::
\bar\lambda = {2\lambda\mu \over \lambda + 2\mu}
[1] I.S. Sokolnikoff: Mathematical Theory of Elasticity. New York, 1956.
[2] T.J.R. Hughes: The Finite Element Method, Linear Static and Dynamic
Finite Element Analysis. New Jersey, 1987.
"""
mu = young/(2.0*(1.0 + poisson))
lam = young*poisson/((1.0 + poisson)*(1.0 - 2.0*poisson))
if plane == 'stress':
lam = 2*lam*mu/(lam + 2*mu)
return lam, mu
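# Illustrative check (assumed values): for E = 210e9 and nu = 0.3 the plane
# strain conversion gives mu = E / (2 * (1 + nu)) ~= 8.08e10 and
# lam = E * nu / ((1 + nu) * (1 - 2 * nu)) ~= 1.21e11.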
##
# c: 22.07.2008
def stiffness_tensor_lame( dim, lam, mu ):
r"""
Stiffness tensor - using Lame coefficients
.. math::
{\bm D}_{(2D)} = \begin{bmatrix} \lambda + 2\mu & \lambda & 0\\
\lambda & \lambda + 2\mu & 0\\ 0 & 0 & \mu \end{bmatrix}
.. math::
{\bm D}_{(3D)} = \begin{bmatrix} \lambda + 2\mu & \lambda &
\lambda & 0 & 0 & 0\\ \lambda & \lambda + 2\mu & \lambda & 0 & 0 & 0 \\
\lambda & \lambda & \lambda + 2\mu & 0 & 0 & 0 \\ 0 & 0 & 0 & \mu & 0 &
0 \\ 0 & 0 & 0 & 0 & \mu & 0 \\ 0 & 0 & 0 & 0 & 0 & \mu\\ \end{bmatrix}
"""
sym = (dim + 1) * dim / 2
o = nm.array( [1.] * dim + [0.] * (sym - dim), dtype = nm.float64 )
oot = nm.outer( o, o )
return lam * oot + mu * nm.diag( o + 1.0 )
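# Illustrative check (assumed values): stiffness_tensor_lame(2, lam=1.0, mu=1.0)
# gives [[3., 1., 0.], [1., 3., 0.], [0., 0., 1.]] - lambda + 2*mu on the
# normal-strain diagonal, lambda off-diagonal and mu in the shear entry.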
##
# c: 22.07.2008
def stiffness_tensor_youngpoisson( dim, young, poisson, plane = 'strain' ):
    """Stiffness tensor in terms of Young's modulus and Poisson's ratio."""
    lam, mu = youngpoisson_to_lame( young, poisson, plane )
    return stiffness_tensor_lame( dim, lam, mu )
##
# c: 10.08.2009
def stiffness_tensor_lame_mixed( dim, lam, mu ):
r"""
Stiffness tensor - using Lame coefficients
.. math::
{\bm D}_{(2D)} = \begin{bmatrix} \widetilde\lambda + 2\mu &
\widetilde\lambda & 0\\ \widetilde\lambda & \widetilde\lambda + 2\mu &
0\\ 0 & 0 & \mu \end{bmatrix}
.. math::
{\bm D}_{(3D)} = \begin{bmatrix} \widetilde\lambda + 2\mu &
\widetilde\lambda & \widetilde\lambda & 0 & 0 & 0\\ \widetilde\lambda &
\widetilde\lambda + 2\mu & \widetilde\lambda & 0 & 0 & 0 \\
\widetilde\lambda & \widetilde\lambda & \widetilde\lambda + 2\mu & 0 &
0 & 0 \\ 0 & 0 & 0 & \mu & 0 & 0 \\ 0 & 0 & 0 & 0 & \mu & 0 \\ 0 & 0 &
0 & 0 & 0 & \mu\\ \end{bmatrix}
where
.. math::
\widetilde\lambda = {2\over 3} (\lambda - \mu)
"""
sym = (dim + 1) * dim / 2
o = nm.array( [1.] * dim + [0.] * (sym - dim), dtype = nm.float64 )
oot = nm.outer( o, o )
return 2.0/3.0*(lam-mu) * oot + mu * nm.diag( o + 1.0 )
##
# c: 10.08.2009
def stiffness_tensor_youngpoisson_mixed( dim, young, poisson, plane = 'strain' ):
lam, mu = youngpoisson_to_lame( young, poisson, plane )
return stiffness_tensor_lame_mixed( dim, lam, mu )
##
# c: 10.08.2009
def bulk_modulus_lame( lam, mu ):
r"""
Bulk modulus - using Lame coefficients
.. math::
\gamma = {1\over 3}(\lambda + 2\mu)
"""
return 1.0/3.0 * (2*mu + lam)
##
# c: 10.08.2009
def bulk_modulus_youngpoisson( young, poisson, plane = 'strain' ):
lam, mu = youngpoisson_to_lame( young, poisson, plane )
return bulk_modulus_lame( lam, mu )
elastic_constants_relations = {
}
class ElasticConstants(Struct):
r"""
Conversion formulas for various groups of elastic constants. The elastic
constants supported are:
- :math:`E` : Young's modulus
- :math:`\nu` : Poisson's ratio
- :math:`K` : bulk modulus
- :math:`\lambda` : Lamé's first parameter
- :math:`\mu, G` : shear modulus, Lamé's second parameter
- :math:`M` : P-wave modulus, longitudinal wave modulus
The elastic constants are referred to by the following keyword arguments:
young, poisson, bulk, lam, mu, p_wave.
Exactly two of them must be provided to the __init__() method.
Examples
--------
- basic usage::
>>> from sfepy.mechanics.matcoefs import ElasticConstants
>>> ec = ElasticConstants(lam=1.0, mu=1.5)
>>> ec.young
3.6000000000000001
>>> ec.poisson
0.20000000000000001
>>> ec.bulk
2.0
>>> ec.p_wave
4.0
>>> ec.get(['bulk', 'lam', 'mu', 'young', 'poisson', 'p_wave'])
[2.0, 1.0, 1.5, 3.6000000000000001, 0.20000000000000001, 4.0]
- reinitialize existing instance::
>>> ec.init(p_wave=4.0, bulk=2.0)
>>> ec.get(['bulk', 'lam', 'mu', 'young', 'poisson', 'p_wave'])
[2.0, 1.0, 1.5, 3.6000000000000001, 0.20000000000000001, 4.0]
"""
def __init__(self, young=None, poisson=None, bulk=None, lam=None,
mu=None, p_wave=None, _regenerate_relations=False):
"""
Set exactly two of the elastic constants, and compute the remaining.
"""
self.names = ['bulk', 'lam', 'mu', 'young', 'poisson', 'p_wave']
if _regenerate_relations:
self.relations = self._construct_relations()
else:
from elastic_constants import relations
self.relations = relations
## print sorted(self.relations.keys())
## print len(self.relations)
self.init(young=young, poisson=poisson, bulk=bulk, lam=lam,
mu=mu, p_wave=p_wave)
def _construct_relations(self):
"""
Construct the dictionary of all relations among the six elastic
constants and save it as `elastic_constants.py` module, that can be
imported for reuse. Users should not call this!
"""
import sympy as sm
relations = {}
def _expand_keys(sols):
for key, val in sols.iteritems():
if len(val) == 2 and (key.name == 'poisson'):
val = val[0]
else:
val = val[-1]
skey = tuple(sorted([ii.name for ii in val.atoms()
if ii.is_Symbol])) + (key.name,)
if skey in relations:
print '!', skey
relations[skey] = val
bulk, lam, mu, young, poisson, p_wave = sm.symbols(self.names, real=True)
_expand_keys(sm.solve(bulk - (lam + 2 * mu / 3)))
_expand_keys(sm.solve(young - (mu * (3 * lam + 2 * mu) / (lam + mu))))
_expand_keys(sm.solve(poisson - (lam / (2 * (lam + mu)))))
_expand_keys(sm.solve(p_wave - (lam + 2 * mu)))
_expand_keys(sm.solve(bulk - (young / (3 * (1 - 2 * poisson)))))
_expand_keys(sm.solve(p_wave - ((young * (1 - poisson))
/ ((1 + poisson) * (1 - 2 * poisson)))))
# Choose the correct root manually.
## relations[('p_wave', 'young', 'poisson')] \
## = (young - p_wave + (-10*p_wave*young + young**2 +
## 9*p_wave**2)**(0.5))/(4*p_wave)
_expand_keys(sm.solve(lam - (young * poisson
/ ((1 + poisson) * (1 - 2 * poisson)))))
# Choose the correct root.
## relations[('lam', 'young', 'poisson')] \
## = (lam + young - (2*lam*young + young**2 +
## 9*(lam**2))**(0.5))/(-4*lam)
_expand_keys(sm.solve(mu - (young / (2 * (1 + poisson)))))
_expand_keys(sm.solve(bulk - (young * mu / (3 * (3 * mu - young)))))
_expand_keys(sm.solve(p_wave - (mu * (4 * mu - young)
/ (3 * mu - young))))
_expand_keys(sm.solve(young - (9 * bulk * (bulk - lam)
/ (3 * bulk - lam))))
_expand_keys(sm.solve(poisson - (lam / (3 * bulk - lam))))
_expand_keys(sm.solve(p_wave - (3 * bulk - 2 * lam)))
_expand_keys(sm.solve(poisson - ((3 * bulk - 2 * mu)
/ (2 * (3 * bulk + mu)))))
_expand_keys(sm.solve(p_wave - (bulk + 4 * mu / 3)))
_expand_keys(sm.solve(p_wave - (lam * (1 - poisson) / poisson)))
_expand_keys(sm.solve(p_wave - (2 * mu * (1 - poisson)
/ (1 - 2 * poisson))))
_expand_keys(sm.solve(p_wave - (3 * bulk * (1 - poisson)
/ (1 + poisson))))
_expand_keys(sm.solve(p_wave - (3 * bulk * (3 * bulk + young)
/ (9 * bulk - young))))
_expand_keys(sm.solve(young - ((lam*p_wave + p_wave**2 - 2*lam**2)
/ (lam + p_wave))))
fd = open(os.path.join(os.path.dirname(__file__),
'elastic_constants.py'), 'w')
fd.write("""
from __future__ import division
import sympy as sm
names = ['bulk', 'lam', 'mu', 'young', 'poisson', 'p_wave']
bulk, lam, mu, young, poisson, p_wave = sm.symbols(names, real=True)
relations = {
%s
}
""" % ',\n'.join([' %s : %s' % (key, val)
for key, val in relations.iteritems()]))
fd.close()
return relations
def init(self, young=None, poisson=None, bulk=None, lam=None,
mu=None, p_wave=None):
"""
Set exactly two of the elastic constants, and compute the
remaining. (Re)-initializes the existing instance of ElasticConstants.
"""
Struct.__init__(self, young=young, poisson=poisson, bulk=bulk, lam=lam,
mu=mu, p_wave=p_wave)
values = {}
for key, val in self.__dict__.iteritems():
if (key in self.names) and (val is not None):
values[key] = val
known = values.keys()
if len(known) != 2:
raise ValueError('exactly two elastic constants must be provided!')
unknown = set(self.names).difference(known)
for name in unknown:
key = tuple(sorted(known)) + (name,)
val = float(self.relations[key].n(subs=values))
setattr(self, name, val)
def get(self, names):
"""
Get the named elastic constants.
"""
out = [getattr(self, name) for name in names]
return out
class TransformToPlane( Struct ):
"""Transformmations of constitutive law coefficients of 3D problems to 2D."""
def __init__( self, iplane = None ):
"""`iplane` ... vector of indices denoting the plane, e.g.: [0, 1]"""
if iplane is None:
iplane = [0, 1]
# Choose the "master" variables and the "slave" ones
# ... for vectors
i_m = nm.sort( iplane )
i_s = nm.setdiff1d( nm.arange( 3 ), i_m )
# ... for second order tensors (symmetric storage)
i_ms = {(0, 1) : [0, 1, 3],
(0, 2) : [0, 2, 4],
(1, 2) : [1, 2, 5]}[tuple( i_m )]
i_ss = nm.setdiff1d( nm.arange( 6 ), i_ms )
Struct.__init__( self, iplane = iplane,
i_m = i_m, i_s = i_s,
i_ms = i_ms, i_ss = i_ss )
def tensor_plane_stress( self, c3 = None, d3 = None, b3 = None ):
"""Transforms all coefficients of the piezoelectric constitutive law
from 3D to plane stress problem in 2D: strain/stress ordering/ 11 22
33 12 13 23. If `d3` is None, uses only the stiffness tensor `c3`.
`c3` ... stiffness tensor
`d3` ... dielectric tensor
`b3` ... piezoelectric coupling tensor"""
mg = nm.meshgrid
cs = c3[mg(self.i_ss,self.i_ss)]
cm = c3[mg(self.i_ss,self.i_ms)].T
if d3 is None: # elasticity only.
A = cs
Feps = cm
Ainv = nm.linalg.inv( A )
c2 = c3[mg(self.i_ms,self.i_ms)] \
- nm.dot( Feps.T, nm.dot( Ainv, Feps ) )
return c2
else:
dm = d3[mg(self.i_s,self.i_m)].T
ds = d3[mg(self.i_s,self.i_s)]
ii = mg( self.i_s, self.i_ss )
A = nm.r_[nm.c_[cs, b3[ii]],
nm.c_[b3[ii].T, -ds]] #=> sym !!!
F = nm.r_[nm.c_[cm, b3[mg(self.i_m,self.i_ss)]],
nm.c_[b3[mg(self.i_s,self.i_ms)].T, -dm ]]
Feps = F[:,:3]
FE = F[:,3:]
Ainv = nm.linalg.inv( A )
c2 = c3[mg(self.i_ms,self.i_ms)] \
- nm.dot( Feps.T, nm.dot( Ainv, Feps ) )
d2 = d3[mg(self.i_m,self.i_m)] \
- nm.dot( FE.T, nm.dot( Ainv, FE ) )
b2 = b3[mg(self.i_m,self.i_ms)].T \
- nm.dot( FE.T, nm.dot( Ainv, Feps ) )
return c2, d2, b2
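    # Usage sketch (illustrative only; `c3`, `d3`, `b3` are assumed to be given
    # in the 6-component symmetric storage used above):
    #
    #   tr = TransformToPlane(iplane=[0, 1])
    #   c2 = tr.tensor_plane_stress(c3=c3)               # elasticity only
    #   c2, d2, b2 = tr.tensor_plane_stress(c3, d3, b3)  # piezoelectric case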
| bsd-3-clause | -4,280,246,495,239,971,000 | 33.371658 | 81 | 0.496227 | false |
opennode/waldur-mastermind | src/waldur_mastermind/booking/processors.py | 1 | 4711 | from django.db import transaction
from django.utils import timezone
from django.utils.dateparse import datetime_re, parse_datetime
from django.utils.translation import ugettext_lazy as _
from rest_framework.serializers import ValidationError
from waldur_mastermind.booking.utils import (
get_offering_bookings,
get_other_offering_booking_requests,
)
from waldur_mastermind.marketplace import models as marketplace_models
from waldur_mastermind.marketplace import processors
from .utils import TimePeriod, is_interval_in_schedules, sort_attributes_schedules
class BookingCreateProcessor(processors.BaseOrderItemProcessor):
def process_order_item(self, user):
with transaction.atomic():
sort_attributes_schedules(self.order_item.attributes)
resource = marketplace_models.Resource(
project=self.order_item.order.project,
offering=self.order_item.offering,
plan=self.order_item.plan,
limits=self.order_item.limits,
attributes=self.order_item.attributes,
name=self.order_item.attributes.get('name') or '',
state=marketplace_models.Resource.States.CREATING,
)
resource.init_cost()
resource.save()
resource.init_quotas()
self.order_item.resource = resource
self.order_item.save(update_fields=['resource'])
def validate_order_item(self, request):
schedules = self.order_item.attributes.get('schedules')
# We check that the schedule is set.
if not schedules:
raise ValidationError(_('Schedules are required.'))
if not len(schedules):
raise ValidationError(_('Schedules are required.'))
for period in schedules:
try:
start = period['start']
end = period['end']
if not start or not end:
raise ValidationError(
_(
'Value \'start\' or \'end\' does not exist in schedules item.'
)
)
except KeyError:
raise ValidationError(
_('Key \'start\' or \'end\' does not exist in schedules item.')
)
for value in [start, end]:
match = datetime_re.match(value)
kw = match.groupdict()
if list(
filter(
lambda x: not kw[x],
['hour', 'month', 'second', 'year', 'tzinfo', 'day', 'minute'],
)
):
raise ValidationError(
_('The value %s does not match the format.') % value
)
if parse_datetime(start) < timezone.now():
raise ValidationError(_('Past slots are not available for selection.'))
# Check that the schedule is available for the offering.
offering = self.order_item.offering
offering_schedules = offering.attributes.get('schedules', [])
for period in schedules:
if not is_interval_in_schedules(
TimePeriod(period['start'], period['end']),
[TimePeriod(i['start'], i['end']) for i in offering_schedules],
):
raise ValidationError(
_(
'Time period from %s to %s is not available for selected offering.'
)
% (period['start'], period['end'])
)
# Check that there are no other bookings.
bookings = get_offering_bookings(offering)
for period in schedules:
if is_interval_in_schedules(
TimePeriod(period['start'], period['end']), bookings
):
raise ValidationError(
_('Time period from %s to %s is not available.')
% (period['start'], period['end'])
)
# Check that there are no other booking requests.
booking_requests = get_other_offering_booking_requests(self.order_item)
for period in schedules:
if is_interval_in_schedules(
TimePeriod(period['start'], period['end']), booking_requests
):
raise ValidationError(
_(
'Time period from %s to %s is not available. Other booking request exists.'
)
% (period['start'], period['end'])
)
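    # Illustrative sketch of the payload this processor validates (names and
    # dates are made up): `attributes['schedules']` must be a non-empty list of
    # ISO 8601 datetime pairs that include timezone information, e.g.
    #
    #   attributes = {
    #       'name': 'Conference room booking',
    #       'schedules': [
    #           {'start': '2020-02-12T10:00:00+00:00',
    #            'end': '2020-02-12T12:00:00+00:00'},
    #       ],
    #   }
    #
    # Each period is then checked against the offering's own schedule and
    # against existing bookings and booking requests.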
class BookingDeleteProcessor(processors.DeleteScopedResourceProcessor):
pass
| mit | 1,439,147,289,079,598,300 | 37.933884 | 99 | 0.545956 | false |
manz/python-mapnik | test/python_tests/sqlite_test.py | 1 | 20465 | #!/usr/bin/env python
from __future__ import print_function
from nose.tools import eq_, raises
from .utilities import execution_path, run_all
import os
import mapnik
def setup():
    # All of the paths used are relative; if we run the tests
    # from another directory we need to chdir()
os.chdir(execution_path('.'))
def teardown():
index = '../data/sqlite/world.sqlite.index'
if os.path.exists(index):
os.unlink(index)
if 'sqlite' in mapnik.DatasourceCache.plugin_names():
def test_attachdb_with_relative_file():
# The point table and index is in the qgis_spatiallite.sqlite
# database. If either is not found, then this fails
ds = mapnik.SQLite(file='../data/sqlite/world.sqlite',
table='point',
attachdb='scratch@qgis_spatiallite.sqlite'
)
fs = ds.featureset()
feature = fs.next()
eq_(feature['pkuid'],1)
test_attachdb_with_relative_file.requires_data = True
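    # A note on the `attachdb` parameter used throughout these tests (see the
    # examples below): it is a comma separated list of `alias@database` pairs,
    # where the database is either a file name or `:memory:` for a scratch
    # database, and it can be combined with `initdb` to create tables in the
    # attached database before the first query, e.g.
    #
    #   attachdb='scratch@qgis_spatiallite.sqlite'
    #   attachdb='scratch1@:memory:,scratch2@:memory:'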
def test_attachdb_with_multiple_files():
ds = mapnik.SQLite(file='../data/sqlite/world.sqlite',
table='attachedtest',
attachdb='scratch1@:memory:,scratch2@:memory:',
initdb='''
create table scratch1.attachedtest (the_geom);
create virtual table scratch2.idx_attachedtest_the_geom using rtree(pkid,xmin,xmax,ymin,ymax);
insert into scratch2.idx_attachedtest_the_geom values (1,-7799225.5,-7778571.0,1393264.125,1417719.375);
'''
)
fs = ds.featureset()
feature = None
        try:
feature = fs.next()
except StopIteration:
pass
# the above should not throw but will result in no features
eq_(feature,None)
test_attachdb_with_multiple_files.requires_data = True
def test_attachdb_with_absolute_file():
# The point table and index is in the qgis_spatiallite.sqlite
# database. If either is not found, then this fails
ds = mapnik.SQLite(file=os.getcwd() + '/../data/sqlite/world.sqlite',
table='point',
attachdb='scratch@qgis_spatiallite.sqlite'
)
fs = ds.featureset()
feature = fs.next()
eq_(feature['pkuid'],1)
test_attachdb_with_absolute_file.requires_data = True
def test_attachdb_with_index():
ds = mapnik.SQLite(file='../data/sqlite/world.sqlite',
table='attachedtest',
attachdb='scratch@:memory:',
initdb='''
create table scratch.attachedtest (the_geom);
create virtual table scratch.idx_attachedtest_the_geom using rtree(pkid,xmin,xmax,ymin,ymax);
insert into scratch.idx_attachedtest_the_geom values (1,-7799225.5,-7778571.0,1393264.125,1417719.375);
'''
)
fs = ds.featureset()
feature = None
        try:
feature = fs.next()
except StopIteration:
pass
eq_(feature,None)
test_attachdb_with_index.requires_data = True
def test_attachdb_with_explicit_index():
ds = mapnik.SQLite(file='../data/sqlite/world.sqlite',
table='attachedtest',
index_table='myindex',
attachdb='scratch@:memory:',
initdb='''
create table scratch.attachedtest (the_geom);
create virtual table scratch.myindex using rtree(pkid,xmin,xmax,ymin,ymax);
insert into scratch.myindex values (1,-7799225.5,-7778571.0,1393264.125,1417719.375);
'''
)
fs = ds.featureset()
feature = None
try:
feature = fs.next()
except StopIteration:
pass
eq_(feature,None)
test_attachdb_with_explicit_index.requires_data = True
def test_attachdb_with_sql_join():
ds = mapnik.SQLite(file='../data/sqlite/world.sqlite',
table='(select * from world_merc INNER JOIN business on world_merc.iso3 = business.ISO3 limit 100)',
attachdb='[email protected]'
)
eq_(len(ds.fields()),29)
eq_(ds.fields(),['OGC_FID', 'fips', 'iso2', 'iso3', 'un', 'name', 'area', 'pop2005', 'region', 'subregion', 'lon', 'lat', 'ISO3:1', '1995', '1996', '1997', '1998', '1999', '2000', '2001', '2002', '2003', '2004', '2005', '2006', '2007', '2008', '2009', '2010'])
eq_(ds.field_types(),['int', 'str', 'str', 'str', 'int', 'str', 'int', 'int', 'int', 'int', 'float', 'float', 'str', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int'])
fs = ds.featureset()
feature = fs.next()
eq_(feature.id(),1)
expected = {
1995:0,
1996:0,
1997:0,
1998:0,
1999:0,
2000:0,
2001:0,
2002:0,
2003:0,
2004:0,
2005:0,
2006:0,
2007:0,
2008:0,
2009:0,
2010:0,
            # this appears to be sqlite's way of
            # automatically handling clashing column names
'ISO3:1':'ATG',
'OGC_FID':1,
'area':44,
'fips':u'AC',
'iso2':u'AG',
'iso3':u'ATG',
'lat':17.078,
'lon':-61.783,
'name':u'Antigua and Barbuda',
'pop2005':83039,
'region':19,
'subregion':29,
'un':28
}
for k,v in expected.items():
try:
eq_(feature[str(k)],v)
except:
#import pdb;pdb.set_trace()
print('invalid key/v %s/%s for: %s' % (k,v,feature))
test_attachdb_with_sql_join.requires_data = True
def test_attachdb_with_sql_join_count():
ds = mapnik.SQLite(file='../data/sqlite/world.sqlite',
table='(select * from world_merc INNER JOIN business on world_merc.iso3 = business.ISO3 limit 100)',
attachdb='[email protected]'
)
eq_(len(ds.fields()),29)
eq_(ds.fields(),['OGC_FID', 'fips', 'iso2', 'iso3', 'un', 'name', 'area', 'pop2005', 'region', 'subregion', 'lon', 'lat', 'ISO3:1', '1995', '1996', '1997', '1998', '1999', '2000', '2001', '2002', '2003', '2004', '2005', '2006', '2007', '2008', '2009', '2010'])
eq_(ds.field_types(),['int', 'str', 'str', 'str', 'int', 'str', 'int', 'int', 'int', 'int', 'float', 'float', 'str', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int'])
eq_(len(ds.all_features()),100)
test_attachdb_with_sql_join_count.requires_data = True
def test_attachdb_with_sql_join_count2():
'''
sqlite3 world.sqlite
attach database 'business.sqlite' as business;
select count(*) from world_merc INNER JOIN business on world_merc.iso3 = business.ISO3;
'''
ds = mapnik.SQLite(file='../data/sqlite/world.sqlite',
table='(select * from world_merc INNER JOIN business on world_merc.iso3 = business.ISO3)',
attachdb='[email protected]'
)
eq_(len(ds.fields()),29)
eq_(ds.fields(),['OGC_FID', 'fips', 'iso2', 'iso3', 'un', 'name', 'area', 'pop2005', 'region', 'subregion', 'lon', 'lat', 'ISO3:1', '1995', '1996', '1997', '1998', '1999', '2000', '2001', '2002', '2003', '2004', '2005', '2006', '2007', '2008', '2009', '2010'])
eq_(ds.field_types(),['int', 'str', 'str', 'str', 'int', 'str', 'int', 'int', 'int', 'int', 'float', 'float', 'str', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int'])
eq_(len(ds.all_features()),192)
test_attachdb_with_sql_join_count2.requires_data = True
def test_attachdb_with_sql_join_count3():
'''
select count(*) from (select * from world_merc where 1=1) as world_merc INNER JOIN business on world_merc.iso3 = business.ISO3;
'''
ds = mapnik.SQLite(file='../data/sqlite/world.sqlite',
table='(select * from (select * from world_merc where !intersects!) as world_merc INNER JOIN business on world_merc.iso3 = business.ISO3)',
attachdb='[email protected]'
)
eq_(len(ds.fields()),29)
eq_(ds.fields(),['OGC_FID', 'fips', 'iso2', 'iso3', 'un', 'name', 'area', 'pop2005', 'region', 'subregion', 'lon', 'lat', 'ISO3:1', '1995', '1996', '1997', '1998', '1999', '2000', '2001', '2002', '2003', '2004', '2005', '2006', '2007', '2008', '2009', '2010'])
eq_(ds.field_types(),['int', 'str', 'str', 'str', 'int', 'str', 'int', 'int', 'int', 'int', 'float', 'float', 'str', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int'])
eq_(len(ds.all_features()),192)
test_attachdb_with_sql_join_count3.requires_data = True
def test_attachdb_with_sql_join_count4():
'''
select count(*) from (select * from world_merc where 1=1) as world_merc INNER JOIN business on world_merc.iso3 = business.ISO3;
'''
ds = mapnik.SQLite(file='../data/sqlite/world.sqlite',
table='(select * from (select * from world_merc where !intersects! limit 1) as world_merc INNER JOIN business on world_merc.iso3 = business.ISO3)',
attachdb='[email protected]'
)
eq_(len(ds.fields()),29)
eq_(ds.fields(),['OGC_FID', 'fips', 'iso2', 'iso3', 'un', 'name', 'area', 'pop2005', 'region', 'subregion', 'lon', 'lat', 'ISO3:1', '1995', '1996', '1997', '1998', '1999', '2000', '2001', '2002', '2003', '2004', '2005', '2006', '2007', '2008', '2009', '2010'])
eq_(ds.field_types(),['int', 'str', 'str', 'str', 'int', 'str', 'int', 'int', 'int', 'int', 'float', 'float', 'str', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int'])
eq_(len(ds.all_features()),1)
test_attachdb_with_sql_join_count4.requires_data = True
def test_attachdb_with_sql_join_count5():
'''
select count(*) from (select * from world_merc where 1=1) as world_merc INNER JOIN business on world_merc.iso3 = business.ISO3;
'''
ds = mapnik.SQLite(file='../data/sqlite/world.sqlite',
table='(select * from (select * from world_merc where !intersects! and 1=2) as world_merc INNER JOIN business on world_merc.iso3 = business.ISO3)',
attachdb='[email protected]'
)
# nothing is able to join to business so we don't pick up business schema
eq_(len(ds.fields()),12)
eq_(ds.fields(),['OGC_FID', 'fips', 'iso2', 'iso3', 'un', 'name', 'area', 'pop2005', 'region', 'subregion', 'lon', 'lat'])
eq_(ds.field_types(),['int', 'str', 'str', 'str', 'int', 'str', 'int', 'int', 'int', 'int', 'float', 'float'])
eq_(len(ds.all_features()),0)
test_attachdb_with_sql_join_count5.requires_data = True
def test_subqueries():
ds = mapnik.SQLite(file='../data/sqlite/world.sqlite',
table='world_merc',
)
fs = ds.featureset()
feature = fs.next()
eq_(feature['OGC_FID'],1)
eq_(feature['fips'],u'AC')
eq_(feature['iso2'],u'AG')
eq_(feature['iso3'],u'ATG')
eq_(feature['un'],28)
eq_(feature['name'],u'Antigua and Barbuda')
eq_(feature['area'],44)
eq_(feature['pop2005'],83039)
eq_(feature['region'],19)
eq_(feature['subregion'],29)
eq_(feature['lon'],-61.783)
eq_(feature['lat'],17.078)
ds = mapnik.SQLite(file='../data/sqlite/world.sqlite',
table='(select * from world_merc)',
)
fs = ds.featureset()
feature = fs.next()
eq_(feature['OGC_FID'],1)
eq_(feature['fips'],u'AC')
eq_(feature['iso2'],u'AG')
eq_(feature['iso3'],u'ATG')
eq_(feature['un'],28)
eq_(feature['name'],u'Antigua and Barbuda')
eq_(feature['area'],44)
eq_(feature['pop2005'],83039)
eq_(feature['region'],19)
eq_(feature['subregion'],29)
eq_(feature['lon'],-61.783)
eq_(feature['lat'],17.078)
ds = mapnik.SQLite(file='../data/sqlite/world.sqlite',
table='(select OGC_FID,GEOMETRY from world_merc)',
)
fs = ds.featureset()
feature = fs.next()
eq_(feature['OGC_FID'],1)
eq_(len(feature),1)
ds = mapnik.SQLite(file='../data/sqlite/world.sqlite',
table='(select GEOMETRY,OGC_FID,fips from world_merc)',
)
fs = ds.featureset()
feature = fs.next()
eq_(feature['OGC_FID'],1)
eq_(feature['fips'],u'AC')
# same as above, except with alias like postgres requires
# TODO - should we try to make this work?
#ds = mapnik.SQLite(file='../data/sqlite/world.sqlite',
# table='(select GEOMETRY,rowid as aliased_id,fips from world_merc) as table',
# key_field='aliased_id'
# )
#fs = ds.featureset()
#feature = fs.next()
#eq_(feature['aliased_id'],1)
#eq_(feature['fips'],u'AC')
ds = mapnik.SQLite(file='../data/sqlite/world.sqlite',
table='(select GEOMETRY,OGC_FID,OGC_FID as rowid,fips from world_merc)',
)
fs = ds.featureset()
feature = fs.next()
eq_(feature['rowid'],1)
eq_(feature['fips'],u'AC')
test_subqueries.requires_data = True
def test_empty_db():
ds = mapnik.SQLite(file='../data/sqlite/empty.db',
table='empty',
)
fs = ds.featureset()
feature = None
try:
feature = fs.next()
except StopIteration:
pass
eq_(feature,None)
test_empty_db.requires_data = True
@raises(RuntimeError)
def test_that_nonexistant_query_field_throws(**kwargs):
ds = mapnik.SQLite(file='../data/sqlite/empty.db',
table='empty',
)
eq_(len(ds.fields()),25)
eq_(ds.fields(),['OGC_FID', 'scalerank', 'labelrank', 'featurecla', 'sovereignt', 'sov_a3', 'adm0_dif', 'level', 'type', 'admin', 'adm0_a3', 'geou_dif', 'name', 'abbrev', 'postal', 'name_forma', 'terr_', 'name_sort', 'map_color', 'pop_est', 'gdp_md_est', 'fips_10_', 'iso_a2', 'iso_a3', 'iso_n3'])
eq_(ds.field_types(),['int', 'int', 'int', 'str', 'str', 'str', 'float', 'float', 'str', 'str', 'str', 'float', 'str', 'str', 'str', 'str', 'str', 'str', 'float', 'float', 'float', 'float', 'str', 'str', 'float'])
query = mapnik.Query(ds.envelope())
for fld in ds.fields():
query.add_property_name(fld)
# also add an invalid one, triggering throw
query.add_property_name('bogus')
ds.features(query)
test_that_nonexistant_query_field_throws.requires_data = True
def test_intersects_token1():
ds = mapnik.SQLite(file='../data/sqlite/empty.db',
table='(select * from empty where !intersects!)',
)
fs = ds.featureset()
feature = None
        try:
feature = fs.next()
except StopIteration:
pass
eq_(feature,None)
test_intersects_token1.requires_data = True
def test_intersects_token2():
ds = mapnik.SQLite(file='../data/sqlite/empty.db',
table='(select * from empty where "a"!="b" and !intersects!)',
)
fs = ds.featureset()
feature = None
        try:
feature = fs.next()
except StopIteration:
pass
eq_(feature,None)
test_intersects_token2.requires_data = True
def test_intersects_token3():
ds = mapnik.SQLite(file='../data/sqlite/empty.db',
table='(select * from empty where "a"!="b" and !intersects!)',
)
fs = ds.featureset()
feature = None
        try:
feature = fs.next()
except StopIteration:
pass
eq_(feature,None)
test_intersects_token3.requires_data = True
# https://github.com/mapnik/mapnik/issues/1537
# this works because key_field is manually set
def test_db_with_one_text_column():
# form up an in-memory test db
wkb = '010100000000000000000000000000000000000000'
ds = mapnik.SQLite(file=':memory:',
table='test1',
initdb='''
create table test1 (alias TEXT,geometry BLOB);
insert into test1 values ("test",x'%s');
''' % wkb,
extent='-180,-60,180,60',
use_spatial_index=False,
key_field='alias'
)
eq_(len(ds.fields()),1)
eq_(ds.fields(),['alias'])
eq_(ds.field_types(),['str'])
fs = ds.all_features()
eq_(len(fs),1)
feat = fs[0]
eq_(feat.id(),0) # should be 1?
eq_(feat['alias'],'test')
eq_(feat.geometry.to_wkt(),'POINT(0 0)')
def test_db_with_one_untyped_column():
# form up an in-memory test db
wkb = '010100000000000000000000000000000000000000'
ds = mapnik.SQLite(file=':memory:',
table='test1',
initdb='''
create table test1 (geometry BLOB, untyped);
insert into test1 values (x'%s', 'untyped');
''' % wkb,
extent='-180,-60,180,60',
use_spatial_index=False,
key_field='rowid'
)
# ensure the untyped column is found
eq_(len(ds.fields()),2)
eq_(ds.fields(),['rowid', 'untyped'])
eq_(ds.field_types(),['int', 'str'])
def test_db_with_one_untyped_column_using_subquery():
# form up an in-memory test db
wkb = '010100000000000000000000000000000000000000'
ds = mapnik.SQLite(file=':memory:',
table='(SELECT rowid, geometry, untyped FROM test1)',
initdb='''
create table test1 (geometry BLOB, untyped);
insert into test1 values (x'%s', 'untyped');
''' % wkb,
extent='-180,-60,180,60',
use_spatial_index=False,
key_field='rowid'
)
# ensure the untyped column is found
eq_(len(ds.fields()),3)
eq_(ds.fields(),['rowid', 'untyped', 'rowid'])
eq_(ds.field_types(),['int', 'str', 'int'])
def test_that_64bit_int_fields_work():
ds = mapnik.SQLite(file='../data/sqlite/64bit_int.sqlite',
table='int_table',
use_spatial_index=False
)
eq_(len(ds.fields()),3)
eq_(ds.fields(),['OGC_FID','id','bigint'])
eq_(ds.field_types(),['int','int','int'])
fs = ds.featureset()
feat = fs.next()
eq_(feat.id(),1)
eq_(feat['OGC_FID'],1)
eq_(feat['bigint'],2147483648)
feat = fs.next()
eq_(feat.id(),2)
eq_(feat['OGC_FID'],2)
eq_(feat['bigint'],922337203685477580)
test_that_64bit_int_fields_work.requires_data = True
def test_null_id_field():
# silence null key warning: https://github.com/mapnik/mapnik/issues/1889
default_logging_severity = mapnik.logger.get_severity()
mapnik.logger.set_severity(mapnik.severity_type.none)
# form up an in-memory test db
wkb = '010100000000000000000000000000000000000000'
# note: the osm_id should be declared INTEGER PRIMARY KEY
# but in this case we intentionally do not make this a valid pkey
# otherwise sqlite would turn the null into a valid, serial id
ds = mapnik.SQLite(file=':memory:',
table='test1',
initdb='''
create table test1 (osm_id INTEGER,geometry BLOB);
insert into test1 values (null,x'%s');
''' % wkb,
extent='-180,-60,180,60',
use_spatial_index=False,
key_field='osm_id'
)
fs = ds.featureset()
feature = None
        try:
feature = fs.next()
except StopIteration:
pass
eq_(feature,None)
mapnik.logger.set_severity(default_logging_severity)
if __name__ == "__main__":
setup()
result = run_all(eval(x) for x in dir() if x.startswith("test_"))
teardown()
exit(result)
| lgpl-2.1 | 4,323,102,137,931,213,300 | 39.766932 | 305 | 0.538334 | false |
frac/lettuce | tests/functional/output_features/success_table/success_table_steps.py | 1 | 1617 | # -*- coding: utf-8 -*-
# <Lettuce - Behaviour Driven Development for python>
# Copyright (C) <2010> Gabriel Falcão <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from lettuce import step
from lettuce import world
from lettuce.terrain import before
from nose.tools import assert_equals
@before.all
def set_balance():
world.my_balance = 0
@step('I have (\d+) bucks')
def compare_bucks(step, cash):
assert_equals(world.my_balance, int(cash))
@step('I have these items')
def havetheseitems(step):
cars = {}
for data in step.hashes:
key = data['name']
value = int(data['price'])
cars[key] = value
world.cars = cars
@step('sell the "([^"]+)"')
def sell_item(step, name):
world.my_balance += world.cars[name]
del world.cars[name]
@step('my garage contains:')
def alsothese(step):
cars = {}
for data in step.hashes:
key = data['name']
value = int(data['price'])
cars[key] = value
assert_equals(cars, world.cars)
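# Illustrative only -- a feature snippet these steps could bind to (the exact
# wording of the real success_table feature may differ):
#
#   Given I have these items
#     | name    | price  |
#     | Porsche | 200000 |
#     | Ferrari | 400000 |
#   When I sell the "Ferrari"
#   Then I have 400000 bucks
#   And my garage contains:
#     | name    | price  |
#     | Porsche | 200000 |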
| gpl-3.0 | 2,192,169,248,575,762,400 | 28.925926 | 71 | 0.686881 | false |
prculley/gramps | gramps/plugins/view/geofamily.py | 1 | 20779 | # -*- python -*-
# -*- coding: utf-8 -*-
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2011-2016 Serge Noiraud
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Geography for one family
"""
#-------------------------------------------------------------------------
#
# Python modules
#
#-------------------------------------------------------------------------
import operator
from gi.repository import Gdk
KEY_TAB = Gdk.KEY_Tab
from gi.repository import Gtk
#-------------------------------------------------------------------------
#
# set up logging
#
#-------------------------------------------------------------------------
import logging
_LOG = logging.getLogger("GeoGraphy.geofamily")
#-------------------------------------------------------------------------
#
# Gramps Modules
#
#-------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
from gramps.gen.lib import EventRoleType, EventType
from gramps.gen.config import config
from gramps.gen.datehandler import displayer
from gramps.gen.display.name import displayer as _nd
from gramps.gen.display.place import displayer as _pd
from gramps.gen.utils.place import conv_lat_lon
from gramps.gui.views.bookmarks import FamilyBookmarks
from gramps.plugins.lib.maps.geography import GeoGraphyView
#-------------------------------------------------------------------------
#
# Constants
#
#-------------------------------------------------------------------------
_UI_DEF = '''\
<ui>
<menubar name="MenuBar">
<menu action="GoMenu">
<placeholder name="CommonGo">
<menuitem action="Back"/>
<menuitem action="Forward"/>
<separator/>
</placeholder>
</menu>
<menu action="EditMenu">
<placeholder name="CommonEdit">
<menuitem action="PrintView"/>
</placeholder>
</menu>
<menu action="BookMenu">
<placeholder name="AddEditBook">
<menuitem action="AddBook"/>
<menuitem action="EditBook"/>
</placeholder>
</menu>
</menubar>
<toolbar name="ToolBar">
<placeholder name="CommonNavigation">
<toolitem action="Back"/>
<toolitem action="Forward"/>
</placeholder>
<placeholder name="CommonEdit">
<toolitem action="PrintView"/>
</placeholder>
</toolbar>
</ui>
'''
# pylint: disable=no-member
# pylint: disable=unused-variable
# pylint: disable=unused-argument
#-------------------------------------------------------------------------
#
# GeoView
#
#-------------------------------------------------------------------------
class GeoFamily(GeoGraphyView):
"""
    The view used to render the family map.
"""
def __init__(self, pdata, dbstate, uistate, nav_group=0):
GeoGraphyView.__init__(self, _('Family places map'),
pdata, dbstate, uistate,
FamilyBookmarks,
nav_group)
self.dbstate = dbstate
self.uistate = uistate
self.place_list = []
self.place_without_coordinates = []
self.minlat = self.maxlat = self.minlon = self.maxlon = 0.0
self.minyear = 9999
self.maxyear = 0
self.nbplaces = 0
self.nbmarkers = 0
self.sort = []
self.additional_uis.append(self.additional_ui())
self.no_show_places_in_status_bar = False
self.cal = None
def get_title(self):
"""
Used to set the titlebar in the configuration window.
"""
return _('GeoFamily')
def get_stock(self):
"""
Returns the name of the stock icon to use for the display.
This assumes that this icon has already been registered
as a stock icon.
"""
return 'geo-show-family'
def get_viewtype_stock(self):
"""Type of view in category
"""
return 'geo-show-family'
def additional_ui(self):
"""
Specifies the UIManager XML code that defines the menus and buttons
associated with the interface.
"""
return _UI_DEF
def navigation_type(self):
"""
Indicates the navigation type. Navigation type can be the string
name of any of the primary objects.
"""
return 'Family'
def goto_handle(self, handle=None):
"""
Rebuild the tree with the given person handle as the root.
"""
self.build_tree()
def build_tree(self):
"""
This is called by the parent class when the view becomes visible. Since
all handling of visibility is now in rebuild_trees, see that for more
information.
"""
if not self.dbstate.is_open():
return
if self.uistate.get_active('Family'):
self._createmap(self.uistate.get_active('Family'))
else:
self._createmap(self.uistate.get_active('Person'))
def _createpersonmarkers(self, dbstate, person, comment, fam_id):
"""
Create all markers for the specified person.
"""
self.cal = config.get('preferences.calendar-format-report')
latitude = longitude = ""
if person:
# For each event, if we have a place, set a marker.
for event_ref in person.get_event_ref_list():
if not event_ref:
continue
role = event_ref.get_role()
event = dbstate.db.get_event_from_handle(event_ref.ref)
eyear = event.get_date_object().to_calendar(self.cal).get_year()
place_handle = event.get_place_handle()
if place_handle:
place = dbstate.db.get_place_from_handle(place_handle)
if place:
longitude = place.get_longitude()
latitude = place.get_latitude()
latitude, longitude = conv_lat_lon(latitude,
longitude, "D.D8")
descr = _pd.display(dbstate.db, place)
evt = EventType(event.get_type())
descr1 = _("%(eventtype)s : %(name)s") % {
'eventtype': evt,
'name': _nd.display(person)}
                        # place.get_longitude and place.get_latitude each return
                        # one string. We have coordinates only when both values
                        # contain a non-null string.
if longitude and latitude:
if not self._present_in_places_list(2,
str(descr1 + descr + str(evt))):
self._append_to_places_list(descr,
str(descr1 + descr + str(evt)),
_nd.display(person),
latitude, longitude,
role, eyear,
event.get_type(),
person.gramps_id,
place.gramps_id,
event.gramps_id,
fam_id
)
else:
self._append_to_places_without_coord(
place.gramps_id, descr)
family_list = person.get_family_handle_list()
for family_hdl in family_list:
family = self.dbstate.db.get_family_from_handle(family_hdl)
if family is not None:
for event_ref in family.get_event_ref_list():
if event_ref:
event = dbstate.db.get_event_from_handle(
event_ref.ref)
role = event_ref.get_role()
if event.get_place_handle():
place_handle = event.get_place_handle()
if place_handle:
place = dbstate.db.get_place_from_handle(
place_handle)
if place:
longitude = place.get_longitude()
latitude = place.get_latitude()
(latitude,
longitude) = conv_lat_lon(latitude,
longitude,
"D.D8")
descr = _pd.display(dbstate.db, place)
evt = EventType(event.get_type())
(father_name,
mother_name) = self._get_father_and_mother_name(event)
descr1 = "%s : %s - " % (evt,
father_name)
descr1 = "%s%s" % (descr1, mother_name)
eyear = event.get_date_object().to_calendar(self.cal).get_year()
if longitude and latitude:
if not self._present_in_places_list(
2, str(descr1 + descr + str(evt))):
self._append_to_places_list(
descr,
str(descr1 + descr + str(evt)),
_nd.display(person),
latitude, longitude,
role, eyear,
event.get_type(),
person.gramps_id,
place.gramps_id,
event.gramps_id,
family.gramps_id
)
else:
self._append_to_places_without_coord(place.gramps_id, descr)
def family_label(self, family):
"""
        Create the family label depending on the existence of the father and mother.
"""
if family is None:
return "Unknown"
father = mother = None
hdl = family.get_father_handle()
if hdl:
father = self.dbstate.db.get_person_from_handle(hdl)
hdl = family.get_mother_handle()
if hdl:
mother = self.dbstate.db.get_person_from_handle(hdl)
if father and mother:
label = _("%(gramps_id)s : %(father)s and %(mother)s") % {
'father' : _nd.display(father),
'mother' : _nd.display(mother),
'gramps_id' : family.gramps_id,
}
elif father:
label = "%(gramps_id)s : %(father)s" % {
'father' : _nd.display(father),
'gramps_id' : family.gramps_id,
}
elif mother:
label = "%(gramps_id)s : %(mother)s" % {
'mother' : _nd.display(mother),
'gramps_id' : family.gramps_id,
}
else:
# No translation for bare gramps_id
label = "%(gramps_id)s :" % {
'gramps_id' : family.gramps_id,
}
return label
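    # For illustration (ids and names are made up), the labels built above look
    # like:
    #   "F0001 : John Smith and Jane Smith"   (both parents known)
    #   "F0002 : John Smith"                  (father only)
    #   "F0003 :"                             (no parent known)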
def _createmap_for_one_family(self, family):
"""
        Create all markers for one family: all the events' places that have a lat/lon.
"""
dbstate = self.dbstate
self.message_layer.add_message(
_("Family places for %s") % self.family_label(family))
person = None
if family:
person = dbstate.db.get_person_from_handle(
family.get_father_handle())
else:
return
family_id = family.gramps_id
if person is None: # family without father ?
handle = family.get_mother_handle()
if handle:
person = dbstate.db.get_person_from_handle(handle)
if person is None:
handle = self.uistate.get_active('Person')
if handle:
person = dbstate.db.get_person_from_handle(handle)
if person is not None:
family_list = person.get_family_handle_list()
if len(family_list) > 0:
fhandle = family_list[0] # first is primary
fam = dbstate.db.get_family_from_handle(fhandle)
father = mother = None
handle = fam.get_father_handle()
if handle:
father = dbstate.db.get_person_from_handle(handle)
if father:
comment = _("Father : %(id)s : %(name)s") % {
'id': father.gramps_id,
'name': _nd.display(father)}
self._createpersonmarkers(dbstate, father,
comment, family_id)
handle = fam.get_mother_handle()
if handle:
mother = dbstate.db.get_person_from_handle(handle)
if mother:
comment = _("Mother : %(id)s : %(name)s") % {
'id': mother.gramps_id,
'name': _nd.display(mother)}
self._createpersonmarkers(dbstate, mother,
comment, family_id)
index = 0
child_ref_list = fam.get_child_ref_list()
if child_ref_list:
for child_ref in child_ref_list:
child = dbstate.db.get_person_from_handle(child_ref.ref)
if child:
index += 1
comment = _("Child : %(id)s - %(index)d "
": %(name)s") % {
'id' : child.gramps_id,
'index' : index,
'name' : _nd.display(child)
}
self._createpersonmarkers(dbstate, child,
comment, family_id)
else:
comment = _("Person : %(id)s %(name)s has no family.") % {
'id' : person.gramps_id,
'name' : _nd.display(person)
}
self._createpersonmarkers(dbstate, person, comment, family_id)
def _createmap(self, handle):
"""
        Create all markers for each person's events in the database which have
        a lat/lon.
"""
if not handle:
return
self.place_list = []
self.place_without_coordinates = []
self.places_found = []
self.nbplaces = 0
self.nbmarkers = 0
self.minlat = self.maxlat = self.minlon = self.maxlon = 0.0
self.minyear = 9999
self.maxyear = 0
self.message_layer.clear_messages()
if self.dbstate.db.has_family_handle(handle):
family = self.dbstate.db.get_family_from_handle(handle)
self._createmap_for_one_family(family)
else:
person = self.dbstate.db.get_person_from_handle(handle)
if not person:
return
family_list = person.get_family_handle_list()
for family_hdl in family_list:
family = self.dbstate.db.get_family_from_handle(family_hdl)
if family is not None:
self._createmap_for_one_family(family)
self.sort = sorted(self.place_list,
key=operator.itemgetter(3, 4, 6)
)
self._create_markers()
def add_event_bubble_message(self, event, lat, lon, mark, menu):
"""
Add an item to the popup menu.
"""
self.itemoption = Gtk.Menu()
itemoption = self.itemoption
itemoption.show()
menu.set_submenu(itemoption)
modify = Gtk.MenuItem(label=_("Edit Family"))
modify.show()
modify.connect("activate", self.edit_family, event, lat, lon, mark)
itemoption.append(modify)
modify = Gtk.MenuItem(label=_("Edit Person"))
modify.show()
modify.connect("activate", self.edit_person, event, lat, lon, mark)
itemoption.append(modify)
modify = Gtk.MenuItem(label=_("Edit Event"))
modify.show()
modify.connect("activate", self.edit_event, event, lat, lon, mark)
itemoption.append(modify)
center = Gtk.MenuItem(label=_("Center on this place"))
center.show()
center.connect("activate", self.center_here, event, lat, lon, mark)
itemoption.append(center)
def bubble_message(self, event, lat, lon, marks):
"""
Add the popup menu.
"""
self.menu = Gtk.Menu()
menu = self.menu
menu.set_title("family")
message = ""
oldplace = ""
prevmark = None
for mark in marks:
if message != "":
add_item = Gtk.MenuItem(label=message)
add_item.show()
menu.append(add_item)
self.add_event_bubble_message(event, lat, lon,
prevmark, add_item)
if mark[0] != oldplace:
message = "%s :" % mark[0]
self.add_place_bubble_message(event, lat, lon,
marks, menu, message, mark)
oldplace = mark[0]
evt = self.dbstate.db.get_event_from_gramps_id(mark[10])
# format the date as described in preferences.
date = displayer.display(evt.get_date_object())
if date == "":
date = _("Unknown")
if mark[5] == EventRoleType.PRIMARY:
message = "(%s) %s : %s" % (date, mark[7], mark[1])
elif mark[5] == EventRoleType.FAMILY:
evt = self.dbstate.db.get_event_from_gramps_id(mark[10])
(father_name,
mother_name) = self._get_father_and_mother_name(evt)
message = "(%s) %s : %s - %s" % (date, mark[7],
father_name, mother_name)
else:
evt = self.dbstate.db.get_event_from_gramps_id(mark[10])
descr = evt.get_description()
if descr == "":
descr = _('No description')
message = "(%s) %s => %s" % (date, mark[5], descr)
prevmark = mark
add_item = Gtk.MenuItem(label=message)
add_item.show()
menu.append(add_item)
self.add_event_bubble_message(event, lat, lon, prevmark, add_item)
menu.popup(None, None, None,
None, event.button, event.time)
return 1
def add_specific_menu(self, menu, event, lat, lon):
"""
Add specific entry to the navigation menu.
"""
return
def get_default_gramplets(self):
"""
Define the default gramplets for the sidebar and bottombar.
"""
return (("Family Filter",),
())
| gpl-2.0 | 6,691,864,510,758,257,000 | 40.39243 | 104 | 0.455893 | false |
drjova/cds-demosite | tests/unit/test_provider.py | 1 | 2108 | # -*- coding: utf-8 -*-
#
# This file is part of CDS.
# Copyright (C) 2015, 2016 CERN.
#
# CDS is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# CDS is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with CDS; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Test access control package."""
from __future__ import absolute_import, print_function
import mock
import pytest
from cds.modules.records.minters import recid_minter
from invenio_pidstore.models import PIDStatus
def test_recid_provider(db):
"""Test the CDS recid provider."""
with mock.patch('requests.get') as httpmock, mock.patch(
'invenio_pidstore.models.PersistentIdentifier.create')\
as pid_create:
pid_create.configure_mock(**{'return_value.pid_provider': None,
'return_value.pid_value': 1})
httpmock.return_value.text = '1'
data = dict()
uuid = '12345678123456781234567812345678'
recid_minter(uuid, data)
assert data['recid'] == 1
pid_create.assert_called_once_with(
'recid', '1', pid_provider=None, object_type='rec',
object_uuid=uuid, status=PIDStatus.REGISTERED)
def test_recid_provider_exception(db):
"""Test if providing a recid will cause an error."""
with pytest.raises(AssertionError):
recid_minter('12345678123456781234567812345678', dict({'recid': 1}))
| gpl-2.0 | 496,742,994,712,681,000 | 35.982456 | 76 | 0.695446 | false |
iScrE4m/RSES | tests/objects/test_stock.py | 1 | 2053 | # coding=utf-8
from pytest import raises
from rses.src.objects import stock
import rses_errors
def test_ingredient_type_create(ingredient_type_no_create):
ingredient_type = stock.IngredientType(name=ingredient_type_no_create)
assert ingredient_type.id
assert ingredient_type.name == ingredient_type_no_create
def test_ingredient_type_rename(ingredient_type, ingredient_type_new_name):
ingredient_type.name = ingredient_type_new_name
assert ingredient_type.name == ingredient_type_new_name
new = stock.IngredientType(ingredient_type_id=ingredient_type.id)
assert new.name == ingredient_type_new_name
assert ingredient_type == new
def test_ingredient_type_delete(ingredient_type):
ingredient_type.delete()
with raises(rses_errors.DoesNotExist) as e:
stock.IngredientType.load_by_name(ingredient_type.name)
assert ingredient_type.name in str(e)
def test_ingredient_create(ingredient_type,
ingredient_no_create,
ingredient_unit,
positive_float,
positive_float2,
positive_int):
"""
    In the arguments, ingredient_type has to come before ingredient_no_create,
    otherwise the teardown of ingredient_type will come first and will
    cascade-delete ingredient_unit.
"""
ingredient = stock.Ingredient(name=ingredient_no_create,
unit=ingredient_unit,
ingredient_type=ingredient_type,
suggestion_threshold=positive_float,
rebuy_threshold=positive_float2,
durability=positive_int)
assert ingredient.name == ingredient_no_create
assert ingredient.unit == ingredient_unit
assert ingredient.type == ingredient_type
assert ingredient.suggestion_threshold == positive_float
assert ingredient.rebuy_threshold == positive_float2
assert ingredient.durability == positive_int
| mit | 7,995,718,306,625,569,000 | 39.254902 | 75 | 0.651242 | false |
kimbauters/sparsepy | search_structure.py | 1 | 8409 | import textwrap # used for embellishing the Graphviz DOT file layout
class Node:
# since we will be using a lot of Node instances, optimise the memory use by relying on slots rather than a dict
__slots__ = ['problem', 'parent', 'action', 'effect', 'state', 'is_goal', 'children',
'visits', 'utility', 'untried_actions', 'tried_actions']
def __init__(self, problem, parent, action, effect, state):
self.problem = problem # the problem space in which this node is relevant
self.parent = parent # parent node of this node
self.action = action # action that was used to get from the parent node to this node
self.effect = effect # effect of the action that resulted in the current node
self.state = state # the state of the world in this node
self.is_goal = problem.goal_reached(self.state) # whether or not this node represents a goal state
self.children = dict() # dictionary of children of this node, key-ed by the action and effect to get to them
self.visits = 0 # number of times this node has been visited
self.utility = 0 # cumulative utility from going through this node
# the available actions for which the current state agrees with their preconditions
self.untried_actions = [a for a in problem.actions if
any(pos <= self.state and not (neg & self.state) for neg, pos in a.preconditions)]
self.tried_actions = {} # dictionary with the actions we tried so far as keys,
# and linked to a tuple consisting of their average reward and number of times we applied them: e.g.
# a1 -> (15, 2)
# a2 -> (10, 1)
def simulate_action(self, action, most_probable=False):
""" Execute the rollout of an action, *without* taking this action out of the list of untried actions.
        :param action: the action to execute
        :param most_probable: if True, deterministically apply the first (most probable) effect instead of sampling one
        :return: a new node obtained by applying the action in the current node """
if most_probable:
effect = action.effects[0]
else:
effect = action.outcome() # trigger one of the effects of the action
if (action, effect) in self.children: # check whether we already applied this action, and gotten this effect
child = self.children[(action, effect)] # we already encountered this state; retrieve it
else:
state = self.state - effect.delete | effect.add # compute the new state by using set operations
child = Node(self.problem, self, action, effect, state) # create a new node with state
self.children[(action, effect)] = child # add this child to the children of this node
return child
def perform_action(self, action):
""" Execute the rollout of an action, *with* taking this action out of the list of untried actions.
:param action: the action to execute
        :return: a new node obtained by applying the action in the current node
:raises: a ValueError if trying to perform an action that is already tried for this node """
self.untried_actions.remove(action) # remove the action from the list of untried actions
self.tried_actions[action] = (0, 0) # add the action to the sequence of actions we already tried
return self.simulate_action(action) # get and return (one of) the child(ren) as a result of applying the action
def rollout_actions(self, rollout_action, depth, horizon):
""" Organise a rollout from a given node to either a goal node or a leaf node (e.g. by hitting the horizon).
:param rollout_action: the heuristic to select the action to use for the rollout
:param depth: the current depth at which the rollout is requested
:param horizon: the maximum depth to consider
        :return: the node where the rollout stopped (a goal node or the node reached at the horizon) and the depth at which it stopped """
if self.is_goal: # check if we have hit a goal state
return self, depth
elif depth < horizon:
action = rollout_action(self) # use the heuristic to select the next action to perform
node = self.simulate_action(action, True) # simulate the execution of this action
return node.rollout_actions(rollout_action, depth + 1, horizon)
        else:  # the horizon has been reached; return the current node and the current depth
return self, depth
def update(self, discounting):
""" Traverse back up a branch to collect all rewards and to backpropagate these rewards to successor nodes.
:param discounting: the discounting factor to use when updating ancestor nodes """
node = self # set this node as the current node in the backpropagation
current_reward = 0 # initialise the reward to 0
while node is not None: # continue until we have processed the root node
current_reward *= discounting # discount the reward obtained in descendants
if node.is_goal: # check if this node is a goal state
current_reward += self.problem.goal_reward # if it is, assign to it the goal reward
if node.effect:
current_reward += node.effect.reward # add any rewards obtained associated with the effect
if not node.parent or node.action in node.parent.tried_actions: # only update the real non-simulated nodes
if node.parent: # check if it is not the root node; continue if not
utility, visits = node.parent.tried_actions[node.action] # get the action info from the parent
node.parent.tried_actions[node.action] = (utility + current_reward, visits + 1) # and update
node.utility += current_reward # update the total utility gathered in this node
node.visits += 1 # update the number of visits to this node
node = node.parent # move to the parent node
def create_graphviz(self, location="graphviz.dot"):
""" Produce the contents for a Graphviz DOT file representing the search tree as starting from this node.
:param location: the location of where to save the generated file.
:return: the location where the Graphviz DOT file has been saved """
output = "graph sparsepy {\n"
output += textwrap.indent(self.__graphviz(), " ")
output += "}"
with open(location, 'w') as file:
file.write(output)
return location
def __graphviz(self, name="0"):
""" Internal method used in the creation of the Graphviz DOT file. This method will be called recursively,
and only helps to fill the body specifications of the DOT file. """
output = 'decision_node' + str(name) # give a unique name to this node
output += ' [label="' + ', '.join(self.state) + '\n' + \
str('%0.2f' % self.utility) + ',' + str(self.visits) + '"]\n' # add the label to identify its state
next_id = 0
for key, child in self.children.items():
(action, effect) = key # extract the action out of the (action, effect) pair
if action in self.tried_actions: # if this is an action we actually performed, not just simulated: show it
output += 'action_node' + str(name) + action.name
output += '[label="' + action.name + '", shape=box]\n'
child_node_name = name + '_' + str(next_id)
output += child.__graphviz(child_node_name)
output += 'action_node' + str(name) + action.name + ' -- '
output += 'decision_node' + str(child_node_name) + ' [style=dashed, label="' + str(effect) + '"]\n'
next_id += 1
for action, info in self.tried_actions.items():
reward, visits = info
output += 'decision_node' + str(name) + ' -- action_node' + str(name) + action.name
output += ' [label="' + '%0.2f' % reward + ',' + str(visits) + '", penwidth="' + str(visits**(1/4)) + '"]\n'
return output
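# Minimal driver sketch (not part of the original module). It assumes a
# `problem` object, an initial state (a plain Python set) and a rollout
# heuristic are defined elsewhere, with the interface used by Node above
# (problem.actions, problem.goal_reached(), action.outcome(), effect.add /
# effect.delete / effect.reward, ...). `choose` is a hypothetical selection
# rule, e.g. UCB over node.tried_actions.
#
#   root = Node(problem, None, None, None, initial_state)
#   for _ in range(num_iterations):
#       node = root
#       while not node.untried_actions and node.children:      # selection
#           node = node.simulate_action(choose(node))
#       if node.untried_actions:                                # expansion
#           node = node.perform_action(node.untried_actions[0])
#       leaf, depth = node.rollout_actions(rollout_heuristic, 0, horizon=20)
#       leaf.update(discounting=0.95)                           # backpropagation
#   root.create_graphviz('graphviz.dot')                        # inspect the tree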
| mit | 5,978,570,223,246,047,000 | 68.663866 | 120 | 0.625996 | false |
alexanderganderson/Diffusion-Probabilistic-Models | regression.py | 1 | 8365 | """
Defines the function approximators
"""
import numpy as np
import theano.tensor as T
# from theano.tensor.signal import downsample
from blocks.bricks import Activation, MLP, Initializable, application, Identity
from blocks.bricks.conv import ConvolutionalActivation
from blocks.initialization import IsotropicGaussian, Constant, Orthogonal
# TODO IsotropicGaussian init will be wrong scale for some layers
class LeakyRelu(Activation):
@application(inputs=['input_'], outputs=['output'])
def apply(self, input_):
return T.switch(input_ > 0, input_, 0.05*input_)
dense_nonlinearity = LeakyRelu()
# dense_nonlinearity = Tanh()
conv_nonlinearity = LeakyRelu()
class MultiScaleConvolution(Initializable):
def __init__(self, num_channels, num_filters, spatial_width, num_scales, filter_size, downsample_method='meanout', name=""):
"""
A brick implementing a single layer in a multi-scale convolutional network.
"""
super(MultiScaleConvolution, self).__init__()
self.num_scales = num_scales
self.filter_size = filter_size
self.num_filters = num_filters
self.spatial_width = spatial_width
self.downsample_method = downsample_method
self.children = []
print "adding MultiScaleConvolution layer"
# for scale in range(self.num_scales-1, -1, -1):
for scale in range(self.num_scales):
print "scale %d"%scale
conv_layer = ConvolutionalActivation(activation=conv_nonlinearity.apply,
filter_size=(filter_size,filter_size), num_filters=num_filters,
num_channels=num_channels, image_size=(spatial_width/2**scale, spatial_width/2**scale),
# assume images are spatially smooth -- in which case output magnitude scales with
# # filter pixels rather than square root of # filter pixels, so initialize
# accordingly.
weights_init=IsotropicGaussian(std=np.sqrt(1./(num_filters))/filter_size**2),
biases_init=Constant(0), border_mode='full', name=name+"scale%d"%scale)
self.children.append(conv_layer)
def downsample(self, imgs_in, scale):
"""
Downsample an image by a factor of 2**scale
"""
imgs = imgs_in.copy()
if scale == 0:
return imgs
# if self.downsample_method == 'maxout':
# print "maxout",
# imgs_maxout = downsample.max_pool_2d(imgs.copy(), (2**scale, 2**scale), ignore_border=False)
# else:
# print "meanout",
# imgs_maxout = self.downsample_mean_pool_2d(imgs.copy(), (2**scale, 2**scale))
num_imgs = imgs.shape[0].astype('int16')
num_layers = imgs.shape[1].astype('int16')
nlx0 = imgs.shape[2].astype('int16')
nlx1 = imgs.shape[3].astype('int16')
scalepow = np.int16(2**scale)
# downsample
imgs = imgs.reshape((num_imgs, num_layers, nlx0/scalepow, scalepow, nlx1/scalepow, scalepow))
imgs = T.mean(imgs, axis=5)
imgs = T.mean(imgs, axis=3)
return imgs
@application
def apply(self, X):
print "MultiScaleConvolution apply"
nsamp = X.shape[0].astype('int16')
Z = 0
overshoot = (self.filter_size - 1)/2
imgs_accum = 0 # accumulate the output image
for scale in range(self.num_scales-1, -1, -1):
# downsample image to appropriate scale
imgs_down = self.downsample(X, scale)
# do a convolutional transformation on it
conv_layer = self.children[scale]
# NOTE this is different than described in the paper, since each conv_layer
# includes a nonlinearity -- it's not just one nonlinearity at the end
imgs_down_conv = conv_layer.apply(imgs_down)
# crop the edge so it's the same size as the input at that scale
            imgs_down_conv_cropped = imgs_down_conv[:,:,overshoot:-overshoot,overshoot:-overshoot]
            imgs_accum += imgs_down_conv_cropped
if scale > 0:
# scale up by factor of 2
layer_width = self.spatial_width/2**scale
imgs_accum = imgs_accum.reshape((nsamp, self.num_filters, layer_width, 1, layer_width, 1))
imgs_accum = T.concatenate((imgs_accum, imgs_accum), axis=5)
imgs_accum = T.concatenate((imgs_accum, imgs_accum), axis=3)
imgs_accum = imgs_accum.reshape((nsamp, self.num_filters, layer_width*2, layer_width*2))
return imgs_accum/self.num_scales
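    # Shape sketch (illustrative; the sizes are made up, not taken from the
    # training configuration): for RGB 32x32 inputs,
    #
    #   msc = MultiScaleConvolution(num_channels=3, num_filters=16,
    #                               spatial_width=32, num_scales=3, filter_size=3)
    #   msc.initialize()
    #   Y = msc.apply(X)   # X: (batch, 3, 32, 32) -> Y: (batch, 16, 32, 32)
    #
    # The spatial size is preserved: each scale uses a 'full' convolution that
    # is cropped back by (filter_size - 1)/2 and then upsampled to full width.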
class MultiLayerConvolution(Initializable):
def __init__(self, n_layers, n_hidden, spatial_width, n_colors, n_scales, filter_size=3):
"""
A brick implementing a multi-layer convolutional network.
TODO make this multi-scale multi-layer convolution
"""
super(MultiLayerConvolution, self).__init__()
self.children = []
num_channels = n_colors
for ii in xrange(n_layers):
conv_layer = MultiScaleConvolution(num_channels, n_hidden, spatial_width, n_scales, filter_size, name="layer%d_"%ii)
self.children.append(conv_layer)
num_channels = n_hidden
@application
def apply(self, X):
Z = X
for conv_layer in self.children:
Z = conv_layer.apply(Z)
return Z
class MLP_conv_dense(Initializable):
def __init__(self, n_layers_conv, n_layers_dense_lower, n_layers_dense_upper,
n_hidden_conv, n_hidden_dense_lower, n_hidden_dense_lower_output, n_hidden_dense_upper,
spatial_width, n_colors, n_scales, n_temporal_basis):
"""
The multilayer perceptron, that provides temporal weighting coefficients for mu and sigma
images. This consists of a lower segment with a convolutional MLP, and optionally with a
dense MLP in parallel. The upper segment then consists of a per-pixel dense MLP
(convolutional MLP with 1x1 kernel).
"""
super(MLP_conv_dense, self).__init__()
self.n_colors = n_colors
self.spatial_width = spatial_width
self.n_hidden_dense_lower = n_hidden_dense_lower
self.n_hidden_dense_lower_output = n_hidden_dense_lower_output
self.n_hidden_conv = n_hidden_conv
## the lower layers
self.mlp_conv = MultiLayerConvolution(n_layers_conv, n_hidden_conv, spatial_width, n_colors, n_scales)
self.children = [self.mlp_conv]
if n_hidden_dense_lower > 0 and n_layers_dense_lower > 0:
n_input = n_colors*spatial_width**2
n_output = n_hidden_dense_lower_output*spatial_width**2
self.mlp_dense_lower = MLP([dense_nonlinearity] * n_layers_conv,
[n_input] + [n_hidden_dense_lower] * (n_layers_conv-1) + [n_output],
name='MLP dense lower', weights_init=Orthogonal(), biases_init=Constant(0))
self.children.append(self.mlp_dense_lower)
else:
n_hidden_dense_lower_output = 0
## the upper layers (applied to each pixel independently)
n_output = n_colors*n_temporal_basis*2 # "*2" for both mu and sigma
self.mlp_dense_upper = MLP([dense_nonlinearity] * (n_layers_dense_upper-1) + [Identity()],
[n_hidden_conv+n_hidden_dense_lower_output] +
[n_hidden_dense_upper] * (n_layers_dense_upper-1) + [n_output],
name='MLP dense upper', weights_init=Orthogonal(), biases_init=Constant(0))
self.children.append(self.mlp_dense_upper)
@application
def apply(self, X):
"""
Take in noisy input image and output temporal coefficients for mu and sigma.
"""
Y = self.mlp_conv.apply(X)
Y = Y.dimshuffle(0,2,3,1)
if self.n_hidden_dense_lower > 0:
n_images = X.shape[0].astype('int16')
X = X.reshape((n_images, self.n_colors*self.spatial_width**2))
Y_dense = self.mlp_dense_lower.apply(X)
Y_dense = Y_dense.reshape((n_images, self.spatial_width, self.spatial_width,
self.n_hidden_dense_lower_output))
Y = T.concatenate([Y/T.sqrt(self.n_hidden_conv),
Y_dense/T.sqrt(self.n_hidden_dense_lower_output)], axis=3)
Z = self.mlp_dense_upper.apply(Y)
return Z
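    # Illustrative instantiation (the hyperparameters are made up, not the ones
    # used by the training scripts):
    #
    #   mlp = MLP_conv_dense(
    #       n_layers_conv=4, n_layers_dense_lower=4, n_layers_dense_upper=2,
    #       n_hidden_conv=32, n_hidden_dense_lower=500,
    #       n_hidden_dense_lower_output=2, n_hidden_dense_upper=20,
    #       spatial_width=28, n_colors=1, n_scales=3, n_temporal_basis=10)
    #   mlp.initialize()
    #   Z = mlp.apply(X)   # X: (batch, 1, 28, 28)
    #                      # Z: (batch, 28, 28, n_colors * n_temporal_basis * 2)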
| mit | 2,050,625,259,363,093,200 | 42.341969 | 128 | 0.618649 | false |
downpoured/labs_coordinate_pictures | src/tools/ben_python_img/img_tests.py | 1 | 15862 |
import img_utils
import img_convert_resize
import img_resize_keep_exif
import PIL
from PIL import Image
import sys
sys.path.append('bn_python_common.zip')
from bn_python_common import *
def img_utils_testGetMarkFromFilename():
# tests splitting a filename that contains the "__MARKAS__" marker.
assertEq(('/test/file.jpg', '123'), img_utils.getMarkFromFilename('/test/file__MARKAS__123.jpg'))
assertEq(('/test/file.also.jpg', '123'), img_utils.getMarkFromFilename('/test/file.also__MARKAS__123.jpg'))
assertEq(('/test/file.jpg', ''), img_utils.getMarkFromFilename('/test/file__MARKAS__.jpg'))
assertException(lambda: img_utils.getMarkFromFilename(
'/test/dirmark__MARKAS__b/file__MARKAS__123.jpg'), ValueError, 'Directories')
assertException(lambda: img_utils.getMarkFromFilename(
'/test/dirmark__MARKAS__b/file.jpg'), ValueError, 'Directories')
assertException(lambda: img_utils.getMarkFromFilename(
'/test/file__MARKAS__123__MARKAS__123.jpg'), ValueError, 'exactly one marker')
assertException(lambda: img_utils.getMarkFromFilename(
'/test/file.jpg'), ValueError, 'exactly one marker')
assertException(lambda: img_utils.getMarkFromFilename(
'/test/file__MARKAS__123.foo.jpg'), ValueError, 'after the marker')
def img_utils_testGetFilesWithWrongExtension(tmpDir):
# looks for files that do not have the given extension.
tmpDirExt = files.join(tmpDir, 'testWrongExtension')
files.makedirs(tmpDirExt)
files.writeall(files.join(tmpDirExt, 'a.jpg'), 'content')
files.writeall(files.join(tmpDirExt, 'B.JPG'), 'content')
files.writeall(files.join(tmpDirExt, 'c.jpg'), 'content')
files.writeall(files.join(tmpDirExt, 'd.txt'), 'content')
files.writeall(files.join(tmpDirExt, 'e'), 'content')
files.makedirs(tmpDirExt + '/subdir')
fnGetFiles = files.listfiles
setRet = img_utils.getFilesWrongExtension(tmpDirExt, fnGetFiles, 'jpg')
expected = [files.join(tmpDirExt, 'd.txt'), files.join(tmpDirExt, 'e')]
assertEq(expected, list(sorted(f[0] for f in setRet)))
def img_convert_testGetNewSizeFromResizeSpec():
# common valid cases
assertEq((50, 100), img_convert_resize.getNewSizeFromResizeSpec('50%', 100, 200))
assertEq((90, 180), img_convert_resize.getNewSizeFromResizeSpec('90%', 101, 201))
assertEq((80, 160), img_convert_resize.getNewSizeFromResizeSpec('80h', 100, 200))
assertEq((160, 80), img_convert_resize.getNewSizeFromResizeSpec('80h', 200, 100))
assertEq((5, 10), img_convert_resize.getNewSizeFromResizeSpec('5%', 100, 200))
# invalid spec
assertException(lambda: img_convert_resize.getNewSizeFromResizeSpec('50x', 100, 200), ValueError)
assertException(lambda: img_convert_resize.getNewSizeFromResizeSpec('50', 100, 200), ValueError)
assertException(lambda: img_convert_resize.getNewSizeFromResizeSpec('0.5%', 100, 200), AssertionError)
assertException(lambda: img_convert_resize.getNewSizeFromResizeSpec(' 50%', 100, 200), AssertionError)
assertException(lambda: img_convert_resize.getNewSizeFromResizeSpec('50% ', 100, 200), ValueError)
assertException(lambda: img_convert_resize.getNewSizeFromResizeSpec('50%%', 100, 200), AssertionError)
assertException(lambda: img_convert_resize.getNewSizeFromResizeSpec('50%50%', 100, 200), AssertionError)
assertException(lambda: img_convert_resize.getNewSizeFromResizeSpec('0%', 100, 200), AssertionError)
assertException(lambda: img_convert_resize.getNewSizeFromResizeSpec('00%', 100, 200), AssertionError)
assertException(lambda: img_convert_resize.getNewSizeFromResizeSpec('%', 100, 200), AssertionError)
assertException(lambda: img_convert_resize.getNewSizeFromResizeSpec('h', 100, 200), AssertionError)
assertException(lambda: img_convert_resize.getNewSizeFromResizeSpec('1a0%', 100, 200), AssertionError)
assertException(lambda: img_convert_resize.getNewSizeFromResizeSpec('1a0h', 100, 200), AssertionError)
assertException(lambda: img_convert_resize.getNewSizeFromResizeSpec('110%', 100, 200), AssertionError)
assertException(lambda: img_convert_resize.getNewSizeFromResizeSpec('-10%', 100, 200), AssertionError)
assertException(lambda: img_convert_resize.getNewSizeFromResizeSpec('-10h', 100, 200), AssertionError)
# cases not to resize.
assertEq((0, 0), img_convert_resize.getNewSizeFromResizeSpec('100%', 100, 200))
assertEq((0, 0), img_convert_resize.getNewSizeFromResizeSpec('101h', 100, 200))
assertEq((0, 0), img_convert_resize.getNewSizeFromResizeSpec('101h', 200, 100))
def img_resize_keep_exif_testActualFiles(tmpDir):
trace('img_resize_keep_exif_testActualFiles started.')
tmpDir = files.join(tmpDir, 'testResizeKeepExif')
files.makedirs(tmpDir)
# create initial files
im = createTestImage(96, 144, 1)
filenames = [files.join(tmpDir, 'a100p__MARKAS__100%.jpg'),
files.join(tmpDir, 'a50p__MARKAS__50%.jpg'),
files.join(tmpDir, 'a32h__MARKAS__32h.jpg'),
files.join(tmpDir, 'a200h__MARKAS__200h.jpg')]
for filename in filenames:
im.save(filename)
del im
for index, filename in enumerate(filenames):
assertEq((96, 144), img_utils.getImageDims(filename))
# set an obscure tag that won't be transferred
img_utils.setExifField(filename, 'ProfileCopyright', 'ObscureTagSet' + str(index))
assertEq('ObscureTagSet' + str(index), img_utils.readExifField(filename, 'ProfileCopyright'))
# set a common tag that will be transferred
img_utils.setExifField(filename, 'Make', 'TestingMake' + str(index))
assertEq('TestingMake' + str(index), img_utils.readExifField(filename, 'Make'))
# run the resizes. resizeAllAndKeepExif resizes based on the filename.
img_resize_keep_exif.resizeAllAndKeepExif(tmpDir,
recurse=False, storeOriginalFilename=True, storeExifFromOriginal=True, jpgHighQualityChromaSampling=False)
# check dimensions
assertEq((96, 144), img_utils.getImageDims(files.join(tmpDir, 'a100p.jpg')))
assertEq((48, 72), img_utils.getImageDims(files.join(tmpDir, 'a50p.jpg')))
assertEq((32, 48), img_utils.getImageDims(files.join(tmpDir, 'a32h.jpg')))
assertEq((96, 144), img_utils.getImageDims(files.join(tmpDir, 'a200h.jpg')))
# check common tag, should have been transferred
assertEq('TestingMake0', img_utils.readExifField(files.join(tmpDir, 'a100p.jpg'), 'Make'))
assertEq('TestingMake1', img_utils.readExifField(files.join(tmpDir, 'a50p.jpg'), 'Make'))
assertEq('TestingMake2', img_utils.readExifField(files.join(tmpDir, 'a32h.jpg'), 'Make'))
assertEq('TestingMake3', img_utils.readExifField(files.join(tmpDir, 'a200h.jpg'), 'Make'))
# check uncommon tag, should only be present for the ones moved instead of resized
assertEq('ObscureTagSet0', img_utils.readExifField(files.join(tmpDir, 'a100p.jpg'), 'ProfileCopyright'))
assertEq('', img_utils.readExifField(files.join(tmpDir, 'a50p.jpg'), 'ProfileCopyright'))
assertEq('', img_utils.readExifField(files.join(tmpDir, 'a32h.jpg'), 'ProfileCopyright'))
assertEq('ObscureTagSet3', img_utils.readExifField(files.join(tmpDir, 'a200h.jpg'), 'ProfileCopyright'))
# check that original filename is stored in exif data
assertEq('a100p.jpg', img_utils.readOriginalFilename(files.join(tmpDir, 'a100p.jpg')))
assertEq('a50p.jpg', img_utils.readOriginalFilename(files.join(tmpDir, 'a50p.jpg')))
assertEq('a32h.jpg', img_utils.readOriginalFilename(files.join(tmpDir, 'a32h.jpg')))
assertEq('a200h.jpg', img_utils.readOriginalFilename(files.join(tmpDir, 'a200h.jpg')))
expectedSizes = '''a100p.jpg|8524
a200h.jpg|8524
a200h__MARKAS__200h.jpg|8502
a32h.jpg|1335
a32h__MARKAS__32h.jpg|8502
a50p.jpg|2549
a50p__MARKAS__50%.jpg|8502'''.replace('\r\n', '\n')
resultSizes = '\n'.join([short + '|' + str(files.getsize(file))
for file, short in sorted(files.listfiles(tmpDir))])
assertEq(expectedSizes, resultSizes, 'current pillow version=%s' % PIL.PILLOW_VERSION)
trace('img_resize_keep_exif_testActualFiles passed.')
def img_resize_keep_exif_testCleanup(tmpDir):
# when the user has reviewed that the conversion looks correct, they'll run cleanup()
# which will discard the previous files with __MARKAS__.
trace('img_resize_keep_exif_testCleanup started.')
tmpDir = files.join(tmpDir, 'testCleanup')
files.makedirs(tmpDir)
files.writeall(files.join(tmpDir, 'a1.jpg'), '')
files.writeall(files.join(tmpDir, 'a1__MARKAS__50%.jpg'), '')
files.writeall(files.join(tmpDir, 'a2.jpg'), '')
files.writeall(files.join(tmpDir, 'a2__MARKAS__200h.jpg'), '')
files.writeall(files.join(tmpDir, 'a3.png'), '')
files.writeall(files.join(tmpDir, 'a3__MARKAS__100%.png'), '')
# file with no corresponding markas should not be deleted.
files.writeall(files.join(tmpDir, 'a4.jpg'), '')
# files with no corresponding converted file should not be deleted.
files.writeall(files.join(tmpDir, 'a5__MARKAS__100%.jpg'), '')
files.writeall(files.join(tmpDir, 'a6__MARKAS__.jpg'), '')
img_resize_keep_exif.cleanup(tmpDir, recurse=False, prompt=False)
expectedSizes = '''a1.jpg|0
a2.jpg|0
a3.png|0
a3__MARKAS__100%.png|0
a4.jpg|0
a5__MARKAS__100%.jpg|0
a6__MARKAS__.jpg|0'''.replace('\r\n', '\n')
resultSizes = '\n'.join([short + '|' + str(files.getsize(file))
for file, short in sorted(files.listfiles(tmpDir))])
assertEq(expectedSizes, resultSizes)
trace('img_resize_keep_exif_testCleanup passed.')
def assertExceptionOrFalse(fn, excType):
ret = False
try:
ret = fn()
except:
e = sys.exc_info()[1]
assertTrue(isinstance(e, excType), 'wrong exc type')
assertTrue(not ret)
def img_resize_keep_exif_testExifErrorsShouldRaise(tmpDir):
# most exif operations on an invalid jpg should raise PythonImgExifError
files.writeall(files.join(tmpDir, 'invalidjpg.jpg'), 'not a valid jpg')
files.writeall(files.join(tmpDir, 'invalidjpg2.jpg'), 'not a valid jpg')
assertExceptionOrFalse(lambda: not img_utils.readOriginalFilename(
files.join(tmpDir, 'invalidjpg.jpg')), img_utils.PythonImgExifError)
assertException(lambda: img_utils.stampJpgWithOriginalFilename(
files.join(tmpDir, 'invalidjpg.jpg'), 'test'), img_utils.PythonImgExifError)
assertException(lambda: img_utils.transferMostUsefulExifTags(
files.join(tmpDir, 'invalidjpg.jpg'),
files.join(tmpDir, 'invalidjpg2.jpg')), img_utils.PythonImgExifError)
assertException(lambda: img_utils.removeResolutionTags(
files.join(tmpDir, 'invalidjpg.jpg')), img_utils.PythonImgExifError)
class RNG(object):
# so that same sequence is generated regardless of Python version
def __init__(self, seed=0):
self.previous = seed
def next(self):
        # use constants from glibc's rand()
modulus = 2**31 - 1
a, c = 1103515245, 12345
ret = (self.previous * a + c) % modulus
self.previous = ret
return ret
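# Illustrative usage (not part of the original tests): two generators built
# with the same seed always produce the same stream, which is what keeps the
# generated test images byte-identical across Python versions:
#   a, b = RNG(7), RNG(7)
#   assert [a.next() for _ in xrange(5)] == [b.next() for _ in xrange(5)]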
def createTestImage(width, height, seed):
rng = RNG(seed)
im = Image.new("RGB", (width, height))
for y in xrange(height):
for x in xrange(width):
v = rng.next() % 256
im.putpixel((x, y), (v, v, v))
return im
def testCombinatoricImageConversion(tmpDir, testImage):
# go from each format to every other format!
# note: bmp should be first in the list
formats = ['bmp', 'png', 'jpg', 'webp']
jpgQuality = 100
if not getInputBool('run combinatoricImageConversionTest?'):
return
for format in formats:
startfile = files.join(tmpDir, 'start.' + format)
if format == 'bmp':
testImage.save(startfile)
else:
img_convert_resize.convertOrResizeImage(files.join(tmpDir, 'start.bmp'),
startfile, jpgQuality=jpgQuality)
for outformat in formats:
if outformat != format:
outfile = startfile + '.' + outformat
assertTrue(not files.exists(outfile))
img_convert_resize.convertOrResizeImage(startfile, outfile, jpgQuality=jpgQuality)
assertTrue(files.exists(outfile))
expectedSizes = '''start.bmp|43254
start.bmp.jpg|15580
start.bmp.png|39430
start.bmp.webp|14454
start.jpg|15580
start.jpg.bmp|43254
start.jpg.png|39483
start.jpg.webp|14454
start.png|39430
start.png.bmp|43254
start.png.jpg|15580
start.png.webp|14454
start.webp|14454
start.webp.bmp|43254
start.webp.jpg|15580
start.webp.png|22366'''.replace('\r\n', '\n')
resultSizes = '\n'.join([short + '|' + str(files.getsize(file))
for file, short in sorted(files.listfiles(tmpDir)) if short.startswith('start')])
assertEq(expectedSizes, resultSizes)
# are bmps equivalent
assertTrue(files.fileContentsEqual(files.join(tmpDir, 'start.bmp'), files.join(tmpDir, 'start.png.bmp')))
assertTrue(files.fileContentsEqual(files.join(tmpDir, 'start.bmp'), files.join(tmpDir, 'start.webp.bmp')))
# are jpgs equivalent
assertTrue(files.fileContentsEqual(files.join(tmpDir, 'start.bmp.jpg'), files.join(tmpDir, 'start.jpg')))
assertTrue(files.fileContentsEqual(files.join(tmpDir, 'start.bmp.jpg'), files.join(tmpDir, 'start.png.jpg')))
assertTrue(files.fileContentsEqual(files.join(tmpDir, 'start.bmp.jpg'), files.join(tmpDir, 'start.webp.jpg')))
# are webps equivalent
assertTrue(files.fileContentsEqual(files.join(tmpDir, 'start.bmp.webp'), files.join(tmpDir, 'start.png.webp')))
assertTrue(files.fileContentsEqual(files.join(tmpDir, 'start.bmp.webp'), files.join(tmpDir, 'start.webp')))
# are pngs equivalent
assertTrue(files.fileContentsEqual(files.join(tmpDir, 'start.bmp.png'), files.join(tmpDir, 'start.png')))
# png written by dwebp is different, but it should still roundtrip
img_convert_resize.convertOrResizeImage(files.join(tmpDir, 'start.webp.png'), files.join(tmpDir, 'start.webp.png.bmp'))
assertTrue(files.fileContentsEqual(files.join(tmpDir, 'start.bmp'), files.join(tmpDir, 'start.webp.png.bmp')))
def testJpgQualities(tmpDir, testImage):
# simply write several jpgs at different qualities, and make sure the file sizes are as expected.
tmpDir = files.join(tmpDir, 'testJpgQuality')
files.makedirs(tmpDir)
testImage.save(files.join(tmpDir, 'start.bmp'))
qualities = [100, 90, 60, 10]
for qual in qualities:
img_convert_resize.convertOrResizeImage(files.join(tmpDir, 'start.bmp'),
files.join(tmpDir, 'q%d.jpg'%qual), jpgQuality=qual)
expectedSizes = '''q10.jpg|993
q100.jpg|15580
q60.jpg|5120
q90.jpg|9406
start.bmp|43254'''.replace('\r\n', '\n')
resultSizes = '\n'.join([short + '|' + str(files.getsize(file))
for file, short in sorted(files.listfiles(tmpDir))])
assertEq(expectedSizes, resultSizes)
def img_convert_resize_tests(tmpDir):
width, height = 120, 120
testImage = createTestImage(width, height, 1)
testCombinatoricImageConversion(tmpDir, testImage)
testJpgQualities(tmpDir, testImage)
if __name__ == '__main__':
# passes on pillow 3.2, 3.3, 4.0
tmpDir = files.join(img_utils.getTempLocation(), 'testimgconvert')
if files.isdir(tmpDir):
files.rmtree(tmpDir)
files.makedirs(tmpDir)
try:
img_utils_testGetMarkFromFilename()
img_utils_testGetFilesWithWrongExtension(tmpDir)
img_resize_keep_exif_testActualFiles(tmpDir)
img_resize_keep_exif_testCleanup(tmpDir)
img_resize_keep_exif_testExifErrorsShouldRaise(tmpDir)
img_convert_testGetNewSizeFromResizeSpec()
img_convert_resize_tests(tmpDir)
finally:
files.rmtree(tmpDir)
| gpl-3.0 | 5,360,751,421,662,296,000 | 48.26087 | 123 | 0.696129 | false |
HoverHell/pyimapsmtpt | pyimapsmtpt/common.py | 1 | 3253 | # coding: utf8
import re
import logging
_log = logging.getLogger(__name__)
class EventProcessed(Exception):
""" A special exception to end the current event processing """
def to_bytes(val):
if isinstance(val, unicode):
return val.encode('utf-8')
return val
def get_html2text(config):
try:
import html2text
except Exception, exc:
_log.warning("html2text import failure: %r", exc)
return lambda s: s # dummy replacement
obj = html2text.HTML2Text(bodywidth=config.html2text_bodywidth)
obj.links_each_paragraph = config.html2text_links_each_paragraph
for k, v in config.html2text_etcetera.items():
setattr(obj, k, v)
## postprocess
def func(val, *ar, **kwa):
res = obj.handle(val, *ar, **kwa)
if config.html2text_strip:
res = res.strip()
return res
func.html2text_mod = html2text
func.html2text = obj
return func
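# Usage sketch (assumes a config object exposing the html2text_* attributes
# referenced above; the HTML snippet is made up):
#   html2text = get_html2text(config)
#   text = html2text(u"<p>Hello <b>world</b></p>")
# When the html2text package is unavailable, the returned callable is the
# pass-through lambda above, so callers get the raw markup back unchanged.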
def configure_logging(config):
import logging.config as logconf
if config.log_level is not None:
config.logging['root']['level'] = config.log_level
if config.log_file:
config.logging['handlers']['main_file']['filename'] = config.log_file
config.logging['root']['handlers'] = ['main_file']
logconf.dictConfig(config.logging)
def config_email_utf8():
""" Apparently, for created email, this makes the email module avoid using
base64 for encoding utf-8 email body parts. It also sets `output_charset`
to None. The exact reasons are still unclear. """
import email.charset
email.charset.add_charset(
'utf-8',
## Default: 3
header_enc=email.charset.SHORTEST,
## Default: 2
body_enc=None,
## Default: 'utf-8'
output_charset=None)
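# Usage sketch (assumption, not from the original module): call this once at
# startup, before any outgoing message is built, e.g.
#   config_email_utf8()
#   part = email.mime.text.MIMEText(u"body text", _charset="utf-8")
# so that utf-8 text parts are not forced into base64 by the email package.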
#######
## Library-independence for JIDs
#######
def jid_to_data(jid):
""" ...
:param jid: xmpp.protocol.JID instance or jid_data or string
"""
if isinstance(jid, dict):
assert 'node' in jid
assert 'domain' in jid
assert 'resource' in jid
return jid
if isinstance(jid, basestring):
return jid_string_to_data(jid)
## Probably an xmpp.protocol.JID or equivalent
return dict(node=jid.node, domain=jid.domain, resource=jid.resource)
def jid_data_to_string(jid_data, resource=True):
res = [
'%s@' % (jid_data['node'],) if jid_data['node'] else '',
jid_data['domain'],
'/%s' % (jid_data['resource'],) if resource else ''
]
return ''.join(res)
_re_optional = lambda s: r'(?:%s)?' % (s,)
_jid_re = ''.join([
r'^',
_re_optional(r'(?P<node>[^@]+)@'),
r'(?P<domain>[^/@]+)',
_re_optional(r'/(?P<resource>.*)'),
'$'])
# http://stackoverflow.com/a/1406200 - not perfectly strict but workable
_jid_re_strict = r'''^(?:([^@/<>'\"]+)@)?([^@/<>'\"]+)(?:/([^<>'\"]*))?$'''
def jid_string_to_data(jid_str, strict=True):
if strict:
m = re.match(_jid_re_strict, jid_str)
else:
m = re.match(_jid_re, jid_str)
if not m:
raise ValueError("Malformed JID", jid_str)
node, domain, resource = m.groups()
_pp = lambda v: v or ''
return dict(node=_pp(node), domain=_pp(domain), resource=_pp(resource))
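# Round-trip sketch for the helpers above (the JID is made up):
#   jid_string_to_data('alice@example.org/laptop')
#     -> {'node': 'alice', 'domain': 'example.org', 'resource': 'laptop'}
#   jid_data_to_string({'node': 'alice', 'domain': 'example.org',
#                       'resource': 'laptop'}, resource=False)
#     -> 'alice@example.org'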
| gpl-2.0 | -7,889,025,405,643,893,000 | 26.108333 | 78 | 0.597602 | false |
sunweaver/ganetimgr | apply/urls/user.py | 1 | 2001 | # -*- coding: utf-8 -*- vim:fileencoding=utf-8:
# Copyright (C) 2010-2014 GRNET S.A.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from django.conf.urls.defaults import patterns, url
from apply import views
urlpatterns = patterns(
'',
url(r'^info/(?P<type>\w+)/(?P<usergroup>[\w\.\@-]+)/?$', views.user_info, name="user-info"),
url(r'^idle/$', views.idle_accounts, name="idle_accounts"),
url(r'^profile/$', views.profile, name="profile"),
url(r'^mail_change/$', views.mail_change, name="mail-change"),
url(r'^name_change/$', views.name_change, name="name-change"),
url(r'^other_change/$', views.other_change, name="other-change"),
url(r'^keys/$', views.user_keys, name="user-keys"),
url(r'^keys/delete/(?P<key_id>\d+)?$', views.delete_key, name="delete-key"),
url(r'^login/', 'django.contrib.auth.views.login', {'template_name': 'users/login.html'}, name="login"),
url(r'^logout/', 'django.contrib.auth.views.logout', {'next_page': '/'}, name="logout"),
url(r'^pass_change/$', 'django.contrib.auth.views.password_change', {'template_name':'users/pass_change.html', 'post_change_redirect':'done'}, name="pass_change"),
url(r'^pass_change/done/$', 'django.contrib.auth.views.password_change_done', {'template_name':'users/pass_change_done.html'}, name="pass_change_done" ),
url(r'^pass_change/notify/$', views.pass_notify, name="pass_change_notify"),
)
| gpl-3.0 | -2,009,622,974,093,488,000 | 54.583333 | 167 | 0.684658 | false |
luisxiaomai/robotframework-anywherelibrary | src/AnywhereLibrary/base/logging.py | 1 | 1083 | import os
#from robot.variables import GLOBAL_VARIABLES
from robot.libraries.BuiltIn import BuiltIn
from robot.api import logger
from keywordgroup import KeywordGroup
class Logging(KeywordGroup):
# Private
def _debug(self, message):
logger.debug(message)
def _get_log_dir(self):
        # GLOBAL_VARIABLES was removed in newer Robot Framework releases, so
        # the values are looked up through the BuiltIn keyword API instead.
        logfile = BuiltIn().get_variable_value('${LOG FILE}')
        if logfile != 'NONE':
            return os.path.dirname(logfile)
        return BuiltIn().get_variable_value('${OUTPUTDIR}')
def _html(self, message):
logger.info(message, True, False)
def _info(self, message):
logger.info(message)
def _log(self, message, level='INFO'):
level = level.upper()
if (level == 'INFO'): self._info(message)
elif (level == 'DEBUG'): self._debug(message)
elif (level == 'WARN'): self._warn(message)
elif (level == 'HTML'): self._html(message)
        elif (level == 'ERROR'): self._error(message)
def _error(self,message):
raise AssertionError(message)
def _warn(self, message):
logger.warn(message)
| mit | -5,113,758,825,957,334,000 | 27.5 | 53 | 0.615882 | false |
regebro/hovercraft | hovercraft/generate.py | 1 | 8341 | import os
import re
import shutil
from lxml import etree, html
from pkg_resources import resource_string
from .parse import rst2xml, SlideMaker
from .position import position_slides
from .template import (
Template,
CSS_RESOURCE,
JS_RESOURCE,
JS_POSITION_HEADER,
JS_POSITION_BODY,
OTHER_RESOURCE,
DIRECTORY_RESOURCE,
)
class ResourceResolver(etree.Resolver):
def resolve(self, url, pubid, context):
if url.startswith("resource:"):
prefix, filename = url.split(":", 1)
return self.resolve_string(resource_string(__name__, filename), context)
def rst2html(
filepath,
template_info,
auto_console=False,
skip_help=False,
skip_notes=False,
mathjax=False,
slide_numbers=False,
):
# Read the infile
with open(filepath, "rb") as infile:
rststring = infile.read()
presentation_dir = os.path.split(filepath)[0]
# First convert reST to XML
xml, dependencies = rst2xml(rststring, filepath)
tree = etree.fromstring(xml)
# Fix up the resulting XML so it makes sense
sm = SlideMaker(tree, skip_notes=skip_notes)
tree = sm.walk()
# Pick up CSS information from the tree:
for attrib in tree.attrib:
if attrib.startswith("css"):
if "-" in attrib:
dummy, media = attrib.split("-", 1)
else:
media = "screen,projection"
css_files = tree.attrib[attrib].split()
for css_file in css_files:
if media in ("console", "preview"):
# The "console" media is used to style the presenter
# console and does not need to be included in the header,
# but must be copied. So we add it as a non css file,
# even though it's a css-file.
template_info.add_resource(
os.path.abspath(os.path.join(presentation_dir, css_file)),
OTHER_RESOURCE,
target=css_file,
)
else:
# Add as a css resource:
template_info.add_resource(
os.path.abspath(os.path.join(presentation_dir, css_file)),
CSS_RESOURCE,
target=css_file,
extra_info=media,
)
elif attrib.startswith("js"):
if attrib == "js-header":
media = JS_POSITION_HEADER
else:
# Put javascript in body tag as default.
media = JS_POSITION_BODY
js_files = tree.attrib[attrib].split()
for js_file in js_files:
template_info.add_resource(
os.path.abspath(os.path.join(presentation_dir, js_file)),
JS_RESOURCE,
target=js_file,
extra_info=media,
)
if sm.need_mathjax and mathjax:
if mathjax.startswith("http"):
template_info.add_resource(
None, JS_RESOURCE, target=mathjax, extra_info=JS_POSITION_HEADER
)
else:
# Local copy
template_info.add_resource(mathjax, DIRECTORY_RESOURCE, target="mathjax")
template_info.add_resource(
None,
JS_RESOURCE,
target="mathjax/MathJax.js?config=TeX-MML-AM_CHTML",
extra_info=JS_POSITION_HEADER,
)
# Position all slides
position_slides(tree)
# Add the template info to the tree:
tree.append(template_info.xml_node())
    # If the console should open automatically, set an attribute on the document:
if auto_console:
tree.attrib["auto-console"] = "True"
    # If the help popup should be skipped, set an attribute on the document:
if skip_help:
tree.attrib["skip-help"] = "True"
# If the slide numbers should be displayed, set an attribute on the document:
if slide_numbers:
tree.attrib["slide-numbers"] = "True"
# We need to set up a resolver for resources, so we can include the
# reST.xsl file if so desired.
parser = etree.XMLParser()
parser.resolvers.add(ResourceResolver())
# Transform the tree to HTML
xsl_tree = etree.fromstring(template_info.xsl, parser)
transformer = etree.XSLT(xsl_tree)
tree = transformer(tree)
result = html.tostring(tree)
return template_info.doctype + result, dependencies
def copy_resource(filename, sourcedir, targetdir):
if filename[0] == "/" or ":" in filename:
# Absolute path or URI: Do nothing
return None # No monitoring needed
sourcepath = os.path.join(sourcedir, filename)
targetpath = os.path.join(targetdir, filename)
if os.path.exists(targetpath) and os.path.getmtime(sourcepath) <= os.path.getmtime(
targetpath
):
# File has not changed since last copy, so skip.
return sourcepath # Monitor this file
targetdir = os.path.split(targetpath)[0]
if not os.path.exists(targetdir):
os.makedirs(targetdir)
shutil.copy2(sourcepath, targetpath)
return sourcepath # Monitor this file
def generate(args):
"""Generates the presentation and returns a list of files used"""
source_files = {args.presentation}
# Parse the template info
template_info = Template(args.template)
if args.css:
presentation_dir = os.path.split(args.presentation)[0]
target_path = os.path.relpath(args.css, presentation_dir)
template_info.add_resource(
args.css, CSS_RESOURCE, target=target_path, extra_info="all"
)
source_files.add(args.css)
if args.js:
presentation_dir = os.path.split(args.presentation)[0]
target_path = os.path.relpath(args.js, presentation_dir)
template_info.add_resource(
args.js, JS_RESOURCE, target=target_path, extra_info=JS_POSITION_BODY
)
source_files.add(args.js)
# Make the resulting HTML
htmldata, dependencies = rst2html(
args.presentation,
template_info,
args.auto_console,
args.skip_help,
args.skip_notes,
args.mathjax,
args.slide_numbers,
)
source_files.update(dependencies)
# Write the HTML out
if not os.path.exists(args.targetdir):
os.makedirs(args.targetdir)
with open(os.path.join(args.targetdir, "index.html"), "wb") as outfile:
outfile.write(htmldata)
# Copy supporting files
source_files.update(template_info.copy_resources(args.targetdir))
# Copy files from the source:
sourcedir = os.path.split(os.path.abspath(args.presentation))[0]
tree = html.fromstring(htmldata)
for image in tree.iterdescendants("img"):
filename = image.attrib["src"]
source_files.add(copy_resource(filename, sourcedir, args.targetdir))
for source in tree.iterdescendants('source'):
filename = source.attrib['src']
source_files.add(copy_resource(filename, sourcedir, args.targetdir))
RE_CSS_URL = re.compile(br"""url\(['"]?(.*?)['"]?[\)\?\#]""")
# Copy any files referenced by url() in the css-files:
for resource in template_info.resources:
if resource.resource_type != CSS_RESOURCE:
continue
# path in CSS is relative to CSS file; construct source/dest accordingly
css_base = template_info.template_root if resource.is_in_template else sourcedir
css_sourcedir = os.path.dirname(os.path.join(css_base, resource.filepath))
css_targetdir = os.path.dirname(
os.path.join(args.targetdir, resource.final_path())
)
uris = RE_CSS_URL.findall(template_info.read_data(resource))
uris = [uri.decode() for uri in uris]
if resource.is_in_template and template_info.builtin_template:
for filename in uris:
template_info.add_resource(
filename, OTHER_RESOURCE, target=css_targetdir, is_in_template=True
)
else:
for filename in uris:
source_files.add(copy_resource(filename, css_sourcedir, css_targetdir))
# All done!
return {os.path.abspath(f) for f in source_files if f}
| mit | -971,954,912,299,686,000 | 33.754167 | 88 | 0.602686 | false |
GluuFederation/cluster-tools | recovery/recovery.py | 1 | 10339 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# The MIT License (MIT)
#
# Copyright (c) 2016 Gluu
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import json
import logging
import os
import socket
import subprocess
import sys
import time
DATABASE_URI = "/var/lib/gluuengine/db/shared.json"
DATABASE_URI_COMPAT = "/var/lib/gluu-cluster/db/shared.json"
RECOVERY_PRIORITY_CHOICES = {
"ldap": 1,
"oxauth": 2,
"oxtrust": 3,
"oxidp": 4,
"nginx": 5,
}
logger = logging.getLogger("recovery")
logger.setLevel(logging.INFO)
ch = logging.StreamHandler()
fmt = logging.Formatter('[%(levelname)s] %(message)s')
ch.setFormatter(fmt)
logger.addHandler(ch)
def load_database():
"""Loads JSON-based database as Python object.
"""
data = []
if not any(map(os.path.exists, [DATABASE_URI, DATABASE_URI_COMPAT])):
logger.warn("unable to read {} or {}".format(DATABASE_URI, DATABASE_URI_COMPAT)) # noqa
sys.exit(1)
with open(DATABASE_URI) as fp:
data = json.loads(fp.read())
return data
def get_current_cluster():
"""Gets a cluster.
"""
data = load_database()
clusters = [item for _, item in data.get("clusters", {}).iteritems()]
try:
cluster = clusters[0]
except IndexError:
cluster = {}
return cluster
def get_node(hostname=""):
"""Gets node based.
:param hostname: Hostname; if omitted, will check for FQDN or hostname
from socket connection.
"""
data = load_database()
nodes = [
item for _, item in data.get("nodes", {}).iteritems()
if item["name"] in (hostname, socket.getfqdn(), socket.gethostname(),)
]
try:
node = nodes[0]
except IndexError:
node = {}
return node
def get_containers(node_id):
"""Gets all containers belong to certain node.
:param node_id: ID of the node.
"""
data = load_database()
containers = []
for _, item in data.get("containers", {}).iteritems():
if item["node_id"] == node_id and item["state"] == "SUCCESS":
# adds recovery_priority
item["recovery_priority"] = RECOVERY_PRIORITY_CHOICES.get(
item["type"], 0
)
containers.append(item)
return containers
def safe_subprocess_exec(cmd):
"""Runs shell command safely.
:param cmd: String of command.
"""
cmdlist = cmd.strip().split()
ppn = subprocess.Popen(
cmdlist,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
out, err = ppn.communicate()
return out.strip(), err.strip(), ppn.returncode
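# Usage sketch (not part of the original script): the helper returns stripped
# stdout, stripped stderr, and the exit code, so callers branch on the latter:
#   out, err, rc = safe_subprocess_exec("docker ps -q")
#   if rc != 0:
#       logger.warn("docker ps failed; reason=%s", err)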
def container_stopped(container_id):
"""Checks whether a container is stopped.
:param container_id: ID of the container assigned by docker daemon.
"""
out, _, _ = safe_subprocess_exec("docker inspect {}".format(container_id))
data = json.loads(out)
return data[0]["State"]["Running"] is False
def container_exists(container_id):
"""Checks whether a container exists.
:param container_id: ID of the container assigned by docker daemon.
"""
out, _, _ = safe_subprocess_exec("docker inspect {}".format(container_id))
data = json.loads(out)
return len(data) > 0
def restart_container(container_id):
"""Restarts a container regardless its state.
:param container_id: ID of the container assigned by docker daemon.
"""
return safe_subprocess_exec("docker restart {}".format(container_id))
def add_dns(container_id, hostname):
"""Adds DNS entry to weavedns.
:param container_id: ID of the container assigned by docker daemon.
:param hostname: Hostname that should be added into weavedns.
"""
return safe_subprocess_exec("weave dns-add {} -h {}".format(
container_id, hostname
))
def detach_ip(container_id):
"""Detaches container from weave network.
:param container_id: ID of the container assigned by docker daemon.
"""
safe_subprocess_exec("weave detach {}".format(container_id))
def httpd_crashed(container_id):
"""Checks whether httpd process managed by supervisor is crashed or not.
:param container_id: ID of the container assigned by docker daemon.
"""
out, _, _ = safe_subprocess_exec(
"docker exec {} supervisorctl status httpd".format(container_id)
)
return "RUNNING" not in out
def weave_component_ready(name):
delay = 10
max_retry = 6
retry_attempt = 0
component_ready = False
while retry_attempt < max_retry:
if container_stopped(name):
logger.warn("{} is not ready; retrying ...".format(name))
time.sleep(delay)
retry_attempt += 1
else:
component_ready = True
break
return component_ready
def recover_containers(node_id, ox_cluster_hostname):
"""Recovers all containers.
:param node_id: ID of the node.
:param ox_cluster_hostname: Name of IDP server.
"""
containers = sorted(get_containers(node_id),
key=lambda x: x["recovery_priority"])
for container in containers:
if not container_exists(container["cid"]):
continue
if not container_stopped(container["cid"]):
# no need to restart already running container
logger.info("{} container {} already running; skipping ...".format(
container["type"], container["name"],
))
continue
logger.info("restarting {} container {}".format(
container["type"], container["name"]
))
_, err, returncode = restart_container(container["cid"])
if returncode != 0:
# if restarting failed, continue to other containers
# and let this specific container stopped so we can
# retry the recovery process again
logger.warn(
"something is wrong while restarting "
"{} container {}; reason={}".format(
container["type"], container["name"], err
)
)
continue
# DISABLED container must be detached from weave network
if container["state"] == "DISABLED":
detach_ip(container["cid"])
continue
# manually re-adding DNS entry
logger.info("adding DNS entry {} for {} container {}".format(
container["hostname"], container["type"], container["name"]
))
add_dns(container["cid"], container["hostname"])
if container["type"] in ("ldap", "oxauth", "oxtrust",):
add_dns(container["cid"], "{}.weave.local".format(container["type"])) # noqa
if container["type"] == "ldap":
# introduce delay to wait for a running opendj instance
# before restarting other containers
logger.info("waiting for ldap server startup; "
"this may take a while ...")
time.sleep(20)
# if cluster hostname contains `weave.local` suffix, this extra DNS
# entry will be added into weavedns; pretty useful for setup which
# doesn't have resolvable domain name
if container["type"] == "nginx":
add_dns(container["cid"], ox_cluster_hostname)
# currently, only oxauth and oxidp use httpd
if container["type"] in ("oxauth", "oxidp"):
if httpd_crashed(container["cid"]):
# httpd refuses to work if previous shutdown was unclean
# a workaround is to remove ``/var/run/apache2/apache2.pid``
# before restarting supervisor program
cmd = "rm /var/run/apache2/apache2.pid " \
"&& supervisorctl restart httpd"
safe_subprocess_exec(
'''docker exec {} sh -c "{}"'''.format(container["cid"], cmd) # noqa
)
if __name__ == "__main__":
try:
logger.info("starting recovery process for current node; "
"this may take a while ...")
cluster = get_current_cluster()
if not cluster:
logger.warn("unable to find any cluster")
sys.exit(1)
node = get_node()
if not node:
logger.warn("unable to find node matches existing hostname")
sys.exit(1)
if not weave_component_ready("weave"):
logger.error("aborting recovery process due to weave being "
"not ready; please try again later ...")
sys.exit(1)
if not weave_component_ready("weaveproxy"):
logger.error("aborting recovery process due to weaveproxy being "
"not ready; please try again later ...")
sys.exit(1)
if not weave_component_ready("weaveplugin"):
logger.error("aborting recovery process due to weaveplugin being "
"not ready; please try again later ...")
sys.exit(1)
time.sleep(10)
recover_containers(node.get("id"), cluster.get("ox_cluster_hostname"))
logger.info("recovery process for current node is finished")
except KeyboardInterrupt:
logger.warn("recovery process aborted by user")
sys.exit(0)
| mit | 5,439,546,729,971,558,000 | 31.309375 | 96 | 0.610697 | false |
googleapis/google-cloud-cpp | google/cloud/storage/emulator/gcs/bucket.py | 1 | 18283 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implement a class to simulate GCS buckets."""
import datetime
import hashlib
import json
import random
import re
import scalpl
import utils
from google.cloud.storage_v1.proto import storage_resources_pb2 as resources_pb2
from google.cloud.storage_v1.proto.storage_resources_pb2 import CommonEnums
from google.iam.v1 import policy_pb2
from google.protobuf import field_mask_pb2, json_format
class Bucket:
modifiable_fields = [
"acl",
"default_object_acl",
"lifecycle",
"cors",
"storage_class",
"default_event_based_hold",
"labels",
"website",
"versioning",
"logging",
"encryption",
"billing",
"retention_policy",
"location_type",
"iam_configuration",
]
rest_only_fields = ["iamConfiguration.publicAccessPrevention"]
def __init__(self, metadata, notifications, iam_policy, rest_only):
self.metadata = metadata
self.notifications = notifications
self.iam_policy = iam_policy
self.rest_only = rest_only
@classmethod
def __validate_bucket_name(cls, bucket_name, context):
valid = True
if "." in bucket_name:
valid &= len(bucket_name) <= 222
valid &= all([len(part) <= 63 for part in bucket_name.split(".")])
else:
valid &= len(bucket_name) <= 63
valid &= (
re.match("^[a-z0-9][a-z0-9._\\-]+[a-z0-9]$", bucket_name) is not None
)
valid &= not bucket_name.startswith("goog")
valid &= re.search("g[0o][0o]g[1l][e3]", bucket_name) is None
if not valid:
utils.error.invalid("Bucket name %s" % bucket_name, context)
@classmethod
def __preprocess_rest(cls, data):
proxy = scalpl.Cut(data)
keys = utils.common.nested_key(data)
proxy.pop("iamConfiguration.bucketPolicyOnly", False)
for key in keys:
if key.endswith("createdBefore"):
proxy[key] = proxy[key] + "T00:00:00Z"
rest_only = {}
for field in Bucket.rest_only_fields:
if field in proxy:
rest_only[field] = proxy.pop(field)
return proxy.data, rest_only
@classmethod
def __postprocess_rest(cls, data, rest_only):
proxy = scalpl.Cut(data)
keys = utils.common.nested_key(data)
for key in keys:
if key.endswith("createdBefore"):
proxy[key] = proxy[key].replace("T00:00:00Z", "")
proxy["kind"] = "storage#bucket"
proxy.update(rest_only)
return proxy.data
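    # Example of the round trip handled by the two helpers above (payload is
    # illustrative):
    #   {"lifecycle": {"rule": [{"condition": {"createdBefore": "2021-06-01"}}]},
    #    "iamConfiguration": {"publicAccessPrevention": "enforced"}}
    # __preprocess_rest expands the date to "2021-06-01T00:00:00Z" and moves the
    # REST-only field into `rest_only`; __postprocess_rest undoes both and adds
    # the "kind" marker back.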
@classmethod
def __insert_predefined_acl(cls, metadata, predefined_acl, context):
if (
predefined_acl == ""
or predefined_acl
== CommonEnums.PredefinedBucketAcl.PREDEFINED_BUCKET_ACL_UNSPECIFIED
):
return
if metadata.iam_configuration.uniform_bucket_level_access.enabled:
utils.error.invalid(
"Predefined ACL with uniform bucket level access enabled", context
)
acls = utils.acl.compute_predefined_bucket_acl(
metadata.name, predefined_acl, context
)
del metadata.acl[:]
metadata.acl.extend(acls)
@classmethod
def __insert_predefined_default_object_acl(
cls, metadata, predefined_default_object_acl, context
):
if (
predefined_default_object_acl == ""
or predefined_default_object_acl
== CommonEnums.PredefinedObjectAcl.PREDEFINED_OBJECT_ACL_UNSPECIFIED
):
return
if metadata.iam_configuration.uniform_bucket_level_access.enabled:
utils.error.invalid(
"Predefined Default Object ACL with uniform bucket level access enabled",
context,
)
acls = utils.acl.compute_predefined_default_object_acl(
metadata.name, predefined_default_object_acl, context
)
del metadata.default_object_acl[:]
metadata.default_object_acl.extend(acls)
# === INITIALIZATION === #
@classmethod
def init(cls, request, context, rest_only=None):
time_created = datetime.datetime.now()
metadata = None
if context is not None:
metadata = request.bucket
else:
metadata, rest_only = cls.__preprocess_rest(json.loads(request.data))
metadata = json_format.ParseDict(metadata, resources_pb2.Bucket())
cls.__validate_bucket_name(metadata.name, context)
default_projection = CommonEnums.Projection.NO_ACL
if len(metadata.acl) != 0 or len(metadata.default_object_acl) != 0:
default_projection = CommonEnums.Projection.FULL
is_uniform = metadata.iam_configuration.uniform_bucket_level_access.enabled
metadata.iam_configuration.uniform_bucket_level_access.enabled = False
if len(metadata.acl) == 0:
predefined_acl = utils.acl.extract_predefined_acl(request, False, context)
if (
predefined_acl
== CommonEnums.PredefinedBucketAcl.PREDEFINED_BUCKET_ACL_UNSPECIFIED
):
predefined_acl = (
CommonEnums.PredefinedBucketAcl.BUCKET_ACL_PROJECT_PRIVATE
)
elif predefined_acl == "":
predefined_acl = "projectPrivate"
elif is_uniform:
utils.error.invalid(
"Predefined ACL with uniform bucket level access enabled", context
)
cls.__insert_predefined_acl(metadata, predefined_acl, context)
if len(metadata.default_object_acl) == 0:
predefined_default_object_acl = utils.acl.extract_predefined_default_object_acl(
request, context
)
if (
predefined_default_object_acl
== CommonEnums.PredefinedObjectAcl.PREDEFINED_OBJECT_ACL_UNSPECIFIED
):
predefined_default_object_acl = (
CommonEnums.PredefinedObjectAcl.OBJECT_ACL_PROJECT_PRIVATE
)
elif predefined_default_object_acl == "":
predefined_default_object_acl = "projectPrivate"
elif is_uniform:
utils.error.invalid(
"Predefined Default Object ACL with uniform bucket level access enabled",
context,
)
cls.__insert_predefined_default_object_acl(
metadata, predefined_default_object_acl, context
)
metadata.iam_configuration.uniform_bucket_level_access.enabled = is_uniform
metadata.id = metadata.name
metadata.project_number = int(utils.acl.PROJECT_NUMBER)
metadata.metageneration = 0
metadata.etag = hashlib.md5(metadata.name.encode("utf-8")).hexdigest()
metadata.time_created.FromDatetime(time_created)
metadata.updated.FromDatetime(time_created)
metadata.owner.entity = utils.acl.get_project_entity("owners", context)
metadata.owner.entity_id = hashlib.md5(
metadata.owner.entity.encode("utf-8")
).hexdigest()
if rest_only is None:
rest_only = {}
return (
cls(metadata, {}, cls.__init_iam_policy(metadata, context), rest_only),
utils.common.extract_projection(request, default_projection, context),
)
# === IAM === #
@classmethod
def __init_iam_policy(cls, metadata, context):
role_mapping = {
"READER": "roles/storage.legacyBucketReader",
"WRITER": "roles/storage.legacyBucketWriter",
"OWNER": "roles/storage.legacyBucketOwner",
}
bindings = []
for entry in metadata.acl:
legacy_role = entry.role
if legacy_role is None or entry.entity is None:
utils.error.invalid("ACL entry", context)
role = role_mapping.get(legacy_role)
if role is None:
utils.error.invalid("Legacy role %s" % legacy_role, context)
bindings.append(policy_pb2.Binding(role=role, members=[entry.entity]))
return policy_pb2.Policy(
version=1,
bindings=bindings,
etag=datetime.datetime.now().isoformat().encode("utf-8"),
)
def get_iam_policy(self, request, context):
return self.iam_policy
def set_iam_policy(self, request, context):
policy = None
if context is not None:
policy = request.iam_request.policy
else:
data = json.loads(request.data)
if "iam_request" in data:
data = data["iam_request"]["policy"]
data.pop("kind", None)
data.pop("etag", None)
data.pop("resourceId", None)
policy = json_format.ParseDict(data, policy_pb2.Policy())
self.iam_policy = policy
self.iam_policy.etag = datetime.datetime.now().isoformat().encode("utf-8")
return self.iam_policy
# === METADATA === #
def __update_metadata(self, source, update_mask):
if update_mask is None:
update_mask = field_mask_pb2.FieldMask(paths=Bucket.modifiable_fields)
update_mask.MergeMessage(source, self.metadata, True, True)
if self.metadata.versioning.enabled:
self.metadata.metageneration += 1
self.metadata.updated.FromDatetime(datetime.datetime.now())
def update(self, request, context):
metadata = None
if context is not None:
metadata = request.metadata
else:
metadata, rest_only = self.__preprocess_rest(json.loads(request.data))
self.rest_only.update(rest_only)
metadata = json_format.ParseDict(metadata, resources_pb2.Bucket())
self.__update_metadata(metadata, None)
self.__insert_predefined_acl(
metadata, utils.acl.extract_predefined_acl(request, False, context), context
)
self.__insert_predefined_default_object_acl(
metadata,
utils.acl.extract_predefined_default_object_acl(request, context),
context,
)
def patch(self, request, context):
update_mask = field_mask_pb2.FieldMask()
metadata = None
if context is not None:
metadata = request.metadata
update_mask = request.update_mask
else:
data = json.loads(request.data)
if "labels" in data:
if data["labels"] is None:
self.metadata.labels.clear()
else:
for key, value in data["labels"].items():
if value is None:
self.metadata.labels.pop(key, None)
else:
self.metadata.labels[key] = value
data.pop("labels", None)
data, rest_only = self.__preprocess_rest(data)
self.rest_only.update(rest_only)
metadata = json_format.ParseDict(data, resources_pb2.Bucket())
paths = set()
for key in utils.common.nested_key(data):
key = utils.common.to_snake_case(key)
head = key
for i, c in enumerate(key):
if c == "." or c == "[":
head = key[0:i]
break
if head in Bucket.modifiable_fields:
if "[" in key:
paths.add(head)
else:
paths.add(key)
update_mask = field_mask_pb2.FieldMask(paths=list(paths))
self.__update_metadata(metadata, update_mask)
self.__insert_predefined_acl(
metadata, utils.acl.extract_predefined_acl(request, False, context), context
)
self.__insert_predefined_default_object_acl(
metadata,
utils.acl.extract_predefined_default_object_acl(request, context),
context,
)
# === ACL === #
def __search_acl(self, entity, must_exist, context):
entity = utils.acl.get_canonical_entity(entity)
for i in range(len(self.metadata.acl)):
if self.metadata.acl[i].entity == entity:
return i
if must_exist:
utils.error.notfound("ACL %s" % entity, context)
def __upsert_acl(self, entity, role, context):
# For simplicity, we treat `insert`, `update` and `patch` ACL the same way.
index = self.__search_acl(entity, False, context)
acl = utils.acl.create_bucket_acl(self.metadata.name, entity, role, context)
if index is not None:
self.metadata.acl[index].CopyFrom(acl)
return self.metadata.acl[index]
else:
self.metadata.acl.append(acl)
return acl
def get_acl(self, entity, context):
index = self.__search_acl(entity, True, context)
return self.metadata.acl[index]
def insert_acl(self, request, context):
entity, role = "", ""
if context is not None:
entity, role = (
request.bucket_access_control.entity,
request.bucket_access_control.role,
)
else:
payload = json.loads(request.data)
entity, role = payload["entity"], payload["role"]
return self.__upsert_acl(entity, role, context)
def update_acl(self, request, entity, context):
role = ""
if context is not None:
role = request.bucket_access_control.role
else:
payload = json.loads(request.data)
role = payload["role"]
return self.__upsert_acl(entity, role, context)
def patch_acl(self, request, entity, context):
role = ""
if context is not None:
role = request.bucket_access_control.role
else:
payload = json.loads(request.data)
role = payload["role"]
return self.__upsert_acl(entity, role, context)
def delete_acl(self, entity, context):
del self.metadata.acl[self.__search_acl(entity, True, context)]
# === DEFAULT OBJECT ACL === #
def __search_default_object_acl(self, entity, must_exist, context):
entity = utils.acl.get_canonical_entity(entity)
for i in range(len(self.metadata.default_object_acl)):
if self.metadata.default_object_acl[i].entity == entity:
return i
if must_exist:
utils.error.notfound("Default Object ACL %s" % entity, context)
def __upsert_default_object_acl(self, entity, role, context):
# For simplicity, we treat `insert`, `update` and `patch` Default Object ACL the same way.
index = self.__search_default_object_acl(entity, False, context)
acl = utils.acl.create_default_object_acl(
self.metadata.name, entity, role, context
)
if index is not None:
self.metadata.default_object_acl[index].CopyFrom(acl)
return self.metadata.default_object_acl[index]
else:
self.metadata.default_object_acl.append(acl)
return acl
def get_default_object_acl(self, entity, context):
index = self.__search_default_object_acl(entity, True, context)
return self.metadata.default_object_acl[index]
def insert_default_object_acl(self, request, context):
entity, role = "", ""
if context is not None:
entity, role = (
request.object_access_control.entity,
request.object_access_control.role,
)
else:
payload = json.loads(request.data)
entity, role = payload["entity"], payload["role"]
return self.__upsert_default_object_acl(entity, role, context)
def update_default_object_acl(self, request, entity, context):
role = ""
if context is not None:
role = request.object_access_control.role
else:
payload = json.loads(request.data)
role = payload["role"]
return self.__upsert_default_object_acl(entity, role, context)
def patch_default_object_acl(self, request, entity, context):
role = ""
if context is not None:
role = request.object_access_control.role
else:
payload = json.loads(request.data)
role = payload["role"]
return self.__upsert_default_object_acl(entity, role, context)
def delete_default_object_acl(self, entity, context):
del self.metadata.default_object_acl[
self.__search_default_object_acl(entity, True, context)
]
# === NOTIFICATIONS === #
def insert_notification(self, request, context):
notification = None
if context is not None:
notification = request.notification
else:
notification = json_format.ParseDict(
json.loads(request.data), resources_pb2.Notification()
)
notification.id = "notification-%d" % random.getrandbits(16)
self.notifications[notification.id] = notification
return notification
def get_notification(self, notification_id, context):
return self.notifications[notification_id]
def delete_notification(self, notification_id, context):
del self.notifications[notification_id]
# === RESPONSE === #
def rest(self):
response = json_format.MessageToDict(self.metadata)
return Bucket.__postprocess_rest(response, self.rest_only)
| apache-2.0 | 2,046,264,609,927,782,400 | 37.9 | 98 | 0.587322 | false |
mancoast/CPythonPyc_test | cpython/234_test_sax.py | 1 | 19309 | # regression test for SAX 2.0 -*- coding: iso-8859-1 -*-
# $Id: test_sax.py,v 1.24.16.1 2004/03/20 08:20:03 fdrake Exp $
from xml.sax import make_parser, ContentHandler, \
SAXException, SAXReaderNotAvailable, SAXParseException
try:
make_parser()
except SAXReaderNotAvailable:
# don't try to test this module if we cannot create a parser
raise ImportError("no XML parsers available")
from xml.sax.saxutils import XMLGenerator, escape, unescape, quoteattr, \
XMLFilterBase
from xml.sax.expatreader import create_parser
from xml.sax.xmlreader import InputSource, AttributesImpl, AttributesNSImpl
from cStringIO import StringIO
from test.test_support import verify, verbose, TestFailed, findfile
import os
# ===== Utilities
tests = 0
failures = []
def confirm(outcome, name):
global tests
tests = tests + 1
if outcome:
if verbose:
print "Failed", name
else:
failures.append(name)
def test_make_parser2():
try:
# Creating parsers several times in a row should succeed.
# Testing this because there have been failures of this kind
# before.
from xml.sax import make_parser
p = make_parser()
from xml.sax import make_parser
p = make_parser()
from xml.sax import make_parser
p = make_parser()
from xml.sax import make_parser
p = make_parser()
from xml.sax import make_parser
p = make_parser()
from xml.sax import make_parser
p = make_parser()
except:
return 0
else:
return p
# ===========================================================================
#
# saxutils tests
#
# ===========================================================================
# ===== escape
def test_escape_basic():
return escape("Donald Duck & Co") == "Donald Duck & Co"
def test_escape_all():
return escape("<Donald Duck & Co>") == "<Donald Duck & Co>"
def test_escape_extra():
return escape("Hei på deg", {"å" : "å"}) == "Hei på deg"
# ===== unescape
def test_unescape_basic():
return unescape("Donald Duck & Co") == "Donald Duck & Co"
def test_unescape_all():
return unescape("<Donald Duck & Co>") == "<Donald Duck & Co>"
def test_unescape_extra():
return unescape("Hei på deg", {"å" : "å"}) == "Hei på deg"
def test_unescape_amp_extra():
return unescape("&foo;", {"&foo;": "splat"}) == "&foo;"
# ===== quoteattr
def test_quoteattr_basic():
return quoteattr("Donald Duck & Co") == '"Donald Duck & Co"'
def test_single_quoteattr():
return (quoteattr('Includes "double" quotes')
== '\'Includes "double" quotes\'')
def test_double_quoteattr():
return (quoteattr("Includes 'single' quotes")
== "\"Includes 'single' quotes\"")
def test_single_double_quoteattr():
return (quoteattr("Includes 'single' and \"double\" quotes")
== "\"Includes 'single' and "double" quotes\"")
# ===== make_parser
def test_make_parser():
try:
# Creating a parser should succeed - it should fall back
# to the expatreader
p = make_parser(['xml.parsers.no_such_parser'])
except:
return 0
else:
return p
# ===== XMLGenerator
start = '<?xml version="1.0" encoding="iso-8859-1"?>\n'
def test_xmlgen_basic():
result = StringIO()
gen = XMLGenerator(result)
gen.startDocument()
gen.startElement("doc", {})
gen.endElement("doc")
gen.endDocument()
return result.getvalue() == start + "<doc></doc>"
def test_xmlgen_content():
result = StringIO()
gen = XMLGenerator(result)
gen.startDocument()
gen.startElement("doc", {})
gen.characters("huhei")
gen.endElement("doc")
gen.endDocument()
return result.getvalue() == start + "<doc>huhei</doc>"
def test_xmlgen_pi():
result = StringIO()
gen = XMLGenerator(result)
gen.startDocument()
gen.processingInstruction("test", "data")
gen.startElement("doc", {})
gen.endElement("doc")
gen.endDocument()
return result.getvalue() == start + "<?test data?><doc></doc>"
def test_xmlgen_content_escape():
result = StringIO()
gen = XMLGenerator(result)
gen.startDocument()
gen.startElement("doc", {})
gen.characters("<huhei&")
gen.endElement("doc")
gen.endDocument()
return result.getvalue() == start + "<doc><huhei&</doc>"
def test_xmlgen_attr_escape():
result = StringIO()
gen = XMLGenerator(result)
gen.startDocument()
gen.startElement("doc", {"a": '"'})
gen.startElement("e", {"a": "'"})
gen.endElement("e")
gen.startElement("e", {"a": "'\""})
gen.endElement("e")
gen.endElement("doc")
gen.endDocument()
return result.getvalue() == start \
+ "<doc a='\"'><e a=\"'\"></e><e a=\"'"\"></e></doc>"
def test_xmlgen_ignorable():
result = StringIO()
gen = XMLGenerator(result)
gen.startDocument()
gen.startElement("doc", {})
gen.ignorableWhitespace(" ")
gen.endElement("doc")
gen.endDocument()
return result.getvalue() == start + "<doc> </doc>"
ns_uri = "http://www.python.org/xml-ns/saxtest/"
def test_xmlgen_ns():
result = StringIO()
gen = XMLGenerator(result)
gen.startDocument()
gen.startPrefixMapping("ns1", ns_uri)
gen.startElementNS((ns_uri, "doc"), "ns1:doc", {})
# add an unqualified name
gen.startElementNS((None, "udoc"), None, {})
gen.endElementNS((None, "udoc"), None)
gen.endElementNS((ns_uri, "doc"), "ns1:doc")
gen.endPrefixMapping("ns1")
gen.endDocument()
return result.getvalue() == start + \
('<ns1:doc xmlns:ns1="%s"><udoc></udoc></ns1:doc>' %
ns_uri)
# ===== XMLFilterBase
def test_filter_basic():
result = StringIO()
gen = XMLGenerator(result)
filter = XMLFilterBase()
filter.setContentHandler(gen)
filter.startDocument()
filter.startElement("doc", {})
filter.characters("content")
filter.ignorableWhitespace(" ")
filter.endElement("doc")
filter.endDocument()
return result.getvalue() == start + "<doc>content </doc>"
# ===========================================================================
#
# expatreader tests
#
# ===========================================================================
# ===== XMLReader support
def test_expat_file():
parser = create_parser()
result = StringIO()
xmlgen = XMLGenerator(result)
parser.setContentHandler(xmlgen)
parser.parse(open(findfile("test"+os.extsep+"xml")))
return result.getvalue() == xml_test_out
# ===== DTDHandler support
class TestDTDHandler:
def __init__(self):
self._notations = []
self._entities = []
def notationDecl(self, name, publicId, systemId):
self._notations.append((name, publicId, systemId))
def unparsedEntityDecl(self, name, publicId, systemId, ndata):
self._entities.append((name, publicId, systemId, ndata))
def test_expat_dtdhandler():
parser = create_parser()
handler = TestDTDHandler()
parser.setDTDHandler(handler)
parser.feed('<!DOCTYPE doc [\n')
parser.feed(' <!ENTITY img SYSTEM "expat.gif" NDATA GIF>\n')
parser.feed(' <!NOTATION GIF PUBLIC "-//CompuServe//NOTATION Graphics Interchange Format 89a//EN">\n')
parser.feed(']>\n')
parser.feed('<doc></doc>')
parser.close()
return handler._notations == [("GIF", "-//CompuServe//NOTATION Graphics Interchange Format 89a//EN", None)] and \
handler._entities == [("img", None, "expat.gif", "GIF")]
# ===== EntityResolver support
class TestEntityResolver:
def resolveEntity(self, publicId, systemId):
inpsrc = InputSource()
inpsrc.setByteStream(StringIO("<entity/>"))
return inpsrc
def test_expat_entityresolver():
parser = create_parser()
parser.setEntityResolver(TestEntityResolver())
result = StringIO()
parser.setContentHandler(XMLGenerator(result))
parser.feed('<!DOCTYPE doc [\n')
parser.feed(' <!ENTITY test SYSTEM "whatever">\n')
parser.feed(']>\n')
parser.feed('<doc>&test;</doc>')
parser.close()
return result.getvalue() == start + "<doc><entity></entity></doc>"
# ===== Attributes support
class AttrGatherer(ContentHandler):
def startElement(self, name, attrs):
self._attrs = attrs
def startElementNS(self, name, qname, attrs):
self._attrs = attrs
def test_expat_attrs_empty():
parser = create_parser()
gather = AttrGatherer()
parser.setContentHandler(gather)
parser.feed("<doc/>")
parser.close()
return verify_empty_attrs(gather._attrs)
def test_expat_attrs_wattr():
parser = create_parser()
gather = AttrGatherer()
parser.setContentHandler(gather)
parser.feed("<doc attr='val'/>")
parser.close()
return verify_attrs_wattr(gather._attrs)
def test_expat_nsattrs_empty():
parser = create_parser(1)
gather = AttrGatherer()
parser.setContentHandler(gather)
parser.feed("<doc/>")
parser.close()
return verify_empty_nsattrs(gather._attrs)
def test_expat_nsattrs_wattr():
parser = create_parser(1)
gather = AttrGatherer()
parser.setContentHandler(gather)
parser.feed("<doc xmlns:ns='%s' ns:attr='val'/>" % ns_uri)
parser.close()
attrs = gather._attrs
return attrs.getLength() == 1 and \
attrs.getNames() == [(ns_uri, "attr")] and \
(attrs.getQNames() == [] or attrs.getQNames() == ["ns:attr"]) and \
len(attrs) == 1 and \
attrs.has_key((ns_uri, "attr")) and \
attrs.keys() == [(ns_uri, "attr")] and \
attrs.get((ns_uri, "attr")) == "val" and \
attrs.get((ns_uri, "attr"), 25) == "val" and \
attrs.items() == [((ns_uri, "attr"), "val")] and \
attrs.values() == ["val"] and \
attrs.getValue((ns_uri, "attr")) == "val" and \
attrs[(ns_uri, "attr")] == "val"
# ===== InputSource support
xml_test_out = open(findfile("test"+os.extsep+"xml"+os.extsep+"out")).read()
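# Note: the expected-output file read here can be regenerated with make_test_output(),
# which is defined near the end of this module and writes to the same path.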
def test_expat_inpsource_filename():
parser = create_parser()
result = StringIO()
xmlgen = XMLGenerator(result)
parser.setContentHandler(xmlgen)
parser.parse(findfile("test"+os.extsep+"xml"))
return result.getvalue() == xml_test_out
def test_expat_inpsource_sysid():
parser = create_parser()
result = StringIO()
xmlgen = XMLGenerator(result)
parser.setContentHandler(xmlgen)
parser.parse(InputSource(findfile("test"+os.extsep+"xml")))
return result.getvalue() == xml_test_out
def test_expat_inpsource_stream():
parser = create_parser()
result = StringIO()
xmlgen = XMLGenerator(result)
parser.setContentHandler(xmlgen)
inpsrc = InputSource()
inpsrc.setByteStream(open(findfile("test"+os.extsep+"xml")))
parser.parse(inpsrc)
return result.getvalue() == xml_test_out
# ===== IncrementalParser support
def test_expat_incremental():
result = StringIO()
xmlgen = XMLGenerator(result)
parser = create_parser()
parser.setContentHandler(xmlgen)
parser.feed("<doc>")
parser.feed("</doc>")
parser.close()
return result.getvalue() == start + "<doc></doc>"
def test_expat_incremental_reset():
result = StringIO()
xmlgen = XMLGenerator(result)
parser = create_parser()
parser.setContentHandler(xmlgen)
parser.feed("<doc>")
parser.feed("text")
result = StringIO()
xmlgen = XMLGenerator(result)
parser.setContentHandler(xmlgen)
parser.reset()
parser.feed("<doc>")
parser.feed("text")
parser.feed("</doc>")
parser.close()
return result.getvalue() == start + "<doc>text</doc>"
# ===== Locator support
def test_expat_locator_noinfo():
result = StringIO()
xmlgen = XMLGenerator(result)
parser = create_parser()
parser.setContentHandler(xmlgen)
parser.feed("<doc>")
parser.feed("</doc>")
parser.close()
return parser.getSystemId() is None and \
parser.getPublicId() is None and \
parser.getLineNumber() == 1
def test_expat_locator_withinfo():
result = StringIO()
xmlgen = XMLGenerator(result)
parser = create_parser()
parser.setContentHandler(xmlgen)
parser.parse(findfile("test.xml"))
return parser.getSystemId() == findfile("test.xml") and \
parser.getPublicId() is None
# ===========================================================================
#
# error reporting
#
# ===========================================================================
def test_expat_inpsource_location():
parser = create_parser()
parser.setContentHandler(ContentHandler()) # do nothing
source = InputSource()
source.setByteStream(StringIO("<foo bar foobar>")) #ill-formed
name = "a file name"
source.setSystemId(name)
try:
parser.parse(source)
except SAXException, e:
return e.getSystemId() == name
def test_expat_incomplete():
parser = create_parser()
parser.setContentHandler(ContentHandler()) # do nothing
try:
parser.parse(StringIO("<foo>"))
except SAXParseException:
return 1 # ok, error found
else:
return 0
def test_sax_parse_exception_str():
# pass various values from a locator to the SAXParseException to
# make sure that the __str__() doesn't fall apart when None is
# passed instead of an integer line and column number
#
# use "normal" values for the locator:
str(SAXParseException("message", None,
DummyLocator(1, 1)))
# use None for the line number:
str(SAXParseException("message", None,
DummyLocator(None, 1)))
# use None for the column number:
str(SAXParseException("message", None,
DummyLocator(1, None)))
# use None for both:
str(SAXParseException("message", None,
DummyLocator(None, None)))
return 1
class DummyLocator:
def __init__(self, lineno, colno):
self._lineno = lineno
self._colno = colno
def getPublicId(self):
return "pubid"
def getSystemId(self):
return "sysid"
def getLineNumber(self):
return self._lineno
def getColumnNumber(self):
return self._colno
# ===========================================================================
#
# xmlreader tests
#
# ===========================================================================
# ===== AttributesImpl
def verify_empty_attrs(attrs):
try:
attrs.getValue("attr")
gvk = 0
except KeyError:
gvk = 1
try:
attrs.getValueByQName("attr")
gvqk = 0
except KeyError:
gvqk = 1
try:
attrs.getNameByQName("attr")
gnqk = 0
except KeyError:
gnqk = 1
try:
attrs.getQNameByName("attr")
gqnk = 0
except KeyError:
gqnk = 1
try:
attrs["attr"]
gik = 0
except KeyError:
gik = 1
return attrs.getLength() == 0 and \
attrs.getNames() == [] and \
attrs.getQNames() == [] and \
len(attrs) == 0 and \
not attrs.has_key("attr") and \
attrs.keys() == [] and \
           attrs.get("attr") is None and \
           attrs.get("attr", 25) == 25 and \
attrs.items() == [] and \
attrs.values() == [] and \
gvk and gvqk and gnqk and gik and gqnk
def verify_attrs_wattr(attrs):
return attrs.getLength() == 1 and \
attrs.getNames() == ["attr"] and \
attrs.getQNames() == ["attr"] and \
len(attrs) == 1 and \
attrs.has_key("attr") and \
attrs.keys() == ["attr"] and \
attrs.get("attr") == "val" and \
attrs.get("attr", 25) == "val" and \
attrs.items() == [("attr", "val")] and \
attrs.values() == ["val"] and \
attrs.getValue("attr") == "val" and \
attrs.getValueByQName("attr") == "val" and \
attrs.getNameByQName("attr") == "attr" and \
attrs["attr"] == "val" and \
attrs.getQNameByName("attr") == "attr"
def test_attrs_empty():
return verify_empty_attrs(AttributesImpl({}))
def test_attrs_wattr():
return verify_attrs_wattr(AttributesImpl({"attr" : "val"}))
# ===== AttributesImpl
def verify_empty_nsattrs(attrs):
try:
attrs.getValue((ns_uri, "attr"))
gvk = 0
except KeyError:
gvk = 1
try:
attrs.getValueByQName("ns:attr")
gvqk = 0
except KeyError:
gvqk = 1
try:
attrs.getNameByQName("ns:attr")
gnqk = 0
except KeyError:
gnqk = 1
try:
attrs.getQNameByName((ns_uri, "attr"))
gqnk = 0
except KeyError:
gqnk = 1
try:
attrs[(ns_uri, "attr")]
gik = 0
except KeyError:
gik = 1
return attrs.getLength() == 0 and \
attrs.getNames() == [] and \
attrs.getQNames() == [] and \
len(attrs) == 0 and \
not attrs.has_key((ns_uri, "attr")) and \
attrs.keys() == [] and \
attrs.get((ns_uri, "attr")) is None and \
attrs.get((ns_uri, "attr"), 25) == 25 and \
attrs.items() == [] and \
attrs.values() == [] and \
gvk and gvqk and gnqk and gik and gqnk
def test_nsattrs_empty():
return verify_empty_nsattrs(AttributesNSImpl({}, {}))
def test_nsattrs_wattr():
attrs = AttributesNSImpl({(ns_uri, "attr") : "val"},
{(ns_uri, "attr") : "ns:attr"})
return attrs.getLength() == 1 and \
attrs.getNames() == [(ns_uri, "attr")] and \
attrs.getQNames() == ["ns:attr"] and \
len(attrs) == 1 and \
attrs.has_key((ns_uri, "attr")) and \
attrs.keys() == [(ns_uri, "attr")] and \
attrs.get((ns_uri, "attr")) == "val" and \
attrs.get((ns_uri, "attr"), 25) == "val" and \
attrs.items() == [((ns_uri, "attr"), "val")] and \
attrs.values() == ["val"] and \
attrs.getValue((ns_uri, "attr")) == "val" and \
attrs.getValueByQName("ns:attr") == "val" and \
attrs.getNameByQName("ns:attr") == (ns_uri, "attr") and \
attrs[(ns_uri, "attr")] == "val" and \
attrs.getQNameByName((ns_uri, "attr")) == "ns:attr"
# ===== Main program
def make_test_output():
parser = create_parser()
result = StringIO()
xmlgen = XMLGenerator(result)
parser.setContentHandler(xmlgen)
parser.parse(findfile("test"+os.extsep+"xml"))
outf = open(findfile("test"+os.extsep+"xml"+os.extsep+"out"), "w")
outf.write(result.getvalue())
outf.close()
items = locals().items()
items.sort()
for (name, value) in items:
    if name[ : 5] == "test_":
        confirm(value(), name)
if verbose:
    print "%d tests, %d failures" % (tests, len(failures))
if failures:
    raise TestFailed("%d of %d tests failed: %s"
                     % (len(failures), tests, ", ".join(failures)))
| gpl-3.0 | -2,254,858,870,572,087,000 | 26.782734 | 117 | 0.5702 | false |
sio2project/oioioi | oioioi/szkopul/views.py | 1 | 2838 | import django
from django.conf import settings
from django.template.response import TemplateResponse
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
from oioioi.base.main_page import register_main_page_view
from oioioi.contests.controllers import submission_template_context
from oioioi.contests.models import Submission
from oioioi.contests.processors import recent_contests
from oioioi.contests.utils import visible_contests
from oioioi.problems.utils import filter_my_all_visible_submissions
from oioioi.szkopul.menu import navbar_links_registry
navbar_links_registry.register(
name='contests_list',
text=_("Contests"),
url_generator=lambda request: reverse('select_contest'),
order=100,
)
navbar_links_registry.register(
name='problemset',
text=_("Problemset"),
url_generator=lambda request: reverse('problemset_main'),
order=200,
)
navbar_links_registry.register(
name='task_archive',
text=_("Task archive"),
url_generator=lambda request: reverse('task_archive'),
order=300,
)
# TODO Add Portals main page to the menu:
# navbar_links_registry.register(
# name='portals',
# text=_("Portals"),
# ...
# )
@register_main_page_view(order=100)
def main_page_view(request):
navbar_links = navbar_links_registry.template_context(request)
to_show = getattr(settings, 'NUM_RECENT_CONTESTS', 7)
rcontests = recent_contests(request)
contests = list(visible_contests(request).difference(rcontests))
contests.sort(key=lambda x: x.creation_date, reverse=True)
contests = (rcontests + contests)[:to_show]
submissions = []
show_scores = False
if request.user.is_authenticated:
queryset = Submission.objects.filter(user=request.user).order_by('-date')
to_show = getattr(settings, 'NUM_PANEL_SUBMISSIONS', 7)
# limit queryset size, because filtering all submissions is slow
queryset = queryset[:to_show]
limit_queryset_ids = [submission.id for submission in queryset]
queryset = Submission.objects.filter(id__in=limit_queryset_ids).select_related(
'user',
'problem_instance',
'problem_instance__contest',
'problem_instance__round',
'problem_instance__problem',
)
submissions_list = filter_my_all_visible_submissions(
request, queryset
).order_by('-date')
submissions = [
submission_template_context(request, s) for s in submissions_list
]
show_scores = any(s['can_see_score'] for s in submissions)
context = {
'navbar_links': navbar_links,
'contests': contests,
'submissions': submissions,
'show_scores': show_scores,
}
return TemplateResponse(request, 'main-page.html', context)
| gpl-3.0 | -4,068,311,784,596,601,300 | 32.388235 | 87 | 0.686399 | false |
osuripple/pep.py | objects/chatFilters.py | 1 | 1250 | class chatFilters:
def __init__(self, fileName="filters.txt"):
"""
Initialize chat filters
:param fileName: name of the file containing filters. Default: filters.txt
"""
self.filters = {}
self.loadFilters(fileName)
def loadFilters(self, fileName="filters.txt"):
"""
Load filters from a file
:param fileName: name of the file containing filters. Default: filters.txt
:return:
"""
# Reset chat filters
self.filters = {}
# Open filters file
with open(fileName, "r") as f:
# Read all lines
data = f.readlines()
# Process each line
for line in data:
# Get old/new word and save it in dictionary
lineSplit = line.split("=")
self.filters[lineSplit[0].lower()] = lineSplit[1].replace("\n", "")
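		# Expected filters.txt format (an assumption based on the parsing above):
		# one "old=new" pair per line, for example:
		#   badword=flowers
		#   meanie=nice person
		# The left-hand side is matched case-insensitively (it is lowercased on load).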
def filterMessage(self, message):
"""
Replace forbidden words with filtered ones
:param message: normal message
:return: filtered message
"""
return message
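		# NOTE: filtering is effectively disabled here -- the message is returned
		# unchanged, and the original word-replacement implementation is kept below
		# inside a string literal for reference.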
"""
# Split words by spaces
messageTemp = message.split(" ")
# Check each word
for word in messageTemp:
lowerWord = word.lower()
# If the word is filtered, replace it
if lowerWord in self.filters:
message = message.replace(word, self.filters[lowerWord])
# Return filtered message
return message
"""
| agpl-3.0 | -5,941,986,136,163,623,000 | 22.148148 | 76 | 0.672 | false |
jef-n/QGIS | tests/src/python/providertestbase.py | 1 | 52878 | # -*- coding: utf-8 -*-
"""QGIS Unit test utils for provider tests.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
from builtins import str
from builtins import object
__author__ = 'Matthias Kuhn'
__date__ = '2015-04-27'
__copyright__ = 'Copyright 2015, The QGIS Project'
from qgis.core import (
QgsApplication,
QgsRectangle,
QgsFeatureRequest,
QgsFeature,
QgsGeometry,
QgsAbstractFeatureIterator,
QgsExpressionContextScope,
QgsExpressionContext,
QgsExpression,
QgsVectorDataProvider,
QgsVectorLayerFeatureSource,
QgsFeatureSink,
QgsTestUtils,
QgsFeatureSource,
NULL
)
from qgis.PyQt.QtCore import QDate, QTime, QDateTime
from qgis.PyQt.QtTest import QSignalSpy
from utilities import compareWkt
from featuresourcetestbase import FeatureSourceTestCase
class ProviderTestCase(FeatureSourceTestCase):
'''
    This is a collection of tests for vector data providers, kept deliberately generic.
To make use of it, subclass it and set self.source to a provider you want to test.
Make sure that your provider uses the default dataset by converting one of the provided datasets from the folder
tests/testdata/provider to a dataset your provider is able to handle.
To test expression compilation, add the methods `enableCompiler()` and `disableCompiler()` to your subclass.
    If these methods are present, the tests will ensure that the results of server-side and client-side expression
    evaluation are equal.
'''
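    # A minimal usage sketch (the class name and connection string below are illustrative
    # assumptions, not part of this module):
    #
    #     class TestMyProvider(unittest.TestCase, ProviderTestCase):
    #         @classmethod
    #         def setUpClass(cls):
    #             cls.vl = QgsVectorLayer("<connection uri>", "test", "myprovider")
    #             assert cls.vl.isValid()
    #             cls.source = cls.vl.dataProvider()
    #
    # The tests below use both self.vl and self.source, so a subclass should set both.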
    def uncompiledFilters(self):
        """ Individual derived provider tests should override this to return a set of expressions which
        cannot be compiled """
return set()
    def enableCompiler(self):
        """By default no expression compilation is available; subclasses that support it must override this method and return True."""
print('Provider does not support compiling')
return False
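        # A sketch of a possible override in a subclass (assumption: the settings key used
        # to toggle compilation is provider-specific and may differ for your provider):
        #
        #     def enableCompiler(self):
        #         QgsSettings().setValue('/qgis/compileExpressions', True)
        #         return True
        #
        #     def disableCompiler(self):
        #         QgsSettings().setValue('/qgis/compileExpressions', False)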
    def partiallyCompiledFilters(self):
        """ Individual derived provider tests should override this to return a set of expressions which
        should be partially compiled """
return set()
def assert_query(self, source, expression, expected):
FeatureSourceTestCase.assert_query(self, source, expression, expected)
if self.compiled:
# Check compilation status
it = source.getFeatures(QgsFeatureRequest().setFilterExpression(expression))
if expression in self.uncompiledFilters():
self.assertEqual(it.compileStatus(), QgsAbstractFeatureIterator.NoCompilation)
elif expression in self.partiallyCompiledFilters():
self.assertEqual(it.compileStatus(), QgsAbstractFeatureIterator.PartiallyCompiled)
else:
self.assertEqual(it.compileStatus(), QgsAbstractFeatureIterator.Compiled, expression)
def runGetFeatureTests(self, source):
FeatureSourceTestCase.runGetFeatureTests(self, source)
# combination of an uncompilable expression and limit
feature = next(self.vl.getFeatures('pk=4'))
context = QgsExpressionContext()
scope = QgsExpressionContextScope()
scope.setVariable('parent', feature)
context.appendScope(scope)
request = QgsFeatureRequest()
request.setExpressionContext(context)
request.setFilterExpression('"pk" = attribute(@parent, \'pk\')')
request.setLimit(1)
values = [f['pk'] for f in self.vl.getFeatures(request)]
self.assertEqual(values, [4])
def runPolyGetFeatureTests(self, provider):
assert len([f for f in provider.getFeatures()]) == 4
# geometry
self.assert_query(provider, 'x($geometry) < -70', [1])
self.assert_query(provider, 'y($geometry) > 79', [1, 2])
self.assert_query(provider, 'xmin($geometry) < -70', [1, 3])
self.assert_query(provider, 'ymin($geometry) < 76', [3])
self.assert_query(provider, 'xmax($geometry) > -68', [2, 3])
self.assert_query(provider, 'ymax($geometry) > 80', [1, 2])
self.assert_query(provider, 'area($geometry) > 10', [1])
self.assert_query(provider, 'perimeter($geometry) < 12', [2, 3])
self.assert_query(provider,
'relate($geometry,geom_from_wkt( \'Polygon ((-68.2 82.1, -66.95 82.1, -66.95 79.05, -68.2 79.05, -68.2 82.1))\')) = \'FF2FF1212\'',
[1, 3])
self.assert_query(provider,
'relate($geometry,geom_from_wkt( \'Polygon ((-68.2 82.1, -66.95 82.1, -66.95 79.05, -68.2 79.05, -68.2 82.1))\'), \'****F****\')',
[1, 3])
self.assert_query(provider,
'crosses($geometry,geom_from_wkt( \'Linestring (-68.2 82.1, -66.95 82.1, -66.95 79.05)\'))',
[2])
self.assert_query(provider,
'overlaps($geometry,geom_from_wkt( \'Polygon ((-68.2 82.1, -66.95 82.1, -66.95 79.05, -68.2 79.05, -68.2 82.1))\'))',
[2])
self.assert_query(provider,
'within($geometry,geom_from_wkt( \'Polygon ((-75.1 76.1, -75.1 81.6, -68.8 81.6, -68.8 76.1, -75.1 76.1))\'))',
[1])
self.assert_query(provider,
'overlaps(translate($geometry,-1,-1),geom_from_wkt( \'Polygon ((-75.1 76.1, -75.1 81.6, -68.8 81.6, -68.8 76.1, -75.1 76.1))\'))',
[1])
self.assert_query(provider,
'overlaps(buffer($geometry,1),geom_from_wkt( \'Polygon ((-75.1 76.1, -75.1 81.6, -68.8 81.6, -68.8 76.1, -75.1 76.1))\'))',
[1, 3])
self.assert_query(provider,
'intersects(centroid($geometry),geom_from_wkt( \'Polygon ((-74.4 78.2, -74.4 79.1, -66.8 79.1, -66.8 78.2, -74.4 78.2))\'))',
[2])
self.assert_query(provider,
'intersects(point_on_surface($geometry),geom_from_wkt( \'Polygon ((-74.4 78.2, -74.4 79.1, -66.8 79.1, -66.8 78.2, -74.4 78.2))\'))',
[1, 2])
self.assert_query(provider, 'distance($geometry,geom_from_wkt( \'Point (-70 70)\')) > 7', [1, 2])
def testGetFeaturesUncompiled(self):
self.compiled = False
try:
self.disableCompiler()
except AttributeError:
pass
self.runGetFeatureTests(self.source)
if hasattr(self, 'poly_provider'):
self.runPolyGetFeatureTests(self.poly_provider)
def testGetFeaturesExp(self):
if self.enableCompiler():
self.compiled = True
self.runGetFeatureTests(self.source)
if hasattr(self, 'poly_provider'):
self.runPolyGetFeatureTests(self.poly_provider)
def testSubsetString(self):
if not self.source.supportsSubsetString():
print('Provider does not support subset strings')
return
changed_spy = QSignalSpy(self.source.dataChanged)
subset = self.getSubsetString()
self.source.setSubsetString(subset)
self.assertEqual(self.source.subsetString(), subset)
self.assertEqual(len(changed_spy), 1)
# No signal should be emitted if the subset string is not modified
self.source.setSubsetString(subset)
self.assertEqual(len(changed_spy), 1)
result = set([f['pk'] for f in self.source.getFeatures()])
all_valid = (all(f.isValid() for f in self.source.getFeatures()))
self.source.setSubsetString(None)
expected = set([2, 3, 4])
assert set(expected) == result, 'Expected {} and got {} when testing subset string {}'.format(set(expected),
result, subset)
self.assertTrue(all_valid)
# Subset string AND filter rect
self.source.setSubsetString(subset)
extent = QgsRectangle(-70, 70, -60, 75)
request = QgsFeatureRequest().setFilterRect(extent)
result = set([f['pk'] for f in self.source.getFeatures(request)])
all_valid = (all(f.isValid() for f in self.source.getFeatures(request)))
self.source.setSubsetString(None)
expected = set([2])
assert set(expected) == result, 'Expected {} and got {} when testing subset string {}'.format(set(expected),
result, subset)
self.assertTrue(all_valid)
# Subset string AND filter rect, version 2
self.source.setSubsetString(subset)
extent = QgsRectangle(-71, 65, -60, 80)
result = set([f['pk'] for f in self.source.getFeatures(QgsFeatureRequest().setFilterRect(extent))])
self.source.setSubsetString(None)
expected = set([2, 4])
assert set(expected) == result, 'Expected {} and got {} when testing subset string {}'.format(set(expected),
result, subset)
# Subset string AND expression
self.source.setSubsetString(subset)
request = QgsFeatureRequest().setFilterExpression('length("name")=5')
result = set([f['pk'] for f in self.source.getFeatures(request)])
all_valid = (all(f.isValid() for f in self.source.getFeatures(request)))
self.source.setSubsetString(None)
expected = set([2, 4])
assert set(expected) == result, 'Expected {} and got {} when testing subset string {}'.format(set(expected),
result, subset)
self.assertTrue(all_valid)
# Subset string AND filter fid
ids = {f['pk']: f.id() for f in self.source.getFeatures()}
self.source.setSubsetString(subset)
request = QgsFeatureRequest().setFilterFid(4)
result = set([f.id() for f in self.source.getFeatures(request)])
all_valid = (all(f.isValid() for f in self.source.getFeatures(request)))
self.source.setSubsetString(None)
expected = set([4])
assert set(expected) == result, 'Expected {} and got {} when testing subset string {}'.format(set(expected),
result, subset)
self.assertTrue(all_valid)
# Subset string AND filter fids
self.source.setSubsetString(subset)
request = QgsFeatureRequest().setFilterFids([ids[2], ids[4]])
result = set([f.id() for f in self.source.getFeatures(request)])
all_valid = (all(f.isValid() for f in self.source.getFeatures(request)))
self.source.setSubsetString(None)
expected = set([ids[2], ids[4]])
assert set(expected) == result, 'Expected {} and got {} when testing subset string {}'.format(set(expected),
result, subset)
self.assertTrue(all_valid)
def getSubsetString(self):
"""Individual providers may need to override this depending on their subset string formats"""
return '"cnt" > 100 and "cnt" < 410'
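        # Providers whose subset-string syntax differs from this SQL-like form can override
        # this (and the other getSubsetString* methods); the tests only depend on which
        # features are selected -- this one must match the features with pk 2, 3 and 4.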
def getSubsetString2(self):
"""Individual providers may need to override this depending on their subset string formats"""
return '"cnt" > 100 and "cnt" < 400'
def getSubsetString3(self):
"""Individual providers may need to override this depending on their subset string formats"""
return '"name"=\'Apple\''
def getSubsetStringNoMatching(self):
"""Individual providers may need to override this depending on their subset string formats"""
return '"name"=\'AppleBearOrangePear\''
def testGetFeaturesThreadSafety(self):
# no request
self.assertTrue(QgsTestUtils.testProviderIteratorThreadSafety(self.source))
# filter rect request
extent = QgsRectangle(-73, 70, -63, 80)
request = QgsFeatureRequest().setFilterRect(extent)
self.assertTrue(QgsTestUtils.testProviderIteratorThreadSafety(self.source, request))
def testOrderBy(self):
try:
self.disableCompiler()
except AttributeError:
pass
self.runOrderByTests()
def testOrderByCompiled(self):
if self.enableCompiler():
self.runOrderByTests()
def runOrderByTests(self):
FeatureSourceTestCase.runOrderByTests(self)
# Combination with subset of attributes
request = QgsFeatureRequest().addOrderBy('num_char', False).setSubsetOfAttributes(['pk'], self.vl.fields())
values = [f['pk'] for f in self.vl.getFeatures(request)]
self.assertEqual(values, [5, 4, 3, 2, 1])
def testOpenIteratorAfterLayerRemoval(self):
"""
        Test that removing a layer after opening an iterator does not crash. All required
        information should be captured in the iterator's source and there MUST be no
        links between the iterators and the layer's data provider.
"""
if not getattr(self, 'getEditableLayer', None):
return
l = self.getEditableLayer()
self.assertTrue(l.isValid())
# store the source
source = QgsVectorLayerFeatureSource(l)
# delete the layer
del l
# get the features
pks = []
for f in source.getFeatures():
pks.append(f['pk'])
self.assertEqual(set(pks), {1, 2, 3, 4, 5})
def testCloneLayer(self):
"""
        Test that cloning a layer works and that the clone contains all expected features
"""
l = self.vl.clone()
pks = []
for f in l.getFeatures():
pks.append(f['pk'])
self.assertEqual(set(pks), {1, 2, 3, 4, 5})
def testGetFeaturesPolyFilterRectTests(self):
""" Test fetching features from a polygon layer with filter rect"""
try:
if not self.poly_provider:
return
except:
return
extent = QgsRectangle(-73, 70, -63, 80)
request = QgsFeatureRequest().setFilterRect(extent)
features = [f['pk'] for f in self.poly_provider.getFeatures(request)]
all_valid = (all(f.isValid() for f in self.source.getFeatures(request)))
# Some providers may return the exact intersection matches (2, 3) even without the ExactIntersect flag, so we accept that too
assert set(features) == set([2, 3]) or set(features) == set([1, 2, 3]), 'Got {} instead'.format(features)
self.assertTrue(all_valid)
# Test with exact intersection
request = QgsFeatureRequest().setFilterRect(extent).setFlags(QgsFeatureRequest.ExactIntersect)
features = [f['pk'] for f in self.poly_provider.getFeatures(request)]
all_valid = (all(f.isValid() for f in self.source.getFeatures(request)))
assert set(features) == set([2, 3]), 'Got {} instead'.format(features)
self.assertTrue(all_valid)
# test with an empty rectangle
extent = QgsRectangle()
features = [f['pk'] for f in self.source.getFeatures(QgsFeatureRequest().setFilterRect(extent))]
assert set(features) == set([1, 2, 3, 4, 5]), 'Got {} instead'.format(features)
def testMinValue(self):
self.assertFalse(self.source.minimumValue(-1))
self.assertFalse(self.source.minimumValue(1000))
self.assertEqual(self.source.minimumValue(self.source.fields().lookupField('cnt')), -200)
self.assertEqual(self.source.minimumValue(self.source.fields().lookupField('name')), 'Apple')
if self.treat_datetime_as_string():
self.assertEqual(self.source.minimumValue(self.source.fields().lookupField('dt')), '2020-05-03 12:13:14')
else:
self.assertEqual(self.source.minimumValue(self.source.fields().lookupField('dt')),
QDateTime(QDate(2020, 5, 3), QTime(12, 13, 14)))
if self.treat_date_as_string():
self.assertEqual(self.source.minimumValue(self.source.fields().lookupField('date')), '2020-05-02')
elif not self.treat_date_as_datetime():
self.assertEqual(self.source.minimumValue(self.source.fields().lookupField('date')), QDate(2020, 5, 2))
else:
self.assertEqual(self.source.minimumValue(self.source.fields().lookupField('date')),
QDateTime(2020, 5, 2, 0, 0, 0))
if not self.treat_time_as_string():
self.assertEqual(self.source.minimumValue(self.source.fields().lookupField('time')), QTime(12, 13, 1))
else:
self.assertEqual(self.source.minimumValue(self.source.fields().lookupField('time')), '12:13:01')
if self.source.supportsSubsetString():
subset = self.getSubsetString()
self.source.setSubsetString(subset)
min_value = self.source.minimumValue(self.source.fields().lookupField('cnt'))
self.source.setSubsetString(None)
self.assertEqual(min_value, 200)
def testMaxValue(self):
self.assertFalse(self.source.maximumValue(-1))
self.assertFalse(self.source.maximumValue(1000))
self.assertEqual(self.source.maximumValue(self.source.fields().lookupField('cnt')), 400)
self.assertEqual(self.source.maximumValue(self.source.fields().lookupField('name')), 'Pear')
if not self.treat_datetime_as_string():
self.assertEqual(self.source.maximumValue(self.source.fields().lookupField('dt')),
QDateTime(QDate(2021, 5, 4), QTime(13, 13, 14)))
else:
self.assertEqual(self.source.maximumValue(self.source.fields().lookupField('dt')), '2021-05-04 13:13:14')
if self.treat_date_as_string():
self.assertEqual(self.source.maximumValue(self.source.fields().lookupField('date')), '2021-05-04')
elif not self.treat_date_as_datetime():
self.assertEqual(self.source.maximumValue(self.source.fields().lookupField('date')), QDate(2021, 5, 4))
else:
self.assertEqual(self.source.maximumValue(self.source.fields().lookupField('date')),
QDateTime(2021, 5, 4, 0, 0, 0))
if not self.treat_time_as_string():
self.assertEqual(self.source.maximumValue(self.source.fields().lookupField('time')), QTime(13, 13, 14))
else:
self.assertEqual(self.source.maximumValue(self.source.fields().lookupField('time')), '13:13:14')
if self.source.supportsSubsetString():
subset = self.getSubsetString2()
self.source.setSubsetString(subset)
max_value = self.source.maximumValue(self.source.fields().lookupField('cnt'))
self.source.setSubsetString(None)
self.assertEqual(max_value, 300)
def testExtent(self):
reference = QgsGeometry.fromRect(
QgsRectangle(-71.123, 66.33, -65.32, 78.3))
provider_extent = self.source.extent()
self.assertAlmostEqual(provider_extent.xMinimum(), -71.123, 3)
self.assertAlmostEqual(provider_extent.xMaximum(), -65.32, 3)
self.assertAlmostEqual(provider_extent.yMinimum(), 66.33, 3)
self.assertAlmostEqual(provider_extent.yMaximum(), 78.3, 3)
def testExtentSubsetString(self):
if self.source.supportsSubsetString():
# with only one point
subset = self.getSubsetString3()
self.source.setSubsetString(subset)
count = self.source.featureCount()
provider_extent = self.source.extent()
self.source.setSubsetString(None)
self.assertEqual(count, 1)
self.assertAlmostEqual(provider_extent.xMinimum(), -68.2, 3)
self.assertAlmostEqual(provider_extent.xMaximum(), -68.2, 3)
self.assertAlmostEqual(provider_extent.yMinimum(), 70.8, 3)
self.assertAlmostEqual(provider_extent.yMaximum(), 70.8, 3)
# with no points
subset = self.getSubsetStringNoMatching()
self.source.setSubsetString(subset)
count = self.source.featureCount()
provider_extent = self.source.extent()
self.source.setSubsetString(None)
self.assertEqual(count, 0)
self.assertTrue(provider_extent.isNull())
self.assertEqual(self.source.featureCount(), 5)
def testUnique(self):
self.assertEqual(self.source.uniqueValues(-1), set())
self.assertEqual(self.source.uniqueValues(1000), set())
self.assertEqual(set(self.source.uniqueValues(self.source.fields().lookupField('cnt'))),
set([-200, 100, 200, 300, 400]))
assert set(['Apple', 'Honey', 'Orange', 'Pear', NULL]) == set(
self.source.uniqueValues(self.source.fields().lookupField('name'))), 'Got {}'.format(
set(self.source.uniqueValues(self.source.fields().lookupField('name'))))
if self.treat_datetime_as_string():
self.assertEqual(set(self.source.uniqueValues(self.source.fields().lookupField('dt'))),
set(['2021-05-04 13:13:14', '2020-05-04 12:14:14', '2020-05-04 12:13:14',
'2020-05-03 12:13:14', NULL]))
else:
self.assertEqual(set(self.source.uniqueValues(self.source.fields().lookupField('dt'))),
set([QDateTime(2021, 5, 4, 13, 13, 14), QDateTime(2020, 5, 4, 12, 14, 14),
QDateTime(2020, 5, 4, 12, 13, 14), QDateTime(2020, 5, 3, 12, 13, 14), NULL]))
if self.treat_date_as_string():
self.assertEqual(set(self.source.uniqueValues(self.source.fields().lookupField('date'))),
set(['2020-05-03', '2020-05-04', '2021-05-04', '2020-05-02', NULL]))
elif self.treat_date_as_datetime():
self.assertEqual(set(self.source.uniqueValues(self.source.fields().lookupField('date'))),
set([QDateTime(2020, 5, 3, 0, 0, 0), QDateTime(2020, 5, 4, 0, 0, 0),
QDateTime(2021, 5, 4, 0, 0, 0), QDateTime(2020, 5, 2, 0, 0, 0), NULL]))
else:
self.assertEqual(set(self.source.uniqueValues(self.source.fields().lookupField('date'))),
set([QDate(2020, 5, 3), QDate(2020, 5, 4), QDate(2021, 5, 4), QDate(2020, 5, 2), NULL]))
if self.treat_time_as_string():
self.assertEqual(set(self.source.uniqueValues(self.source.fields().lookupField('time'))),
set(['12:14:14', '13:13:14', '12:13:14', '12:13:01', NULL]))
else:
self.assertEqual(set(self.source.uniqueValues(self.source.fields().lookupField('time'))),
set([QTime(12, 14, 14), QTime(13, 13, 14), QTime(12, 13, 14), QTime(12, 13, 1), NULL]))
if self.source.supportsSubsetString():
subset = self.getSubsetString2()
self.source.setSubsetString(subset)
values = self.source.uniqueValues(self.source.fields().lookupField('cnt'))
self.source.setSubsetString(None)
self.assertEqual(set(values), set([200, 300]))
def testUniqueStringsMatching(self):
self.assertEqual(self.source.uniqueStringsMatching(-1, 'a'), [])
self.assertEqual(self.source.uniqueStringsMatching(100001, 'a'), [])
field_index = self.source.fields().lookupField('name')
self.assertEqual(set(self.source.uniqueStringsMatching(field_index, 'a')), set(['Pear', 'Orange', 'Apple']))
# test case insensitive
self.assertEqual(set(self.source.uniqueStringsMatching(field_index, 'A')), set(['Pear', 'Orange', 'Apple']))
# test string ending in substring
self.assertEqual(set(self.source.uniqueStringsMatching(field_index, 'ney')), set(['Honey']))
# test limit
result = set(self.source.uniqueStringsMatching(field_index, 'a', 2))
self.assertEqual(len(result), 2)
self.assertTrue(result.issubset(set(['Pear', 'Orange', 'Apple'])))
assert set([u'Apple', u'Honey', u'Orange', u'Pear', NULL]) == set(
self.source.uniqueValues(field_index)), 'Got {}'.format(set(self.source.uniqueValues(field_index)))
if self.source.supportsSubsetString():
subset = self.getSubsetString2()
self.source.setSubsetString(subset)
values = self.source.uniqueStringsMatching(field_index, 'a')
self.source.setSubsetString(None)
self.assertEqual(set(values), set(['Pear', 'Apple']))
def testFeatureCount(self):
self.assertEqual(self.source.featureCount(), 5)
if self.source.supportsSubsetString():
# Add a subset string and test feature count
subset = self.getSubsetString()
self.source.setSubsetString(subset)
count = self.source.featureCount()
self.source.setSubsetString(None)
self.assertEqual(count, 3)
self.assertEqual(self.source.featureCount(), 5)
# one matching records
subset = self.getSubsetString3()
self.source.setSubsetString(subset)
count = self.source.featureCount()
self.source.setSubsetString(None)
self.assertEqual(count, 1)
self.assertEqual(self.source.featureCount(), 5)
# no matching records
subset = self.getSubsetStringNoMatching()
self.source.setSubsetString(subset)
count = self.source.featureCount()
self.source.setSubsetString(None)
self.assertEqual(count, 0)
self.assertEqual(self.source.featureCount(), 5)
def testEmpty(self):
self.assertFalse(self.source.empty())
self.assertEqual(self.source.hasFeatures(), QgsFeatureSource.FeaturesAvailable)
if self.source.supportsSubsetString():
try:
backup = self.source.subsetString()
# Add a subset string and test feature count
subset = self.getSubsetString()
self.source.setSubsetString(subset)
self.assertFalse(self.source.empty())
self.assertEqual(self.source.hasFeatures(), QgsFeatureSource.FeaturesAvailable)
subsetNoMatching = self.getSubsetStringNoMatching()
self.source.setSubsetString(subsetNoMatching)
self.assertTrue(self.source.empty())
self.assertEqual(self.source.hasFeatures(), QgsFeatureSource.NoFeaturesAvailable)
finally:
self.source.setSubsetString(None)
self.assertFalse(self.source.empty())
# If the provider supports tests on editable layers
if getattr(self, 'getEditableLayer', None):
l = self.getEditableLayer()
self.assertTrue(l.isValid())
self.assertEqual(l.hasFeatures(), QgsFeatureSource.FeaturesAvailable)
# Test that deleting some features in the edit buffer does not
# return empty, we accept FeaturesAvailable as well as
# MaybeAvailable
l.startEditing()
l.deleteFeature(next(l.getFeatures()).id())
self.assertNotEqual(l.hasFeatures(), QgsFeatureSource.NoFeaturesAvailable)
l.rollBack()
# Call truncate(), we need an empty set now
l.dataProvider().truncate()
self.assertTrue(l.dataProvider().empty())
self.assertEqual(l.dataProvider().hasFeatures(), QgsFeatureSource.NoFeaturesAvailable)
def testGetFeaturesNoGeometry(self):
""" Test that no geometry is present when fetching features without geometry"""
for f in self.source.getFeatures(QgsFeatureRequest().setFlags(QgsFeatureRequest.NoGeometry)):
self.assertFalse(f.hasGeometry(), 'Expected no geometry, got one')
self.assertTrue(f.isValid())
def testAddFeature(self):
if not getattr(self, 'getEditableLayer', None):
return
l = self.getEditableLayer()
self.assertTrue(l.isValid())
f1 = QgsFeature()
f1.setAttributes([6, -220, NULL, 'String', '15',
'2019-01-02 03:04:05' if self.treat_datetime_as_string() else QDateTime(2019, 1, 2, 3, 4, 5),
'2019-01-02' if self.treat_date_as_string() else QDateTime(2019, 1, 2, 0, 0,
0) if self.treat_date_as_datetime() else QDate(
2019, 1, 2),
'03:04:05' if self.treat_time_as_string() else QTime(3, 4, 5)])
f1.setGeometry(QgsGeometry.fromWkt('Point (-72.345 71.987)'))
f2 = QgsFeature()
f2.setAttributes([7, 330, 'Coconut', 'CoCoNut', '13',
'2018-05-06 07:08:09' if self.treat_datetime_as_string() else QDateTime(2018, 5, 6, 7, 8, 9),
'2018-05-06' if self.treat_date_as_string() else QDateTime(2018, 5, 6, 0, 0,
0) if self.treat_date_as_datetime() else QDate(
2018, 5, 6),
'07:08:09' if self.treat_time_as_string() else QTime(7, 8, 9)])
if l.dataProvider().capabilities() & QgsVectorDataProvider.AddFeatures:
# expect success
result, added = l.dataProvider().addFeatures([f1, f2])
self.assertTrue(result, 'Provider reported AddFeatures capability, but returned False to addFeatures')
f1.setId(added[0].id())
f2.setId(added[1].id())
# check result
self.testGetFeatures(l.dataProvider(), [f1, f2])
# add empty list, should return true for consistency
self.assertTrue(l.dataProvider().addFeatures([]))
# ensure that returned features have been given the correct id
f = next(l.getFeatures(QgsFeatureRequest().setFilterFid(added[0].id())))
self.assertTrue(f.isValid())
self.assertEqual(f['cnt'], -220)
f = next(l.getFeatures(QgsFeatureRequest().setFilterFid(added[1].id())))
self.assertTrue(f.isValid())
self.assertEqual(f['cnt'], 330)
else:
# expect fail
self.assertFalse(l.dataProvider().addFeatures([f1, f2]),
'Provider reported no AddFeatures capability, but returned true to addFeatures')
def testAddFeatureFastInsert(self):
if not getattr(self, 'getEditableLayer', None):
return
l = self.getEditableLayer()
self.assertTrue(l.isValid())
f1 = QgsFeature()
f1.setAttributes(
[6, -220, NULL, 'String', '15',
'2019-01-02 03:04:05' if self.treat_datetime_as_string() else QDateTime(2019, 1, 2, 3, 4, 5),
'2019-01-02' if self.treat_date_as_string() else QDateTime(2019, 1, 2, 0, 0, 0) if self.treat_date_as_datetime() else QDate(2019, 1, 2),
'03:04:05' if self.treat_time_as_string() else QTime(3, 4, 5)])
f1.setGeometry(QgsGeometry.fromWkt('Point (-72.345 71.987)'))
f2 = QgsFeature()
f2.setAttributes([7, 330, 'Coconut', 'CoCoNut', '13',
'2019-01-02 03:04:05' if self.treat_datetime_as_string() else QDateTime(2019, 1, 2, 3, 4, 5),
'2019-01-02' if self.treat_date_as_string() else QDateTime(2019, 1, 2, 0, 0, 0) if self.treat_date_as_datetime() else QDate(2019, 1, 2),
'03:04:05' if self.treat_time_as_string() else QTime(3, 4, 5)])
if l.dataProvider().capabilities() & QgsVectorDataProvider.AddFeatures:
# expect success
result, added = l.dataProvider().addFeatures([f1, f2], QgsFeatureSink.FastInsert)
self.assertTrue(result, 'Provider reported AddFeatures capability, but returned False to addFeatures')
self.assertEqual(l.dataProvider().featureCount(), 7)
def testAddFeatureMissingAttributes(self):
if not getattr(self, 'getEditableLayer', None):
return
l = self.getEditableLayer()
self.assertTrue(l.isValid())
if not l.dataProvider().capabilities() & QgsVectorDataProvider.AddFeatures:
return
# test that adding features with missing attributes pads out these
# attributes with NULL values to the correct length
f1 = QgsFeature()
f1.setAttributes([6, -220, NULL, 'String'])
f2 = QgsFeature()
f2.setAttributes([7, 330])
result, added = l.dataProvider().addFeatures([f1, f2])
self.assertTrue(result,
'Provider returned False to addFeatures with missing attributes. Providers should accept these features but add NULL attributes to the end of the existing attributes to the required field length.')
f1.setId(added[0].id())
f2.setId(added[1].id())
# check result - feature attributes MUST be padded out to required number of fields
f1.setAttributes([6, -220, NULL, 'String', 'NULL', NULL, NULL, NULL])
f2.setAttributes([7, 330, NULL, NULL, 'NULL', NULL, NULL, NULL])
self.testGetFeatures(l.dataProvider(), [f1, f2])
def testAddFeatureExtraAttributes(self):
if not getattr(self, 'getEditableLayer', None):
return
l = self.getEditableLayer()
self.assertTrue(l.isValid())
if not l.dataProvider().capabilities() & QgsVectorDataProvider.AddFeatures:
return
# test that adding features with too many attributes drops these attributes
        # we'll be more tricky and also add a valid feature to stress test the provider
f1 = QgsFeature()
f1.setAttributes([6, -220, NULL, 'String', '15',
'2019-01-02 03:04:05' if self.treat_datetime_as_string() else QDateTime(2019, 1, 2, 3, 4, 5),
'2019-01-02' if self.treat_date_as_string() else QDateTime(2019, 1, 2, 0, 0, 0) if self.treat_date_as_datetime() else QDate(2019, 1, 2),
'03:04:05' if self.treat_time_as_string() else QTime(3, 4, 5)])
f2 = QgsFeature()
f2.setAttributes([7, -230, NULL, 'String', '15',
'2019-01-02 03:04:05' if self.treat_datetime_as_string() else QDateTime(2019, 1, 2, 3, 4, 5),
'2019-01-02' if self.treat_date_as_string() else QDateTime(2019, 1, 2, 0, 0, 0) if self.treat_date_as_datetime() else QDate(2019, 1, 2),
'03:04:05' if self.treat_time_as_string() else QTime(3, 4, 5), 15, 16, 17])
result, added = l.dataProvider().addFeatures([f1, f2])
self.assertTrue(result,
'Provider returned False to addFeatures with extra attributes. Providers should accept these features but truncate the extra attributes.')
# make sure feature was added correctly
added = [f for f in l.dataProvider().getFeatures() if f['pk'] == 7][0]
self.assertEqual(added.attributes(), [7, -230, NULL, 'String', '15',
'2019-01-02 03:04:05' if self.treat_datetime_as_string() else QDateTime(
2019, 1, 2, 3, 4, 5),
'2019-01-02' if self.treat_date_as_string() else QDateTime(2019, 1, 2, 0, 0, 0) if self.treat_date_as_datetime() else QDate(2019, 1, 2),
'03:04:05' if self.treat_time_as_string() else QTime(3, 4, 5)])
def testAddFeatureWrongGeomType(self):
if not getattr(self, 'getEditableLayer', None):
return
l = self.getEditableLayer()
self.assertTrue(l.isValid())
if not l.dataProvider().capabilities() & QgsVectorDataProvider.AddFeatures:
return
# test that adding features with incorrect geometry type rejects the feature
        # we'll be more tricky and also add a valid feature to stress test the provider
f1 = QgsFeature()
f1.setGeometry(QgsGeometry.fromWkt('LineString (-72.345 71.987, -80 80)'))
f1.setAttributes([7])
f2 = QgsFeature()
f2.setGeometry(QgsGeometry.fromWkt('Point (-72.345 71.987)'))
f2.setAttributes([8])
result, added = l.dataProvider().addFeatures([f1, f2])
self.assertFalse(result,
'Provider returned True to addFeatures with incorrect geometry type. Providers should reject these features.')
# make sure feature was not added
added = [f for f in l.dataProvider().getFeatures() if f['pk'] == 7]
self.assertFalse(added)
# yet providers MUST always accept null geometries
f3 = QgsFeature()
f3.setAttributes([9])
result, added = l.dataProvider().addFeatures([f3])
self.assertTrue(result,
'Provider returned False to addFeatures with null geometry. Providers should always accept these features.')
# make sure feature was added correctly
added = [f for f in l.dataProvider().getFeatures() if f['pk'] == 9][0]
self.assertFalse(added.hasGeometry())
def testAddFeaturesUpdateExtent(self):
if not getattr(self, 'getEditableLayer', None):
return
l = self.getEditableLayer()
self.assertTrue(l.isValid())
self.assertEqual(l.dataProvider().extent().toString(1), '-71.1,66.3 : -65.3,78.3')
if l.dataProvider().capabilities() & QgsVectorDataProvider.AddFeatures:
f1 = QgsFeature()
f1.setAttributes([6, -220, NULL, 'String', '15'])
f1.setGeometry(QgsGeometry.fromWkt('Point (-50 90)'))
l.dataProvider().addFeatures([f1])
l.dataProvider().updateExtents()
self.assertEqual(l.dataProvider().extent().toString(1), '-71.1,66.3 : -50.0,90.0')
def testDeleteFeatures(self):
if not getattr(self, 'getEditableLayer', None):
return
l = self.getEditableLayer()
self.assertTrue(l.isValid())
# find 2 features to delete
features = [f for f in l.dataProvider().getFeatures()]
to_delete = [f.id() for f in features if f.attributes()[0] in [1, 3]]
if l.dataProvider().capabilities() & QgsVectorDataProvider.DeleteFeatures:
# expect success
result = l.dataProvider().deleteFeatures(to_delete)
self.assertTrue(result, 'Provider reported DeleteFeatures capability, but returned False to deleteFeatures')
# check result
self.testGetFeatures(l.dataProvider(), skip_features=[1, 3])
# delete empty list, should return true for consistency
self.assertTrue(l.dataProvider().deleteFeatures([]))
else:
# expect fail
self.assertFalse(l.dataProvider().deleteFeatures(to_delete),
'Provider reported no DeleteFeatures capability, but returned true to deleteFeatures')
def testDeleteFeaturesUpdateExtent(self):
if not getattr(self, 'getEditableLayer', None):
return
l = self.getEditableLayer()
self.assertTrue(l.isValid())
self.assertEqual(l.dataProvider().extent().toString(1), '-71.1,66.3 : -65.3,78.3')
to_delete = [f.id() for f in l.dataProvider().getFeatures() if f.attributes()[0] in [5, 4]]
if l.dataProvider().capabilities() & QgsVectorDataProvider.DeleteFeatures:
l.dataProvider().deleteFeatures(to_delete)
l.dataProvider().updateExtents()
self.assertEqual(l.dataProvider().extent().toString(1), '-70.3,66.3 : -68.2,70.8')
def testTruncate(self):
if not getattr(self, 'getEditableLayer', None):
return
l = self.getEditableLayer()
self.assertTrue(l.isValid())
features = [f['pk'] for f in l.dataProvider().getFeatures()]
if l.dataProvider().capabilities() & QgsVectorDataProvider.FastTruncate or l.dataProvider().capabilities() & QgsVectorDataProvider.DeleteFeatures:
# expect success
result = l.dataProvider().truncate()
self.assertTrue(result,
'Provider reported FastTruncate or DeleteFeatures capability, but returned False to truncate()')
# check result
features = [f['pk'] for f in l.dataProvider().getFeatures()]
self.assertEqual(len(features), 0)
else:
# expect fail
self.assertFalse(l.dataProvider().truncate(),
'Provider reported no FastTruncate or DeleteFeatures capability, but returned true to truncate()')
def testChangeAttributes(self):
if not getattr(self, 'getEditableLayer', None):
return
l = self.getEditableLayer()
self.assertTrue(l.isValid())
# find 2 features to change
features = [f for f in l.dataProvider().getFeatures()]
# need to keep order here
to_change = [f for f in features if f.attributes()[0] == 1]
to_change.extend([f for f in features if f.attributes()[0] == 3])
# changes by feature id, for changeAttributeValues call
changes = {to_change[0].id(): {1: 501, 3: 'new string'}, to_change[1].id(): {1: 502, 4: 'NEW'}}
# changes by pk, for testing after retrieving changed features
new_attr_map = {1: {1: 501, 3: 'new string'}, 3: {1: 502, 4: 'NEW'}}
if l.dataProvider().capabilities() & QgsVectorDataProvider.ChangeAttributeValues:
# expect success
result = l.dataProvider().changeAttributeValues(changes)
self.assertTrue(result,
'Provider reported ChangeAttributeValues capability, but returned False to changeAttributeValues')
# check result
self.testGetFeatures(l.dataProvider(), changed_attributes=new_attr_map)
# change empty list, should return true for consistency
self.assertTrue(l.dataProvider().changeAttributeValues({}))
else:
# expect fail
self.assertFalse(l.dataProvider().changeAttributeValues(changes),
'Provider reported no ChangeAttributeValues capability, but returned true to changeAttributeValues')
def testChangeGeometries(self):
if not getattr(self, 'getEditableLayer', None):
return
l = self.getEditableLayer()
self.assertTrue(l.isValid())
# find 2 features to change
features = [f for f in l.dataProvider().getFeatures()]
to_change = [f for f in features if f.attributes()[0] == 1]
to_change.extend([f for f in features if f.attributes()[0] == 3])
# changes by feature id, for changeGeometryValues call
changes = {to_change[0].id(): QgsGeometry.fromWkt('Point (10 20)'), to_change[1].id(): QgsGeometry()}
# changes by pk, for testing after retrieving changed features
new_geom_map = {1: QgsGeometry.fromWkt('Point ( 10 20 )'), 3: QgsGeometry()}
if l.dataProvider().capabilities() & QgsVectorDataProvider.ChangeGeometries:
# expect success
result = l.dataProvider().changeGeometryValues(changes)
self.assertTrue(result,
'Provider reported ChangeGeometries capability, but returned False to changeGeometryValues')
# check result
self.testGetFeatures(l.dataProvider(), changed_geometries=new_geom_map)
# change empty list, should return true for consistency
self.assertTrue(l.dataProvider().changeGeometryValues({}))
else:
# expect fail
self.assertFalse(l.dataProvider().changeGeometryValues(changes),
'Provider reported no ChangeGeometries capability, but returned true to changeGeometryValues')
def testChangeFeatures(self):
if not getattr(self, 'getEditableLayer', None):
return
l = self.getEditableLayer()
self.assertTrue(l.isValid())
        # find 2 features to change attributes for
        features = [f for f in l.dataProvider().getFeatures()]
# need to keep order here
to_change = [f for f in features if f.attributes()[0] == 1]
to_change.extend([f for f in features if f.attributes()[0] == 2])
# changes by feature id, for changeAttributeValues call
attribute_changes = {to_change[0].id(): {1: 501, 3: 'new string'}, to_change[1].id(): {1: 502, 4: 'NEW'}}
# changes by pk, for testing after retrieving changed features
new_attr_map = {1: {1: 501, 3: 'new string'}, 2: {1: 502, 4: 'NEW'}}
# find 2 features to change geometries for
to_change = [f for f in features if f.attributes()[0] == 1]
to_change.extend([f for f in features if f.attributes()[0] == 3])
# changes by feature id, for changeGeometryValues call
geometry_changes = {to_change[0].id(): QgsGeometry.fromWkt('Point (10 20)'), to_change[1].id(): QgsGeometry()}
# changes by pk, for testing after retrieving changed features
new_geom_map = {1: QgsGeometry.fromWkt('Point ( 10 20 )'), 3: QgsGeometry()}
if l.dataProvider().capabilities() & QgsVectorDataProvider.ChangeGeometries and l.dataProvider().capabilities() & QgsVectorDataProvider.ChangeAttributeValues:
# expect success
result = l.dataProvider().changeFeatures(attribute_changes, geometry_changes)
self.assertTrue(result,
'Provider reported ChangeGeometries and ChangeAttributeValues capability, but returned False to changeFeatures')
# check result
self.testGetFeatures(l.dataProvider(), changed_attributes=new_attr_map, changed_geometries=new_geom_map)
# change empty list, should return true for consistency
self.assertTrue(l.dataProvider().changeFeatures({}, {}))
elif not l.dataProvider().capabilities() & QgsVectorDataProvider.ChangeGeometries:
# expect fail
self.assertFalse(l.dataProvider().changeFeatures(attribute_changes, geometry_changes),
'Provider reported no ChangeGeometries capability, but returned true to changeFeatures')
elif not l.dataProvider().capabilities() & QgsVectorDataProvider.ChangeAttributeValues:
# expect fail
self.assertFalse(l.dataProvider().changeFeatures(attribute_changes, geometry_changes),
'Provider reported no ChangeAttributeValues capability, but returned true to changeFeatures')
def testMinMaxAfterChanges(self):
"""
Tests retrieving field min and max value after making changes to the provider's features
"""
if not getattr(self, 'getEditableLayer', None):
return
vl = self.getEditableLayer()
self.assertTrue(vl.isValid())
self.assertEqual(vl.dataProvider().minimumValue(0), 1)
self.assertEqual(vl.dataProvider().minimumValue(1), -200)
self.assertEqual(vl.dataProvider().maximumValue(0), 5)
self.assertEqual(vl.dataProvider().maximumValue(1), 400)
# add feature
f6 = QgsFeature()
f6.setAttributes([15, 1400])
res, [f6] = vl.dataProvider().addFeatures([f6])
self.assertTrue(res)
self.assertEqual(vl.dataProvider().minimumValue(0), 1)
self.assertEqual(vl.dataProvider().minimumValue(1), -200)
self.assertEqual(vl.dataProvider().maximumValue(0), 15)
self.assertEqual(vl.dataProvider().maximumValue(1), 1400)
f7 = QgsFeature()
f7.setAttributes([0, -1400])
res, [f7] = vl.dataProvider().addFeatures([f7])
self.assertTrue(res)
self.assertEqual(vl.dataProvider().minimumValue(0), 0)
self.assertEqual(vl.dataProvider().minimumValue(1), -1400)
self.assertEqual(vl.dataProvider().maximumValue(0), 15)
self.assertEqual(vl.dataProvider().maximumValue(1), 1400)
# change attribute values
self.assertTrue(vl.dataProvider().changeAttributeValues({f6.id(): {1: 150}, f7.id(): {1: -100}}))
self.assertEqual(vl.dataProvider().minimumValue(1), -200)
self.assertEqual(vl.dataProvider().maximumValue(1), 400)
# delete features
f1 = [f for f in vl.getFeatures() if f['pk'] == 5][0]
f3 = [f for f in vl.getFeatures() if f['pk'] == 3][0]
self.assertTrue(vl.dataProvider().deleteFeatures([f6.id(), f7.id()]))
self.assertEqual(vl.dataProvider().minimumValue(0), 1)
self.assertEqual(vl.dataProvider().minimumValue(1), -200)
self.assertEqual(vl.dataProvider().maximumValue(0), 5)
self.assertEqual(vl.dataProvider().maximumValue(1), 400)
if vl.dataProvider().capabilities() & QgsVectorDataProvider.DeleteAttributes:
# delete attributes
if vl.dataProvider().deleteAttributes([0]):
# may not be possible, e.g. if it's a primary key
self.assertEqual(vl.dataProvider().minimumValue(0), -200)
self.assertEqual(vl.dataProvider().maximumValue(0), 400)
def testStringComparison(self):
"""
Test if string comparisons with numbers are cast by the expression
compiler (or work fine without doing anything :P)
"""
for expression in (
'5 LIKE \'5\'',
'5 ILIKE \'5\'',
'15 NOT LIKE \'5\'',
'15 NOT ILIKE \'5\'',
'5 ~ \'5\''):
            iterator = self.source.getFeatures(QgsFeatureRequest().setFilterExpression(expression))
            count = len([f for f in iterator])
            self.assertEqual(count, 5)
            self.assertFalse(iterator.compileFailed())
            if self.enableCompiler():
                iterator = self.source.getFeatures(QgsFeatureRequest().setFilterExpression(expression))
                count = len([f for f in iterator])
                self.assertEqual(count, 5)
self.assertFalse(iterator.compileFailed())
self.disableCompiler()
def testConcurrency(self):
"""
        The connection pool has a maximum of 4 connections defined (+2 spare connections).
Make sure that if we exhaust those 4 connections and force another connection
it is actually using the spare connections and does not freeze.
This situation normally happens when (at least) 4 rendering threads are active
in parallel and one requires an expression to be evaluated.
"""
# Acquire the maximum amount of concurrent connections
iterators = list()
for i in range(QgsApplication.instance().maxConcurrentConnectionsPerPool()):
iterators.append(self.vl.getFeatures())
# Run an expression that will also do a request and should use a spare
# connection. It just should not deadlock here.
feat = next(iterators[0])
context = QgsExpressionContext()
context.setFeature(feat)
exp = QgsExpression('get_feature(\'{layer}\', \'pk\', 5)'.format(layer=self.vl.id()))
exp.evaluate(context)
def testEmptySubsetOfAttributesWithSubsetString(self):
if self.source.supportsSubsetString():
try:
# Add a subset string
subset = self.getSubsetString()
self.source.setSubsetString(subset)
# First test, in a regular way
features = [f for f in self.source.getFeatures()]
count = len(features)
self.assertEqual(count, 3)
has_geometry = features[0].hasGeometry()
# Ask for no attributes
request = QgsFeatureRequest().setSubsetOfAttributes([])
# Make sure we still retrieve features !
features = [f for f in self.source.getFeatures(request)]
count = len(features)
self.assertEqual(count, 3)
# Check that we still get a geometry if we add one before
self.assertEqual(features[0].hasGeometry(), has_geometry)
finally:
self.source.setSubsetString(None)
| gpl-2.0 | -9,024,574,573,250,967,000 | 47.780443 | 221 | 0.604429 | false |
Autostew/autostew | autostew_back/event_handlers/collision.py | 1 | 4077 | from autostew_back.event_handlers.base_event_handler import BaseEventHandler
from autostew_web_enums.models import EventType, ParticipantState
from autostew_web_session.models.event import Event
from autostew_web_session.models.participant import Participant
warn_at = 0.7
environment_crash_multiplier = 0.1
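# warn_at: fraction of the server's crash-point limit at which a driver is warned
# (see crash_limit_warning below); environment_crash_multiplier scales down impacts
# that do not involve another human driver.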
class HandleCollision(BaseEventHandler):
@classmethod
def can_consume(cls, server, event: Event):
return (
event.type.name == EventType.impact and
event.participant is not None
)
@classmethod
def consume(cls, server, event: Event):
magnitude = event.magnitude if event.human_to_human else int(event.magnitude * environment_crash_multiplier)
if event.ai_involved:
return
if event.participant.state.name != ParticipantState.racing:
return
if event.other_participant and event.other_participant.state.name != ParticipantState.racing:
return
if event.participant.is_player:
cls.add_crash_points(magnitude, event.participant, server, event.other_participant)
if event.other_participant and event.other_participant.is_player:
cls.add_crash_points(magnitude, event.other_participant, server, event.participant)
@classmethod
def add_crash_points(cls, crash_points_increase: int, participant: Participant, server, opponent: Participant=None):
if opponent:
crash_points_increase *= cls.get_interclass_multiplier(participant, opponent)
crash_points_increase = round(crash_points_increase)
participant.accumulated_crash_points += crash_points_increase
class_changed = participant.member.steam_user.add_crash_points(crash_points_increase)
cls.crash_notification(crash_points_increase, participant, server, opponent, class_changed)
if participant.member.steam_user.over_class_kick_impact_threshold(crash_points_increase):
participant.kick(server, server.back_crash_points_limit_ban_seconds)
if server.back_crash_points_limit and participant.accumulated_crash_points > server.back_crash_points_limit:
participant.kick(server, server.back_crash_points_limit_ban_seconds)
elif server.back_crash_points_limit and participant.accumulated_crash_points > warn_at * server.back_crash_points_limit:
cls.crash_limit_warning(participant, server)
@classmethod
def get_interclass_multiplier(cls, participant: Participant, opponent: Participant):
if (
opponent.member.steam_user.safety_class and
opponent.member.steam_user.safety_class.impact_weight and
participant.member.steam_user.safety_class and
participant.member.steam_user.safety_class.impact_weight and
participant.member.steam_user.safety_class.impact_weight < opponent.member.steam_user.safety_class.impact_weight
):
return participant.member.steam_user.safety_class.impact_weight / opponent.member.steam_user.safety_class.impact_weight
return 1
@classmethod
def crash_notification(cls, crash_points_increase, participant, server, opponent: Participant=None, class_changed=False):
participant.send_chat("", server)
if opponent:
participant.send_chat("CONTACT with {}".format(opponent.name), server)
participant.send_chat("CONTACT logged for {points} points.".format(points=crash_points_increase), server)
if class_changed:
participant.send_chat("Your SAFETY CLASS is now {}".format(participant.member.steam_user.safety_class), server)
@classmethod
def crash_limit_warning(cls, participant, server):
participant.send_chat(
"CONTACT: You have collected {points} crash points.".format(points=participant.accumulated_crash_points),
server
)
participant.send_chat(
"CONTACT: Disqualification at {max_crash_points} points.".format(max_crash_points=server.back_crash_points_limit),
server
)
| agpl-3.0 | 1,326,310,007,162,899,000 | 49.333333 | 131 | 0.704194 | false |
47lining/nucleator-core | lib/nucleator/cli/cli.py | 1 | 5116 | # Copyright 2015 47Lining LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nucleator.cli import properties
from nucleator.cli import utils
import sys, os, argparse
class Cli(object):
"""
An object and helper methods to represent an installation of Nucleator
"""
def __init__(self):
self.commands = {}
self.command_paths = []
# Setup the root argument parser
self.parser = argparse.ArgumentParser()
self.parser.add_argument("-v", "--verbosity", required=False, action="count", help="Increase output verbosity")
self.parser.add_argument("--debug-credentials", required=False, action='store_true', help="Show credential output for debugging purposes")
self.parser.add_argument("--no-debug-credentials", required=False, action='store_false', help="Dont show credential output")
self.parser.add_argument("-p", "--preview", required=False, action="store_true",
help="Display information about what a command will do, without actually executing the command.\n" +
"The --preview flag should come before any subcommands on the command line.")
self.parser.add_argument("-d", "--debug", required=False, action="store_true",
help="Turn on debugging mode")
self.subparsers = self.parser.add_subparsers(dest="command")
def core_path(self):
"""path to to core commands installed with Nucleator"""
return properties.core_path()
def contrib_path(self):
"""path to to contrib commands, added by user via Nucleator's update command"""
return properties.contrib_path()
def import_commands(self, path):
# too early to use self.parse()
debug = "--debug" in sys.argv or "-d" in sys.argv
if debug:
print ">>>IMPORT COMMANDS PATH: "+path
if not os.path.isdir(path):
if debug:
print ">>>IMPORT PATH NOT A DIR: "+path
# skip if path to import doesn't exist
return
sys.path.append(path)
# iterate through nucleator command definitions found as immediate subdirs of path
for command_dir in next(os.walk(path))[1]:
self.command_paths.append(os.path.join(path,command_dir))
if debug:
print ">>> IMPORT COMMAND_DIR: "+command_dir
candidate_location = os.path.join(path, command_dir, "commands")
if debug:
print ">>> IMPORT CANDIDATE LOCATION: "+candidate_location
import_candidates = os.listdir(candidate_location) if os.path.isdir(candidate_location) else []
# iterate through filtered import candidates
for name in [n for n in import_candidates
if n.endswith('.py') and n != "__init__.py"]:
if debug:
print ">>> IMPORT CANDIDATE NAME: "+name
name = name.replace('.py', '')
if debug:
print ">>> IMPORT "+"{0}.commands.{1}".format(command_dir, name)
module = __import__(
"{0}.commands.{1}".format(command_dir, name),
fromlist=['']
)
command = getattr(module, "command", None)
if command is None:
utils.write_err("Invalid command implementation (%s)" % name)
command.parser_init(self.subparsers)
self.commands[command.name] = command
def parse(self):
self.opts = vars(self.parser.parse_args())
self.opts["verbose"] = self.opts.get("verbosity", 0) > 0
self.opts["cli"] = self
return self.opts
def current_command_name(self):
return self.opts.get("command")
def get_nucleator_command(self, command_name):
return self.commands[command_name]
def current_nucleator_command(self):
return self.get_nucleator_command(self.current_command_name())
def execute(self):
self.current_nucleator_command().execute(**self.opts)
def dump(self):
utils.write ("{0}{1}".format(self.current_command_name(), os.linesep))
import json
utils.write (
"{0}{1}".format(
json.dumps(
{k:v for (k,v) in self.opts.iteritems() if k != "cli"},
self.opts,
sort_keys=True,
indent=4, separators=(',', ': ')
),
os.linesep
)
)
| apache-2.0 | 9,091,823,914,105,722,000 | 40.934426 | 146 | 0.585418 | false |
robcarver17/pysystemtrade | sysobjects/production/roll_state.py | 1 | 2906 | from enum import Enum
from syscore.objects import named_object
RollState = Enum("RollState", (
"No_Roll",
"Passive",
"Force",
"Force_Outright",
"Roll_Adjusted"))
default_state = RollState.No_Roll
roll_adj_state = RollState.Roll_Adjusted
roll_explanations = {
RollState.No_Roll:"No rolling happens. Will only trade priced contract.",
RollState.Passive:"Allow the contract to roll naturally (closing trades in priced contract, opening trades in forward contract)",
RollState.Force:"Force the contract to roll ASAP using spread order",
RollState.Force_Outright:"Force the contract to roll ASAP using two outright orders",
    RollState.Roll_Adjusted:"Roll adjusted prices from existing priced to new forward contract (after adjusted prices have been changed, will automatically move state to no roll)"}
def is_forced_roll_state(roll_state: RollState):
if roll_state == RollState.Force or roll_state == RollState.Force_Outright:
return True
else:
return False
def is_type_of_active_rolling_roll_state(roll_state: RollState):
if is_forced_roll_state(roll_state) or roll_state == RollState.Roll_Adjusted:
return True
else:
return False
def explain_roll_state_str(roll_state: RollState):
return roll_explanations[RollState[roll_state]]
def name_of_roll_state(roll_state: RollState):
return roll_state.name
def complete_roll_state(roll_state: RollState, priced_position):
if priced_position == 0:
flag_position_in_priced = 0
else:
flag_position_in_priced = 1
return "%s%s" % (name_of_roll_state(roll_state), flag_position_in_priced)
def allowable_roll_state_from_current_and_position(
current_roll_state: RollState, priced_position:int):
# Transition matrix: First option is recommended
# A 0 suffix indicates we have no position in the priced contract
# A 1 suffix indicates we do have a position in the priced contract
allowed_transition = dict(
No_Roll0=["Roll_Adjusted", "Passive", "No_Roll"],
No_Roll1=["Passive", "Force", "Force_Outright", "No_Roll"],
Passive0=["Roll_Adjusted", "Passive", "No_Roll"],
Passive1=["Force", "Force_Outright", "Passive", "No_Roll"],
Force0=["Roll_Adjusted", "Passive"],
Force1=["Force", "Force_Outright", "Passive", "No_Roll"],
Force_Outright0=["Roll_Adjusted", "Passive"],
Force_Outright1=["Force", "Force_Outright", "Passive", "No_Roll"],
Roll_Adjusted0=["No_Roll"],
Roll_Adjusted1=["Roll_Adjusted"],
)
status_plus_position = complete_roll_state(
current_roll_state, priced_position)
try:
allowable_states = allowed_transition[status_plus_position]
except KeyError:
raise Exception(
"State plus position %s not recognised" %
status_plus_position)
return allowable_states
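# Illustrative sketch, not part of the original module: querying the transition
# matrix above for two common situations. This helper is not called anywhere.
def _example_allowable_transitions():
    # Holding a position in the priced contract allows forced rolls
    assert "Force" in allowable_roll_state_from_current_and_position(RollState.No_Roll, 1)
    # Once flat, a passive roll can complete via an adjusted-price roll
    assert allowable_roll_state_from_current_and_position(RollState.Passive, 0) == ["Roll_Adjusted", "Passive", "No_Roll"]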
| gpl-3.0 | 1,969,242,482,449,701,400 | 36.25641 | 179 | 0.681693 | false |
Bajoo/client-pc | bajoo/common/periodic_task.py | 1 | 5677 | # -*- coding: utf-8 -*-
import logging
from threading import Timer, Lock
from ..promise import Deferred, CancelledError
_logger = logging.getLogger(__name__)
class PeriodicTask(object):
"""Generic Thread-based service, executing a task at regular interval.
The task is executed first right after the call to `start()`, in a new
thread.
After each execution, the next execution is scheduled after the specified
delay. The delay doesn't include the task's duration.
Attributes:
delay (int): delay between two executions, in seconds. When modified,
the new value will be used only after the next execution.
context (dict): dict that can be used as a scope shared between the
multiple executions and/or the caller.
args (tuple): arguments passed to the task.
kwargs (dict): keyword arguments passed to the task.
Note:
        context, args and kwargs attributes are not thread-safe. Any
        synchronisation needed to avoid race conditions is up to the caller.
Example:
>>> def _task(pt, arg):
... assert pt.context['value'] == 3
... assert arg == 17
>>> task = PeriodicTask('MyTask', 1, _task, 17)
>>> task.context['value'] = 3
>>> task.start()
>>> task.stop()
"""
def __init__(self, name, delay, task, *args, **kwargs):
"""Constructor
Args:
name (str): Thread name.
delay (float): Delay between two executions, in seconds
task (Callable[[PeriodicTask, ...], T]): task to execute each
periods. First argument is the PeriodicTask instance.
*args (optional): arguments passed to the task.
**kwargs (optional): keywords arguments passed to the task.
"""
self.delay = delay
self.context = {}
self.args = args
self.kwargs = kwargs
self._name = name
self._task = task
self._timer = None
self._canceled = False
self._lock = Lock()
        self._is_running = False  # must be accessed only while holding self._lock
self._apply_now = False
self._deferred = None
def _exec_task(self, *args, **kwargs):
with self._lock:
df = self._deferred
self._deferred = None
self._is_running = True
# self._lock must be released during task execution.
result, error = None, None
try:
result = self._task(self, *args, **kwargs)
except BaseException as err:
error = err
_logger.exception('Periodic task %s has raised exception',
self._task)
with self._lock:
self._is_running = False
if self._apply_now:
delay = 0
self._apply_now = False
else:
delay = self.delay
self._timer = Timer(delay, self._exec_task, args=self.args,
kwargs=self.kwargs)
self._timer.name = self._name
self._timer.daemon = True
if not self._canceled:
self._timer.start()
if df:
if error is None:
df.resolve(result)
else:
df.reject(error)
def start(self):
"""Start the task.
The first execution is immediate.
"""
_logger.debug('Start periodic task %s', self._task)
self._timer = Timer(0, self._exec_task, args=self.args,
kwargs=self.kwargs)
self._timer.name = self._name
self._timer.daemon = True
self._timer.start()
def stop(self, join=False):
"""Stop the task.
Note that if the function is running at the moment this method is
called, the current iteration cannot be stopped.
Args:
join (bool, optional): if True, will block until the running task
finish. Default to False
"""
_logger.debug('Stop periodic task %s', self._task)
with self._lock:
self._canceled = True
self._timer.cancel()
if self._deferred:
self._deferred.reject(CancelledError('PeriodicTask stop now.'))
self._deferred = None
if join:
self._timer.join()
def apply_now(self):
"""Apply the task as soon as possible.
        Note that if the task is currently running, this waits for the current
        run to finish; another iteration is then executed immediately afterwards.
The method can be called from inside the task itself.
Returns:
Promise[T]: resolved when the task has returned. The promise
resolves with the value returned by the task. If the task
raises an exception, the promise is rejected.
"""
self._timer.cancel()
with self._lock:
if self._deferred:
# special case: twice or more apply_now() at the same time.
return self._deferred.promise
self._deferred = Deferred()
if self._is_running:
# We can't stop the current task, so we set a flag to rerun as
# soon as the task returns.
self._apply_now = True
else:
self._timer.cancel()
self._timer = Timer(0, self._exec_task, args=self.args)
self._timer.name = self._name
self._timer.daemon = True
self._timer.start()
return self._deferred.promise
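# Illustrative usage sketch, not part of the original module; it is not called
# anywhere and only shows the intended call sequence.
def _example_usage():
    def _tick(task):
        task.context['count'] = task.context.get('count', 0) + 1
    poller = PeriodicTask('demo-poller', 30, _tick)
    poller.start()           # first run happens immediately in a new thread
    poller.apply_now()       # force an extra run as soon as possible
    poller.stop(join=True)   # cancel the timer and wait for the running task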
| gpl-3.0 | -4,949,742,910,043,404,000 | 33.198795 | 79 | 0.550114 | false |
spillai/procgraph | src/procgraph_mpl/plot_anim.py | 1 | 1093 |
__all__ = [
'PlotAnim',
]
class PlotAnim(object):
def __init__(self):
self.handle_line = {}
self.handle_text = {}
self.pylab = None
def set_pylab(self, pylab):
self.pylab = pylab
def assert_pylab_given(self):
if self.pylab is None:
msg = 'Please call set_pylab() before plotting.'
raise ValueError(msg)
def plot(self, name, x, y, *args, **kwargs):
self.assert_pylab_given()
        if name not in self.handle_line:
handle, = self.pylab.plot(x, y, *args, **kwargs)
self.handle_line[name] = handle
else:
handle = self.handle_line[name]
handle.set_data(x, y)
def text(self, name, x, y, text, *args, **kwargs):
self.assert_pylab_given()
        if name not in self.handle_text:
handle = self.pylab.text(x, y, text, *args, **kwargs)
self.handle_text[name] = handle
else:
handle = self.handle_text[name]
handle.set_text(text)
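# Illustrative sketch, not part of the original module: typical use from a
# procgraph block whose rendering context provides a pylab instance. The
# argument names below are placeholders.
def _example_usage(pylab, xs, ys, frame):
    anim = PlotAnim()
    anim.set_pylab(pylab)
    anim.plot('signal', xs, ys, 'b-')  # first call creates the line, later calls update it
    anim.text('label', 0.1, 0.9, 'frame %d' % frame)
    return anim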
| lgpl-3.0 | -8,553,758,529,945,379,000 | 26.325 | 65 | 0.511436 | false |
silasary/StackIt | StackIt/builder.py | 1 | 18659 | import os, sys, re
#Image manipulation
from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
#Check input format
import mmap
#XML parsing
import xml.etree.ElementTree
#HTML parsing
from lxml import html
from StackIt import scraper, config, decklist, globals
from StackIt.globals import Card, specmana, aftermath
#ensure that mana costs greater than 9 (Kozilek, Emrakul...) aren't misaligned
FILTER = Image.LANCZOS
# Sizes
INNER_MTG_MANA_COST_IMAGE_SIZE = 15
OUTER_MTG_MANA_COST_IMAGE_SIZE = 16
HEX_MANA_COST_IMAGE_SIZE = 20
INNER_ENTRY_HEIGHT = 34
OUTER_ENTRY_HEIGHT = 35
DECK_WIDTH = 280
HEX_DECK_WIDTH = 219
HEX_MASTER_DECK_WIDTH = 320
SCROLLING_DECK_WIDTH_ADJUSTMENT = 10
SCROLLING_DECK_WIDTH = DECK_WIDTH - SCROLLING_DECK_WIDTH_ADJUSTMENT
# Image Positioning
HEX_MANA_COST_LEFT = 10
HEX_MANA_COST_TOP = 7
HEX_MANA_COST_SIZE = 20
HEX_BANNER_TOP = 50
SIDEBOARD_LEFT = 50
MTG_CMC_OFFSET_TOP = 8
# Crops
HEX_IMAGE_CROP = (39, 130, 309, 164)
HEX_MAINGUY_CROP = (134, 55, 185, 275)
MTG_BACKGROUND_X_TOP_OFFSET = 12
MTG_BACKGROUND_Y_OFFSET = 125
MTG_BACKGROUND_Y_OFFSET_AFTERMATH = 55
POKEMON_BACKGROUND_OFFSET_Y_TOP = 90
POKEMON_BACKGROUND_OFFSET_X_BOTTOM = 10
POKEMON_BACKGROUND_OFFSET_Y_BOTTOM = 100
MTG_WIDTH_CROP_RIGHT = 10
POKEMON_WIDTH_CROP_RIGHT = 10
HEX_WIDTH_CROP_RIGHT = 22
# Colors
BLACK = (0, 0, 0)
NEARLY_WHITE = (250, 250, 250)
RGB_MAX_0 = 255
RGB_MAX_1 = 256
HALF = int(RGB_MAX_1 / 2)
BAD_HALF = int(RGB_MAX_0 / 2)
QUARTER = int(RGB_MAX_1 / 4)
BAD_THREE_QUARTERS = 190
# Text Positioning
TEXT_LEFT, TEXT_TOP = 7, 7
POKEMON_TEXT_LEFT, POKEMON_TEXT_TOP = 7, 12
MTG_TITLE_POSITION = (10, 7)
POKEMON_TITLE_POSITION = (10, 8)
TEXT_PASTE_LEFT = 50
HEX_TITLE_LEFT = 15
HEX_TITLE_TOP = 12
SIDEBOARD_TITLE_POSITION = (10, 7)
HEX_BANNER_POSITION = (15, 15)
if config.Get('options', 'indent_hex_title'):
TITLE_INDENT = TEXT_PASTE_LEFT
else:
TITLE_INDENT = 0
# Type Sizes
MTG_FONT_SIZE = 14
MTG_TITLE_FONT_SIZE = 18
HEX_FONT_SIZE = 16
HEX_TITLE_FONT_SIZE = 18
POKEMON_FONT_SIZE = 10
POKEMON_TITLE_FONT_SIZE = 14
# Rotation
ROTATE_RIGHT = 90
ROTATE_LEFT = -90
#some position initialization
X_TOP = 8
X_BOTTOM = 304
Y_TOP = 11.5
Y_BOTTOM = 45.25
X_TOP_POKEMON = 8
X_BOTTOM_POKEMON = 237
Y_TOP_POKEMON = 11.5
Y_BOTTOM_POKEMON = 45.25
def GenerateCMC(name, cost):
check9 = '0123456'
adjustcmc = False
cmc = Image.new('RGBA', (OUTER_MTG_MANA_COST_IMAGE_SIZE * len(cost), OUTER_MTG_MANA_COST_IMAGE_SIZE))
diskcost = cost.strip().replace('*', '_').replace('/', '-')
lookupCMC = os.path.join(globals.CMC_PATH, '{cost}.png'.format(cost=diskcost))
if os.path.exists(lookupCMC):
tap0 = Image.open(lookupCMC)
if tap0.mode != 'RGBA':
tap0 = tap0.convert('RGBA')
cmc.paste(tap0, (0, 0), mask=tap0)
#still need to check cost adjustment...
for n in range(len(cost) - 1):
if (cost[n] == '1') and (check9.find(cost[n + 1]) != -1):
adjustcmc = True
else:
greaterthan9 = False
for n in range(len(cost)):
#reset the large mana cost markers
if greaterthan9:
greaterthan9 = False
adjustcmc = True
continue
#lands have no mana cost and are tagged with '*'
if cost[n] == "*":
continue
#add correct treatment of separation for split cards
elif cost[n] == '/':
symbol = os.path.join(globals.RESOURCES_PATH, 'mana', 'Mana_spn.png')
tap0 = Image.open(symbol)
if tap0.mode != 'RGBA':
tap0 = tap0.convert('RGBA')
tap = tap0.resize((OUTER_MTG_MANA_COST_IMAGE_SIZE, OUTER_MTG_MANA_COST_IMAGE_SIZE), FILTER)
cmc.paste(tap, (INNER_MTG_MANA_COST_IMAGE_SIZE * n, 0), mask=tap)
else:
                if (len(cost) > n + 1) and (cost[n] == '1') and (check9.find(cost[n + 1]) != -1):
finalcost = cost[n] + cost[n + 1]
greaterthan9 = True
else:
finalcost = cost[n]
symbol = os.path.join(globals.RESOURCES_PATH, 'mana', 'Mana_' + finalcost + '.png')
tap0 = Image.open(symbol)
if tap0.mode != 'RGBA':
tap0 = tap0.convert('RGBA')
tap = tap0.resize((OUTER_MTG_MANA_COST_IMAGE_SIZE, OUTER_MTG_MANA_COST_IMAGE_SIZE), FILTER)
cmc.paste(tap, (INNER_MTG_MANA_COST_IMAGE_SIZE * n, 0), mask=tap)
cmc.save(lookupCMC)
return cmc, adjustcmc
def draw_hex_card(name, guid, quantity, nstep):
lookupScan = scraper.download_scanHex(name, guid)
img = Image.open(lookupScan)
img = img.crop(HEX_IMAGE_CROP)
#resize the gradient to the size of im...
alpha = gradient.resize(img.size, FILTER)
#put alpha in the alpha band of im...
img.putalpha(alpha)
bkgd = Image.new("RGB", img.size, "black")
bkgd.paste(img, (0, 0), mask=img)
cut = bkgd
draw = ImageDraw.Draw(cut)
#create text outline
text = str(quantity) + ' ' + name
draw.text((TEXT_LEFT - 1, TEXT_TOP - 1), text, BLACK, font=fnt)
draw.text((TEXT_LEFT + 1, TEXT_TOP - 1), text, BLACK, font=fnt)
draw.text((TEXT_LEFT - 1, TEXT_TOP + 1), text, BLACK, font=fnt)
draw.text((TEXT_LEFT + 1, TEXT_TOP + 1), text, BLACK, font=fnt)
#enter text
draw.text((TEXT_LEFT, TEXT_TOP), text, NEARLY_WHITE, font=fnt)
deck.paste(cut, (TEXT_PASTE_LEFT, (OUTER_ENTRY_HEIGHT) * nstep))
def draw_mtg_card(card, nstep):
isAftermath = False
if card.name.find(" // ") != -1:
namesplit = card.name.replace(" // ", "/")
lookupScan = scraper.download_scan(namesplit, card.set, card.collector_num)
if card.name in aftermath:
isAftermath = True
else:
lookupScan = scraper.download_scan(card.name, card.set, card.collector_num)
img = Image.open(lookupScan)
    if (card.name.find(" // ") != -1) and not isAftermath:
img = img.rotate(ROTATE_LEFT)
#check if im has Alpha band...
if img.mode != 'RGBA':
img = img.convert('RGBA')
#resize the gradient to the size of im...
alpha = gradient.resize(img.size, FILTER)
#put alpha in the alpha band of im...
img.putalpha(alpha)
bkgd = Image.new("RGB", img.size, "black")
bkgd.paste(img, (0, 0), mask=img)
    if isAftermath:
cut = bkgd.crop((X_TOP + MTG_BACKGROUND_X_TOP_OFFSET, Y_TOP + MTG_BACKGROUND_Y_OFFSET_AFTERMATH, X_BOTTOM, Y_BOTTOM + MTG_BACKGROUND_Y_OFFSET_AFTERMATH))
else:
cut = bkgd.crop((X_TOP + MTG_BACKGROUND_X_TOP_OFFSET, Y_TOP + MTG_BACKGROUND_Y_OFFSET, X_BOTTOM, Y_BOTTOM + MTG_BACKGROUND_Y_OFFSET))
draw = ImageDraw.Draw(cut)
text = str(card.quantity) + ' ' + card.name
#create text outline
draw.text((TEXT_LEFT - 1, TEXT_TOP - 1), text, BLACK, font=fnt)
draw.text((TEXT_LEFT + 1, TEXT_TOP - 1), text, BLACK, font=fnt)
draw.text((TEXT_LEFT - 1, TEXT_TOP + 1), text, BLACK, font=fnt)
draw.text((TEXT_LEFT + 1, TEXT_TOP + 1), text, BLACK, font=fnt)
#enter text
draw.text((TEXT_LEFT, TEXT_TOP), text, NEARLY_WHITE, font=fnt)
cmc, adjustcmc = GenerateCMC(card.name, card.cost)
#place the cropped picture of the current card
deck.paste(cut, (0, INNER_ENTRY_HEIGHT * nstep))
#for scrolling decklist
tmpwidth, tmpheight = cut.size
cut2 = cut.crop((0, 0, tmpwidth - SCROLLING_DECK_WIDTH_ADJUSTMENT, tmpheight))
deck2.paste(cut2, (SCROLLING_DECK_WIDTH * nstep, 0))
#adjust cmc size to reflex manacost greater than 9
if adjustcmc:
deck.paste(cmc, (DECK_WIDTH - INNER_MTG_MANA_COST_IMAGE_SIZE * len(card.cost), MTG_CMC_OFFSET_TOP + INNER_ENTRY_HEIGHT * nstep), mask=cmc)
#for scrolling decklist
deck2.paste(cmc, (SCROLLING_DECK_WIDTH * (nstep + 1) - INNER_MTG_MANA_COST_IMAGE_SIZE * len(card.cost), MTG_CMC_OFFSET_TOP), mask=cmc)
adjustcmc = False
else:
deck.paste(cmc, (DECK_WIDTH - INNER_MTG_MANA_COST_IMAGE_SIZE * (len(card.cost) + 1), MTG_CMC_OFFSET_TOP + INNER_ENTRY_HEIGHT * nstep), mask=cmc)
#for scrolling decklist
deck2.paste(cmc, (SCROLLING_DECK_WIDTH * (nstep + 1) - INNER_MTG_MANA_COST_IMAGE_SIZE * (len(card.cost) + 1), MTG_CMC_OFFSET_TOP), mask=cmc)
globals.mkcachepaths()
# create a horizontal gradient...
Hexgradient = Image.new('L', (1, RGB_MAX_0))
#map the gradient
for x in range(QUARTER):
Hexgradient.putpixel((0, x), RGB_MAX_0)
for x in range(QUARTER):
Hexgradient.putpixel((0, QUARTER + x), RGB_MAX_0 - x)
for x in range(HALF):
Hexgradient.putpixel((0, BAD_HALF + x), BAD_THREE_QUARTERS - int(1.5 * x))
# create a horizontal gradient...
gradient = Image.new('L', (RGB_MAX_0, 1))
#map the gradient
for x in range(HALF):
gradient.putpixel((x, 0), int(1.5 * x))
for x in range(QUARTER):
gradient.putpixel((BAD_HALF + x, 0), BAD_THREE_QUARTERS + x)
for x in range(QUARTER):
gradient.putpixel((BAD_THREE_QUARTERS + x, 0), RGB_MAX_0 - 1)
def main(filename):
doSideboard = config.Get('options', 'display_sideboard')
#open user input decklist
raw_decklist = open(str(filename), 'r')
deck_list = decklist.parse_list(raw_decklist)
raw_decklist.close()
print(repr(deck_list))
nstep = 1
# create a header with the deck's name
global fnt
if deck_list.game == decklist.MTG:
fnt = ImageFont.truetype(os.path.join(globals.RESOURCES_PATH, 'fonts', config.Get('fonts', 'mtg')), MTG_FONT_SIZE)
fnt_title = ImageFont.truetype(os.path.join(globals.RESOURCES_PATH, 'fonts', config.Get('fonts', 'mtg')), MTG_TITLE_FONT_SIZE)
title = Image.new("RGB", (DECK_WIDTH, INNER_ENTRY_HEIGHT), "black")
drawtitle = ImageDraw.Draw(title)
drawtitle.text(MTG_TITLE_POSITION, os.path.basename(str(filename))[0:-4], NEARLY_WHITE, font=fnt_title)
elif deck_list.game == decklist.POKEMON:
fnt = ImageFont.truetype(os.path.join(globals.RESOURCES_PATH, 'fonts', config.Get('fonts', 'pkmn')), POKEMON_FONT_SIZE)
fnt_title = ImageFont.truetype(os.path.join(globals.RESOURCES_PATH, 'fonts', config.Get('fonts', 'pkmn')), POKEMON_TITLE_FONT_SIZE)
title = Image.new("RGB", (HEX_DECK_WIDTH, OUTER_ENTRY_HEIGHT), "black")
drawtitle = ImageDraw.Draw(title)
drawtitle.text(POKEMON_TITLE_POSITION, os.path.basename(str(filename))[0:-4], NEARLY_WHITE, font=fnt_title)
elif deck_list.game == decklist.HEX:
fnt = ImageFont.truetype(os.path.join(globals.RESOURCES_PATH, 'fonts', config.Get('fonts', 'hex')), HEX_FONT_SIZE)
fnt_title = ImageFont.truetype(os.path.join(globals.RESOURCES_PATH, 'fonts', config.Get('fonts', 'hex')), HEX_TITLE_FONT_SIZE)
title = Image.new("RGB", (HEX_MASTER_DECK_WIDTH, INNER_ENTRY_HEIGHT), "black")
nametitle = str(filename)[0:-4]
nshard = 0
for re_match in re.finditer(r'(\[[^\]]*\])', nametitle):
shard = re_match.group(0)
if nametitle.find(shard) != -1:
nametitle = nametitle.replace(shard, '')
newshard = Image.open(os.path.join(globals.RESOURCES_PATH, 'hexicons', shard + '.png')).resize((HEX_MANA_COST_IMAGE_SIZE, HEX_MANA_COST_IMAGE_SIZE), FILTER)
title.paste(newshard, (TITLE_INDENT + HEX_MANA_COST_LEFT + nshard * HEX_MANA_COST_SIZE, HEX_MANA_COST_TOP))
nshard = nshard + 1
drawtitle = ImageDraw.Draw(title)
drawtitle.text((TITLE_INDENT + HEX_TITLE_LEFT + nshard * HEX_MANA_COST_IMAGE_SIZE, HEX_TITLE_TOP), os.path.basename(nametitle), NEARLY_WHITE, font=fnt_title)
ncountMB = len(deck_list.mainboard)
ncountSB = len(deck_list.sideboard)
ncount = ncountMB
if ncountSB == 0:
doSideboard = False
if doSideboard:
#create a Sideboard partition
sideboard = Image.new("RGB", (DECK_WIDTH, INNER_ENTRY_HEIGHT), "black")
drawtitle = ImageDraw.Draw(sideboard)
sideboard_name = "Sideboard"
if deck_list.game == decklist.HEX:
sideboard_name = "Reserves"
drawtitle.text(SIDEBOARD_TITLE_POSITION, sideboard_name, NEARLY_WHITE, font=fnt_title)
ncount += ncountSB + 1
#define the size of the canvas, incl. space for the title header
if deck_list.game == decklist.MTG:
deckwidth = DECK_WIDTH
deckheight = INNER_ENTRY_HEIGHT * (ncount + 1)
#for scrolling decklist
deckwidth2 = SCROLLING_DECK_WIDTH * (ncount + 1)
deckheight2 = INNER_ENTRY_HEIGHT
elif deck_list.game == decklist.POKEMON:
deckwidth = HEX_DECK_WIDTH
deckheight = OUTER_ENTRY_HEIGHT * (ncount + 1)
deckwidth2 = SCROLLING_DECK_WIDTH * (ncount + 1)
deckheight2 = INNER_ENTRY_HEIGHT
elif deck_list.game == decklist.HEX:
deckwidth = HEX_MASTER_DECK_WIDTH
deckheight = OUTER_ENTRY_HEIGHT * (ncount + 1)
deckwidth2 = SCROLLING_DECK_WIDTH * (ncount + 1)
deckheight2 = INNER_ENTRY_HEIGHT
#reset the sideboard marker
isSideboard = 0
global deck
deck = Image.new("RGB", (deckwidth, deckheight), "white")
#for scrolling decklist
global deck2
deck2 = Image.new("RGB", (deckwidth2, deckheight2), "white")
deck.paste(title, (0, 0))
#for scrolling decklist
title2 = title.crop((0, 0, SCROLLING_DECK_WIDTH, INNER_ENTRY_HEIGHT))
deck2.paste(title2, (0, 0))
#now read the decklist
if deck_list.game == decklist.MTG:
lands = []
for card in deck_list.mainboard:
#this step checks whether a specific art is requested by the user - provided via the set name
if card.cost == "*":
lands.append(card)
continue
draw_mtg_card(card, nstep)
nstep = nstep + 1
for card in lands:
draw_mtg_card(card, nstep)
nstep = nstep + 1
if doSideboard:
deck.paste(sideboard, (0, INNER_ENTRY_HEIGHT * nstep))
#for scrolling decklist
sideboard2 = sideboard.crop((0, 0, SCROLLING_DECK_WIDTH, INNER_ENTRY_HEIGHT))
deck2.paste(sideboard2, (SCROLLING_DECK_WIDTH * nstep, 0))
nstep = nstep + 1
for card in deck_list.sideboard:
draw_mtg_card(card, nstep)
nstep = nstep + 1
elif deck_list.game == decklist.POKEMON:
for card in deck_list.mainboard:
quantity = card.quantity
lookupScan, displayname = scraper.download_scanPKMN(card.name, card.set, card.collector_num)
img = Image.open(lookupScan)
#check if im has Alpha band...
if img.mode != 'RGBA':
img = img.convert('RGBA')
#resize the gradient to the size of im...
alpha = gradient.resize(img.size, FILTER)
#put alpha in the alpha band of im...
img.putalpha(alpha)
bkgd = Image.new("RGB", img.size, "black")
bkgd.paste(img, (0, 0), mask=img)
cut = bkgd.crop((X_TOP_POKEMON, Y_TOP_POKEMON + POKEMON_BACKGROUND_OFFSET_Y_TOP, X_BOTTOM_POKEMON - POKEMON_BACKGROUND_OFFSET_X_BOTTOM, Y_BOTTOM_POKEMON + POKEMON_BACKGROUND_OFFSET_Y_BOTTOM))
cut = cut.resize((deckwidth, INNER_ENTRY_HEIGHT))
draw = ImageDraw.Draw(cut)
#create text outline
text = str(quantity) + ' ' + displayname
draw.text((POKEMON_TEXT_LEFT - 1, POKEMON_TEXT_TOP - 1), text, BLACK, font=fnt)
draw.text((POKEMON_TEXT_LEFT + 1, POKEMON_TEXT_TOP - 1), text, BLACK, font=fnt)
draw.text((POKEMON_TEXT_LEFT - 1, POKEMON_TEXT_TOP + 1), text, BLACK, font=fnt)
draw.text((POKEMON_TEXT_LEFT + 1, POKEMON_TEXT_TOP + 1), text, BLACK, font=fnt)
#enter text
draw.text((POKEMON_TEXT_LEFT, POKEMON_TEXT_TOP), text, NEARLY_WHITE, font=fnt)
#place the cropped picture of the current card
deck.paste(cut, (0, OUTER_ENTRY_HEIGHT * nstep))
nstep = nstep + 1
elif deck_list.game == decklist.HEX:
banner = Image.new("RGB", (deckheight - OUTER_ENTRY_HEIGHT, HEX_BANNER_TOP), "black")
if len(deck_list.commander) > 0:
cmdr = deck_list.commander[0]
guid = cmdr.collector_num
typeCM = cmdr.set
drawbanner = ImageDraw.Draw(banner)
drawbanner.text(HEX_BANNER_POSITION, str(cmdr.name), NEARLY_WHITE, font=fnt_title)
lookupScan = scraper.download_scanHexCM(cmdr.name, guid, typeCM)
mainguyImg = Image.open(lookupScan)
mainguycut = mainguyImg.crop(HEX_MAINGUY_CROP)
banner = banner.rotate(ROTATE_RIGHT, expand=True)
#check if im has Alpha band...
if mainguycut.mode != 'RGBA':
mainguycut = mainguycut.convert('RGBA')
#resize the gradient to the size of im...
alpha = Hexgradient.resize(mainguycut.size, FILTER)
#put alpha in the alpha band of im...
mainguycut.putalpha(alpha)
banner.paste(mainguycut, (0, 0), mask=mainguycut)
deck.paste(banner, (0, OUTER_ENTRY_HEIGHT))
for card in deck_list.mainboard:
draw_hex_card(card.name, card.collector_num, card.quantity, nstep)
nstep = nstep + 1
if doSideboard:
deck.paste(sideboard, (SIDEBOARD_LEFT, OUTER_ENTRY_HEIGHT * nstep))
nstep = nstep + 1
for card in deck_list.sideboard:
draw_hex_card(card.name, card.collector_num, card.quantity, nstep)
nstep = nstep + 1
if deck_list.game == decklist.MTG:
deck = deck.crop((0, 0, deckwidth - MTG_WIDTH_CROP_RIGHT, deckheight))
deck2 = deck2.crop((0, 0, deckwidth2, deckheight2 - 2))
elif deck_list.game == decklist.POKEMON:
deck = deck.crop((0, 0, deckwidth - POKEMON_WIDTH_CROP_RIGHT, OUTER_ENTRY_HEIGHT * nstep))
elif deck_list.game == decklist.HEX:
deck = deck.crop((0, 0, deckwidth - HEX_WIDTH_CROP_RIGHT, deckheight))
output_path = str(filename)[0:-4] + ".png"
deck.save(output_path)
#for scrolling decklist
output_path2 = str(filename)[0:-4] + "-scroll.png"
deck2.save(output_path2)
altpath = config.Get('options', 'output_path')
if altpath is not None:
deck.save(altpath)
return output_path
| mit | 3,783,695,594,269,186,600 | 37.235656 | 207 | 0.613109 | false |
superfluidity/RDCL3D | code/translator/hot/tosca/tests/test_tosca_autoscaling.py | 1 | 3440 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from toscaparser.nodetemplate import NodeTemplate
from toscaparser.policy import Policy
from toscaparser.tests.base import TestCase
import toscaparser.utils.yamlparser
from translator.hot.tosca.tosca_compute import ToscaCompute
from translator.hot.tosca.tosca_policies_scaling import ToscaAutoscaling
class AutoscalingTest(TestCase):
def _tosca_scaling_test(self, tpl_snippet, expectedprops):
nodetemplates = (toscaparser.utils.yamlparser.
simple_parse(tpl_snippet)['node_templates'])
policies = (toscaparser.utils.yamlparser.
simple_parse(tpl_snippet)['policies'])
name = list(nodetemplates.keys())[0]
policy_name = list(policies[0].keys())[0]
for policy in policies:
tpl = policy[policy_name]
targets = tpl["targets"]
properties = tpl["properties"]
try:
nodetemplate = NodeTemplate(name, nodetemplates)
toscacompute = ToscaCompute(nodetemplate)
toscacompute.handle_properties()
policy = Policy(policy_name, tpl, targets,
properties, "node_templates")
toscascaling = ToscaAutoscaling(policy)
parameters = toscascaling.handle_properties([toscacompute])
self.assertEqual(parameters[0].properties, expectedprops)
except Exception:
raise
def test_compute_with_scaling(self):
tpl_snippet = '''
node_templates:
my_server_1:
type: tosca.nodes.Compute
capabilities:
host:
properties:
num_cpus: 2
disk_size: 10 GB
mem_size: 512 MB
os:
properties:
# host Operating System image properties
architecture: x86_64
type: Linux
distribution: RHEL
version: 6.5
policies:
- asg:
type: tosca.policies.Scaling
description: Simple node autoscaling
targets: [my_server_1]
triggers:
resize_compute:
description: trigger
condition:
constraint: utilization greater_than 50%
period: 60
evaluations: 1
method: average
properties:
min_instances: 2
max_instances: 10
default_instances: 3
increment: 1
'''
expectedprops = {'desired_capacity': 3,
'max_size': 10,
'min_size': 2,
'resource': {'type': 'asg_res.yaml'}}
self._tosca_scaling_test(
tpl_snippet,
expectedprops)
| apache-2.0 | -5,870,668,493,590,456,000 | 36.802198 | 78 | 0.566279 | false |
arseneyr/essentia | test/src/unittest/temporal/test_loudnessebur128.py | 1 | 6832 | #!/usr/bin/env python
# Copyright (C) 2006-2016 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
from essentia_test import *
class TestLoudnessEBUR128(TestCase):
def testRegression(self):
# The test audio files for loudness are provided in EBU Tech 3341
# https://tech.ebu.ch/docs/tech/tech3341.pdf
# M, S, I = -20 +- 0.1 LUFS
filename = join(testdata.audio_dir, 'generated', 'ebur128', '1kHz_sine_-20LUFS-16bit.flac')
audio, samplerate, _, _, _, _ = AudioLoader(filename=filename)()
m, s, i, r = LoudnessEBUR128(sampleRate=samplerate)(audio)
self.assertAlmostEqualVector(m, essentia.array([-20.] * len(m)), 0.1)
self.assertAlmostEqualVector(s, essentia.array([-20.] * len(s)), 0.1)
self.assertAlmostEqual(i, -20., 0.1)
# M, S, I = -26 +- 0.1 LUFS
filename = join(testdata.audio_dir, 'generated', 'ebur128', '1kHz_sine_-26LUFS-16bit.flac')
audio, samplerate, _, _, _, _ = AudioLoader(filename=filename)()
m, s, i, r = LoudnessEBUR128(sampleRate=samplerate)(audio)
self.assertAlmostEqualVector(m, essentia.array([-26.] * len(m)), 0.1)
self.assertAlmostEqualVector(s, essentia.array([-26.] * len(s)), 0.1)
self.assertAlmostEqual(i, -26., 0.1)
# M, S, I = -40 +- 0.1 LUFS
filename = join(testdata.audio_dir, 'generated', 'ebur128', '1kHz_sine_-40LUFS-16bit.flac')
audio, samplerate, _, _, _, _ = AudioLoader(filename=filename)()
m, s, i, r = LoudnessEBUR128(sampleRate=samplerate)(audio)
self.assertAlmostEqualVector(m, essentia.array([-40.] * len(m)), 0.1)
self.assertAlmostEqualVector(s, essentia.array([-40.] * len(s)), 0.1)
self.assertAlmostEqual(i, -40., 0.1)
# M, S, I = -23 +- 0.1 LUFS
filename = join(testdata.audio_dir, 'generated', 'ebur128', 'seq-3341-1-16bit.flac')
audio, samplerate, _, _, _, _ = AudioLoader(filename=filename)()
m, s, i, r = LoudnessEBUR128(sampleRate=samplerate)(audio)
self.assertAlmostEqualVector(m, essentia.array([-23.] * len(m)), 0.1)
self.assertAlmostEqualVector(s, essentia.array([-23.] * len(s)), 0.1)
self.assertAlmostEqual(i, -23., 0.1)
# M, S, I = -33 +- 0.1 LUFS
filename = join(testdata.audio_dir, 'generated', 'ebur128', 'seq-3341-2-16bit.flac')
audio, samplerate, _, _, _, _ = AudioLoader(filename=filename)()
m, s, i, r = LoudnessEBUR128(sampleRate=samplerate)(audio)
self.assertAlmostEqualVector(m, essentia.array([-33.] * len(m)), 0.1)
self.assertAlmostEqualVector(s, essentia.array([-33.] * len(s)), 0.1)
self.assertAlmostEqual(i, -33., 0.1)
# I = -23 +- 0.1 LUFS
filename = join(testdata.audio_dir, 'generated', 'ebur128', 'seq-3341-3-16bit-v02.flac')
audio, samplerate, _, _, _, _ = AudioLoader(filename=filename)()
_, _, i, _ = LoudnessEBUR128(sampleRate=samplerate)(audio)
self.assertAlmostEqual(i, -23., 0.1)
# I = -23 +- 0.1 LUFS
filename = join(testdata.audio_dir, 'generated', 'ebur128', 'seq-3341-4-16bit-v02.flac')
audio, samplerate, _, _, _, _ = AudioLoader(filename=filename)()
_, _, i, _ = LoudnessEBUR128(sampleRate=samplerate)(audio)
self.assertAlmostEqual(i, -23., 0.1)
# I = -23 +- 0.1 LUFS
filename = join(testdata.audio_dir, 'generated', 'ebur128', 'seq-3341-5-16bit-v02.flac')
audio, samplerate, _, _, _, _ = AudioLoader(filename=filename)()
_, _, i, _ = LoudnessEBUR128(sampleRate=samplerate)(audio)
self.assertAlmostEqual(i, -23., 0.1)
# I = -23 +- 0.1 LUFS
filename = join(testdata.audio_dir, 'generated', 'ebur128', 'seq-3341-7_seq-3342-5-16bit.flac')
audio, samplerate, _, _, _, _ = AudioLoader(filename=filename)()
_, _, i, _ = LoudnessEBUR128(sampleRate=samplerate)(audio)
self.assertAlmostEqual(i, -23., 0.1)
# Test audio files for dynamic range are provided in EBU Tech Doc 3342
# https://tech.ebu.ch/docs/tech/tech3342.pdf
# LRA = 10 +- 1 LU
filename = join(testdata.audio_dir, 'generated', 'ebur128', 'seq-3342-1-16bit.flac')
audio, samplerate, _, _, _, _ = AudioLoader(filename=filename)()
_, _, _, r = LoudnessEBUR128(sampleRate=samplerate)(audio)
self.assertAlmostEqual(r, 10., 1.)
# LRA = 5 +- 1 LU
filename = join(testdata.audio_dir, 'generated', 'ebur128', 'seq-3342-2-16bit.flac')
audio, samplerate, _, _, _, _ = AudioLoader(filename=filename)()
_, _, _, r = LoudnessEBUR128(sampleRate=samplerate)(audio)
self.assertAlmostEqual(r, 5., 1.)
# LRA = 20 +- 1 LU
filename = join(testdata.audio_dir, 'generated', 'ebur128', 'seq-3342-3-16bit.flac')
audio, samplerate, _, _, _, _ = AudioLoader(filename=filename)()
_, _, _, r = LoudnessEBUR128(sampleRate=samplerate)(audio)
self.assertAlmostEqual(r, 20., 1.)
# LRA = 15 +- 1 LU
filename = join(testdata.audio_dir, 'generated', 'ebur128', 'seq-3342-4-16bit.flac')
audio, samplerate, _, _, _, _ = AudioLoader(filename=filename)()
_, _, _, r = LoudnessEBUR128(sampleRate=samplerate)(audio)
self.assertAlmostEqual(r, 15., 1.)
def testEmpty(self):
# empty (0,2) array
audio = essentia.array([[1.,1.]])[:-1]
self.assertComputeFails(LoudnessEBUR128(), audio)
def testSilence(self):
audio = essentia.array([[0, 0]] * 44100)
m, s, i, r = LoudnessEBUR128()(audio)
# Momentary and short-term loudness can have values below absolute threshold of -70. LUFS
for x in m:
self.assert_(x <= -70.)
for x in s:
self.assert_(x <= -70.)
self.assertEqual(i, -70.)
self.assertEqual(r, 0.)
suite = allTests(TestLoudnessEBUR128)
if __name__ == '__main__':
TextTestRunner(verbosity=2).run(suite)
| agpl-3.0 | 2,107,436,726,347,414,000 | 46.453901 | 103 | 0.600117 | false |
GregSilverman/cohort_rest_api | rest_api/build_atom.py | 1 | 14183 | #!/usr/bin/env python
from sqlalchemy import between
from sqlalchemy.sql import and_, label
from app import db, models
import htsql_methods as hsql
Clinical = models.ClinicalData
Attribute = models.Attribute
"""
Example SQL atomic query for modified nested model:
select *
from
clinical_data cd inner join
(select patient_sid, lft, rgt, attribute_id
from clinical_data
where attribute_id = 'ID FOR DEMOGRAPHICS') ldl
on
cd.patient_sid = ldl.patient_sid
and
cd.lft >= ldl.lft
and
cd.rgt <= ldl.rgt
Procedure creates an atomic query, defined by:
i%2 = 0 -> initial subquery of bucket/attribute
From above example, the initial subquery that pulls the bucket:
select patient_sid, lft, rgt, attribute_id
from clinical_data
where attribute_id = 'ID FOR DEMOGRAPHICS')
i%2 != 0 -> drill down to specific bucket attribute
URL comprised of a single atom will look like:
atom: demographics:'id for demographics';eq;demographics;demographics:'id for sex';eq;M
NB: this is attached to incoming requests as a JSON document
element part 1: bucket
type:key -> demographics:attribute.id for attribute.value = demographics
comparator -> eq
attribute value (bucket) -> demographics
element part 2: bucket item
type:key -> demographics:attribute.id for attribute.value = sex
comparator -> eq
attribute value -> M
molecule made up of two atoms: (test_code:'id for test_code';eq;13457-7;test_code:'id for result_value_num';ge;160
&
basic_vitals:'id for blood_pressure_systolic';eq;blood_pressure_systolic;basic_vitals:'id for blood_pressure_systolic';ge;160)
example query:
select *
from
clinical_data cd inner join
(select patient_sid, lft as lft_ldl, rgt as rgt_ldl
from clinical_data
where string_value = '13457-7' and attribute_id = '12345') ldl
on
cd.patient_sid = ldl.patient_sid
and
cd.lft >= ldl.lft_ldl
and
cd.rgt <= ldl.rgt_ldl
where double_value >= 160 and attribute_id = '34567'
order by cd.lft;
"""
# assemble canonical atomic query using parsed components from URL payload
def make_atomic_query(key_type, key, comparator, value, comparator_date, value_date):
a = [] # atomic array of query elements
date = []
whole = [] # entire data set with no constraints
    transform = ['medications', 'demographics']  # these buckets need special characters removed for querying
numeric = ['int', 'float', 'double']
char = ['string']
# initialize lists
for i in xrange(0, 2):
a.append('')
whole.append('')
if comparator[i] == 'between':
arg = value[i].split(',', 2)
if comparator_date[i]:
if comparator_date[i] == 'between':
date = value_date[i].split(',', 2)
# create queries
for i in xrange(0, 2):
# assemble base query
if i == 0:
a[i] = db.session.query(Clinical.patient_sid,
Clinical.lft,
Clinical.rgt,
Clinical.attribute_id)
else:
a[i] = db.session.query(Clinical.patient_sid,
Clinical.lft,
Clinical.rgt,
label('attribute_value', Clinical.attribute_id),
Clinical.double_value,
Clinical.string_value)
'''
equivalent to:
select patient_sid, lft, rgt
from clinical_data
'''
# grab the desired bucket
if i == 0:
# grab bucket by attribute
a[i] = a[i].filter(Clinical.attribute_id == int(key[i]))
'''
equivalent to:
select patient_sid, lft, rgt
from clinical_data
where attribute_id = '12345'
'''
# NB: these are special characters for building the parse tree -> clean them
if key_type[i] in transform:
name = value[i].replace('_', ' ').\
replace('{', '('). \
replace('}', ')')
else: name = value[i]
# grab specific bucket
a[i] = a[i].filter(Clinical.string_value.op(comparator[i])(name)).subquery()
'''
equivalent to:
select patient_sid, lft, rgt
from clinical_data
where string_value = '13457-7' and attribute_id = '12345'
'''
# pull item from bucket by attribute name with criterion value
elif i == 1:
# grab attribute of interest by name
'''
a[i] = a[i].join(a[i-1],
and_(Clinical.patient_sid == a[i-1].c.patient_sid,
Clinical.lft >= a[i-1].c.lft,
Clinical.rgt <= a[i-1].c.rgt)).\
filter(Clinical.attribute_id == key[i])
'''
a[i] = a[i].join(a[i-1],
and_(Clinical.patient_sid == a[i-1].c.patient_sid,
Clinical.attribute_id == int(key[i]))). \
filter(Clinical.lft >= a[i-1].c.lft,
Clinical.rgt <= a[i-1].c.rgt)
# unconstrained data set for printing all records
whole[i] = a[i]
'''
equivalent to:
select patient_sid, lft, rgt
from
clinical_data cd inner join
(select patient_sid, lft, rgt
from clinical_data
where string_value = '13457-7' and attribute_id = '12345') ldl
on
cd.patient_sid = ldl.patient_sid
and
cd.lft >= ldl.lft
and
cd.rgt <= ldl.rgt
where attribute_id = '34567';
'''
# flag to control output of all data for desired bucket
print_all = False
# for all data for bucket, no filtering is necessary
if 'OUT' in comparator[i]:
print_all = True
if not 'OUT' in comparator[i]:
qstring = "/attribute{data_type.name}?id='" + key[i] + "'"
data_type = hsql.get_data(qstring)
# first: convert to correct data type for utilization of proper covering index
# NB: default is string
if data_type in numeric:
if comparator[i] != 'between':
a[i] = a[i].filter(Clinical.double_value.op(comparator[i])((float(value[i]))))
else:
a[i] = a[i].filter(between(Clinical.double_value,
float(arg[0]),
float(arg[1])))
elif data_type in char:
# clean up incoming string values representative of specific criterion value
if key_type[i] in transform:
name = value[i].replace('_', ' ').\
replace('{', '('). \
replace('}', ')')
else: name = value[i]
a[i] = a[i].filter(Clinical.string_value.op(comparator[i])(name))
'''
equivalent to:
select patient_sid, lft, rgt
from
clinical_data cd inner join
(select attribute_id, patient_sid, lft, rgt
from clinical_data
where string_value = '13457-7' and attribute_id = '12345') ldl
on
cd.patient_sid = ldl.patient_sid
and
cd.lft >= ldl.lft
and
cd.rgt <= ldl.rgt
where double_value >= 160 and attribute_id = '34567';
'''
# query by date
if comparator_date[i]:
if comparator_date[i] == 'between':
a[i] = a[i].filter(between(Clinical.event_date,
date[0],
date[1]))
else:
a[i] = a[i].filter(Clinical.event_date.op(comparator_date[i])([value_date[i]]))
'''
equivalent to:
select patient_sid, lft, rgt
from
clinical_data cd inner join
(select attribute_id, patient_sid, lft, rgt
from clinical_data
where string_value = '13457-7' and attribute_id = '12345') ldl
on
cd.patient_sid = ldl.patient_sid
and
cd.lft >= ldl.lft
and
cd.rgt <= ldl.rgt
where double_value >= 160 and attribute_id = '34567'
and cd.event_date >= '1/1/1970';
'''
# construct final subquery
a[i] = a[i].subquery()
else:
print 'ERROR'
return a[1], whole[1], print_all
# parse query components: atoms -> particles
# TODO future: implement more general method of mapping using
# http://stackoverflow.com/questions/14845196/dynamically-constructing-filters-in-sqlalchemy
# TODO: implement as parallel loop
def parse_atomic_particles(atom):
# delimiter between atomic query particles: key, comparator, value
# used to split atom into particles
separator = ';'
# initialize lists
a = [] # list element for returned atoms
whole = []
for i in xrange(0, 1):
a.append('')
whole.append('')
for j in xrange(0, 1):
# initialize query components
particles = atom.split(separator, 6) # atom consists of 6 query components to be parsed, aka particles
key_type = [] # array of query bucket names
key = [] # array of key bucket ids
comparator = [] # array of comparators
value = [] # array of values
comparator_date = [] # array of date comparators
value_date = [] # array of date components
for i in xrange(len(particles)):
particle = particles[i]
# Each atomic unit consists of 6 "particles" delimited by a ';',
# where each particle consists of a:
#
# -> key: representing a bucket name by an attribute
# -> comparator: representing the logical operation to perform, NB: for bucket this should always be 'eq'
# -> value: name of bucket
# -> key: representing an item within the bucket to query by attribute name
# -> comparator: representing the logical operation to perform on given attribute compared to given value
# -> value: attribute item's value for comparison
# map particle components to appropriate lists
value_comparator_list = ['eq',
'grte',
'lete',
'bt',
'gt',
'lt',
'prn']
date_comparator_list = ['between',
'grt',
'lss']
comparator_mapper = [
(':', particle.split(':')),
('eq', '='),
('grte', '>='),
('lete', '<='),
('bt', 'between'),
('between', 'between'),
('grt', '>='),
('lss', '<='),
('lt', '<'),
('gt', '>'),
('prn', 'OUT')
]
if any(ext in particle for ext in value_comparator_list) or \
any(ext in particle for ext in date_comparator_list) or \
':' in particle:
def apply_mapper(particle):
for item, action in comparator_mapper:
if item in particle:
if ':' in particle:
key_type.append(action[0])
key.append(action[1])
break
elif any(ext in particle for ext in value_comparator_list):
comparator.append(action)
break
# date comparison given in value particle
elif any(ext in particle for ext in date_comparator_list):
# grab operator for date comparison from list
date_stuff = particle.split(',DATE,')
value.append(date_stuff[0])
comparator_date.append(action)
# get dates and split in the case of a between comparison
date = date_stuff[1].split(',')
if len(date) == 2:
temp = date[1]
else:
temp = date[1] + ',' + date[2]
value_date.append(temp)
break
else:
                                print 'error'
apply_mapper(particle)
# if DATE component is not part of value particle use way back in history as default
else:
comparator_date.append('>=')
value_date.append('1776-01-01')
value.append(particle)
a[j], whole[j], print_all = make_atomic_query(key_type, key, comparator, value, comparator_date, value_date)
return a[0], whole[0], print_all
| gpl-3.0 | -4,821,364,456,509,184,000 | 34.019753 | 131 | 0.476345 | false |
Iconoclasteinc/tgit | tgit/tagging/embedded_containers.py | 1 | 1536 | # -*- coding: utf-8 -*-
#
# TGiT, Music Tagger for Professionals
# Copyright (C) 2013 Iconoclaste Musique Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from collections import defaultdict
from tgit.metadata import Metadata
from tgit import fs
from .flac_container import FlacContainer
from .id3_container import ID3Container
class EmptyContainer:
# noinspection PyUnusedLocal
@staticmethod
def load(filename):
return Metadata()
@staticmethod
def save(filename, metadata):
pass
_containers = defaultdict(EmptyContainer, {".mp3": ID3Container(), ".flac": FlacContainer()})
def _container_for(filename):
return _containers[fs.ext(filename)]
def load_metadata(filename):
return _container_for(filename).load(filename)
def save_metadata(filename, metadata):
return _container_for(filename).save(filename, metadata)
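# Illustrative usage sketch, not part of the original module; the file name is a
# placeholder. Unknown extensions fall back to EmptyContainer, which returns an
# empty Metadata on load and ignores saves.
def _example_roundtrip(filename="track.mp3"):
    metadata = load_metadata(filename)
    save_metadata(filename, metadata)
    return metadata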
| gpl-3.0 | 2,475,258,062,397,315,000 | 29.72 | 93 | 0.747396 | false |
crakensio/django_training | lib/python2.7/site-packages/bpython/curtsiesfrontend/interaction.py | 1 | 4789 | import greenlet
import time
import curtsies.events as events
from bpython.repl import Interaction as BpythonInteraction
from bpython.curtsiesfrontend.manual_readline import char_sequences as rl_char_sequences
class StatusBar(BpythonInteraction):
"""StatusBar and Interaction for Repl
    Control passes back and forth between callers of the interaction API
    (notify, confirm, file_prompt), such as bpython.Repl.write2file, and events
    on the main thread via those calls and self.wait_for_request_or_notify.
Calling one of these three is required for the main thread to regain control!
    This is probably a terrible idea; it would be better to rewrite this
    functionality in an evented or callback style, but it exists to integrate
    with the existing bpython.Repl code.
"""
def __init__(self, initial_message='', permanent_text="", refresh_request=lambda: None):
self._current_line = ''
self.cursor_offset_in_line = 0
self.in_prompt = False
self.in_confirm = False
self.waiting_for_refresh = False
self.prompt = ''
self._message = initial_message
self.message_start_time = time.time()
self.message_time = 3
self.permanent_text = permanent_text
self.main_greenlet = greenlet.getcurrent()
self.request_greenlet = None
self.refresh_request = refresh_request
@property
def has_focus(self):
return self.in_prompt or self.in_confirm or self.waiting_for_refresh
def message(self, msg):
self.message_start_time = time.time()
self._message = msg
def _check_for_expired_message(self):
if self._message and time.time() > self.message_start_time + self.message_time:
self._message = ''
def process_event(self, e):
"""Returns True if shutting down"""
assert self.in_prompt or self.in_confirm or self.waiting_for_refresh
if isinstance(e, events.RefreshRequestEvent):
self.waiting_for_refresh = False
self.request_greenlet.switch()
elif isinstance(e, events.PasteEvent):
for ee in e.events:
                self.add_normal_character(ee if len(ee) == 1 else ee[-1])  # strip control sequence
elif e in rl_char_sequences:
self.cursor_offset_in_line, self._current_line = rl_char_sequences[e](self.cursor_offset_in_line, self._current_line)
elif e == "":
raise KeyboardInterrupt()
elif e == "":
raise SystemExit()
elif self.in_prompt and e in ("\n", "\r"):
line = self._current_line
self.escape()
self.request_greenlet.switch(line)
elif self.in_confirm:
if e in ('y', 'Y'):
self.request_greenlet.switch(True)
else:
self.request_greenlet.switch(False)
self.escape()
elif e in ['\x1b']:
self.request_greenlet.switch(False)
self.escape()
else: # add normal character
self.add_normal_character(e)
def add_normal_character(self, e):
self._current_line = (self._current_line[:self.cursor_offset_in_line] +
e +
self._current_line[self.cursor_offset_in_line:])
self.cursor_offset_in_line += 1
def escape(self):
"""unfocus from statusbar, clear prompt state, wait for notify call"""
self.in_prompt = False
self.in_confirm = False
self.prompt = ''
self._current_line = ''
@property
def current_line(self):
self._check_for_expired_message()
if self.in_prompt:
return self.prompt + self._current_line
if self.in_confirm:
return self.prompt
if self._message:
return self._message
return self.permanent_text
# interaction interface - should be called from other greenlets
def notify(self, msg, n=3):
self.request_greenlet = greenlet.getcurrent()
self.message_time = n
self.message(msg)
self.waiting_for_refresh = True
self.refresh_request()
self.main_greenlet.switch(msg)
# below Really ought to be called from greenlets other than main because they block
def confirm(self, q):
"""Expected to return True or False, given question prompt q"""
self.request_greenlet = greenlet.getcurrent()
self.prompt = q
self.in_confirm = True
return self.main_greenlet.switch(q)
def file_prompt(self, s):
"""Expected to return a file name, given """
self.request_greenlet = greenlet.getcurrent()
self.prompt = s
self.in_prompt = True
result = self.main_greenlet.switch(s)
return result
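# Illustrative sketch, not part of the original module: how a request greenlet
# would use the interaction API. `bar` is assumed to be a StatusBar wired into
# the repl's main greenlet; this helper is not called anywhere.
def _example_request(bar):
    if bar.confirm('A file already exists. Overwrite (y/N)? '):
        filename = bar.file_prompt('Save to file (Esc to cancel): ')
        bar.notify('Saved to %s' % filename)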
| cc0-1.0 | -3,079,365,304,622,484,500 | 37.007937 | 129 | 0.618083 | false |
cltrudeau/django-awl | awl/templatetags/awltags.py | 1 | 4065 | # awl.templatetags.awltags.py
from django import template
register = template.Library()
# ============================================================================
@register.filter
def getitem(dictionary, keyvar):
"""Custom django template filter that allows access to an item of a
dictionary through the key contained in a template variable. Example:
.. code-block:: python
context_data = {
'data':{
'foo':'bar',
},
'key':'foo',
}
template = Template('{% load awltags %}{{data|getitem:key}}')
context = Context(context_data)
result = template.render(context)
>>> result
'bar'
.. note::
Any KeyErrors are ignored and return an empty string
"""
try:
return dictionary[keyvar]
except KeyError:
return ''
# ----------------------------------------------------------------------------
@register.tag
def accessor(parser, token):
"""This template tag is used to do complex nested attribute accessing of
an object. The first parameter is the object being accessed, subsequent
    parameters are one of:
* a variable in the template context
* a literal in the template context
* either of the above surrounded in square brackets
    For each variable or literal parameter given, `getattr` is called on the
    object, chaining to the next parameter. For any square-bracket enclosed
    items the access is done through a dictionary lookup.
Example::
{% accessor car where 'front_seat' [position] ['fabric'] %}
The above would result in the following chain of commands:
.. code-block:: python
ref = getattr(car, where)
ref = getattr(ref, 'front_seat')
ref = ref[position]
return ref['fabric']
This tag also supports "as" syntax, putting the results into a template
variable::
{% accessor car 'interior' as foo %}
"""
contents = token.split_contents()
tag = contents[0]
if len(contents) < 3:
raise template.TemplateSyntaxError(('%s requires at least two '
'arguments: object and one or more getattr parms') % tag)
as_var = None
if len(contents) >= 4:
# check for "as" syntax
if contents[-2] == 'as':
as_var = contents[-1]
contents = contents[:-2]
return AccessorNode(contents[1], contents[2:], as_var)
class AccessorNode(template.Node):
def __init__(self, obj_name, parms, as_var):
self.obj_name = obj_name
self.parms = parms
self.as_var = as_var
def render(self, context):
try:
ref = context[self.obj_name]
for parm in self.parms:
if parm[0] == '"' or parm[0] == "'":
# parm is a literal
ref = getattr(ref, parm[1:-1])
elif parm[0] == '[':
# parm is a dictionary lookup
if parm[1] == '"' or parm[1] == "'":
# dict key is a literal
ref = ref[parm[2:-2]]
else:
# dict key is a template var
key = context[parm[1:-1]]
ref = ref[key]
else:
# parm is a template var
attr = context[parm]
ref = getattr(ref, attr)
if self.as_var:
context[self.as_var] = ref
return ''
return ref
except:
# any lookup errors should result in empty
if self.as_var:
context[self.as_var] = ''
return ''
# ----------------------------------------------------------------------------
@register.simple_tag
def nop(*args):
"""This tag does nothing. Useful for a comment without having to build a
full comment block. All parameters are ignored.
Example::
{% nop 'this is a string' %}
"""
return ''
| mit | -5,902,167,371,774,256,000 | 28.456522 | 78 | 0.512423 | false |
squidsrc/python-rocksdb | setup.py | 1 | 1079 | from setuptools import setup, find_packages
from distutils.extension import Extension
from Cython.Build import cythonize
extension_defaults = {
'extra_compile_args': [
'-std=gnu++11',
'-O3',
'-Wall',
'-Wextra',
'-Wconversion',
'-fno-strict-aliasing'
],
'language': 'c++',
'libraries': [
'rt',
'snappy',
'bz2',
'z'
],
'extra_objects': ['librocksdb.a']
}
mod1 = Extension(
'rocksdb._rocksdb',
['rocksdb/_rocksdb.pyx'],
**extension_defaults
)
setup(
name="pyrocksdb",
version='0.3',
description="Python bindings for RocksDB",
keywords='rocksdb',
author='Stephan Hofmockel',
author_email="Use the github issues",
url="https://github.com/stephan-hof/pyrocksdb",
license='BSD License',
install_requires=[
'setuptools',
'Cython>=0.20',
],
package_dir={'rocksdb': 'rocksdb'},
packages=find_packages('.'),
ext_modules=cythonize([mod1]),
test_suite='rocksdb.tests',
include_package_data=True
)
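# A typical local build of this extension, assuming librocksdb.a, the
# snappy/bz2/z libraries and a Cython toolchain are available, would be
# something like:
#   python setup.py build_ext --inplace
#   python setup.py install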
| bsd-3-clause | 5,162,525,097,040,509,000 | 21.479167 | 51 | 0.578313 | false |
nateprewitt/pipenv | pipenv/project.py | 1 | 17449 | # -*- coding: utf-8 -*-
import json
import os
import re
import sys
import base64
import hashlib
import contoml
import delegator
import pipfile
import toml
from .utils import (
mkdir_p, convert_deps_from_pip, pep423_name, recase_file,
find_requirements, is_file, is_vcs, python_version, cleanup_toml,
is_installable_file, is_valid_url
)
from .environments import PIPENV_MAX_DEPTH, PIPENV_VENV_IN_PROJECT
from .environments import PIPENV_VIRTUALENV, PIPENV_PIPFILE
if PIPENV_PIPFILE:
if not os.path.isfile(PIPENV_PIPFILE):
raise RuntimeError('Given PIPENV_PIPFILE is not found!')
else:
PIPENV_PIPFILE = os.path.abspath(PIPENV_PIPFILE)
class Project(object):
"""docstring for Project"""
def __init__(self, chdir=True):
super(Project, self).__init__()
self._name = None
self._virtualenv_location = None
self._download_location = None
self._proper_names_location = None
self._pipfile_location = None
self._requirements_location = None
self._original_dir = os.path.abspath(os.curdir)
# Hack to skip this during pipenv run, or -r.
if ('run' not in sys.argv) and chdir:
try:
os.chdir(self.project_directory)
except (TypeError, AttributeError):
pass
def path_to(self, p):
"""Returns the absolute path to a given relative path."""
if os.path.isabs(p):
return p
return os.sep.join([self._original_dir, p])
def _build_package_list(self, package_section):
"""Returns a list of packages for pip-tools to consume."""
ps = {}
# TODO: Separate the logic for showing packages from the filters for supplying pip-tools
for k, v in self.parsed_pipfile.get(package_section, {}).items():
# Skip editable VCS deps.
if hasattr(v, 'keys'):
                # When a vcs url is given without editable it only appears as a key
# Eliminate any vcs, path, or url entries which are not editable
# Since pip-tools can't do deep resolution on them, even setuptools-installable ones
if (is_vcs(v) or is_vcs(k) or (is_installable_file(k) or is_installable_file(v)) or
any((prefix in v and
(os.path.isfile(v[prefix]) or is_valid_url(v[prefix])))
for prefix in ['path', 'file'])):
# If they are editable, do resolve them
if 'editable' not in v:
continue
else:
ps.update({k: v})
else:
ps.update({k: v})
else:
# Since these entries have no attributes we know they are not editable
# So we can safely exclude things that need to be editable in order to be resolved
# First exclude anything that is a vcs entry either in the key or value
if not (any(is_vcs(i) for i in [k, v]) or
# Then exclude any installable files that are not directories
# Because pip-tools can resolve setup.py for example
any(is_installable_file(i) for i in [k, v]) or
# Then exclude any URLs because they need to be editable also
# Things that are excluded can only be 'shallow resolved'
any(is_valid_url(i) for i in [k, v])):
ps.update({k: v})
return ps
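    # Illustrative sketch with hypothetical Pipfile entries: given
    #   requests = "*"
    #   lib-a = {git = "https://example.com/lib-a.git", editable = true}
    #   lib-b = {git = "https://example.com/lib-b.git"}
    # _build_package_list returns 'requests' and the editable 'lib-a', while
    # the non-editable VCS entry 'lib-b' is skipped, since pip-tools could
    # only shallow-resolve it.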
@property
def name(self):
if self._name is None:
self._name = self.pipfile_location.split(os.sep)[-2]
return self._name
@property
def pipfile_exists(self):
return bool(self.pipfile_location)
@property
def required_python_version(self):
if self.pipfile_exists:
required = self.parsed_pipfile.get('requires', {}).get('python_full_version')
if not required:
required = self.parsed_pipfile.get('requires', {}).get('python_version')
if required != "*":
return required
@property
def project_directory(self):
if self.pipfile_location is not None:
return os.path.abspath(os.path.join(self.pipfile_location, os.pardir))
else:
return None
@property
def requirements_exists(self):
return bool(self.requirements_location)
@property
def virtualenv_exists(self):
# TODO: Decouple project from existence of Pipfile.
if self.pipfile_exists and os.path.exists(self.virtualenv_location):
if os.name == 'nt':
extra = ['Scripts', 'activate.bat']
else:
extra = ['bin', 'activate']
return os.path.isfile(os.sep.join([self.virtualenv_location] + extra))
return False
@property
def virtualenv_name(self):
        # Replace dangerous characters with '_'. The length of the sanitized
        # project name is limited to 42 because of a Linux kernel limit
#
# 42 = 127 - len('/home//.local/share/virtualenvs//bin/python2') - 32 - len('-HASHHASH')
#
# 127 : BINPRM_BUF_SIZE - 1
# 32 : Maximum length of username
#
# References:
# https://www.gnu.org/software/bash/manual/html_node/Double-Quotes.html
# http://www.tldp.org/LDP/abs/html/special-chars.html#FIELDREF
# https://github.com/torvalds/linux/blob/2bfe01ef/include/uapi/linux/binfmts.h#L18
sanitized = re.sub(r'[ $`!*@"\\\r\n\t]', '_', self.name)[0:42]
# Hash the full path of the pipfile
hash = hashlib.sha256(self.pipfile_location.encode()).digest()[:6]
encoded_hash = base64.urlsafe_b64encode(hash).decode()
# If the pipfile was located at '/home/user/MY_PROJECT/Pipfile',
# the name of its virtualenv will be 'my-project-wyUfYPqE'
return sanitized + '-' + encoded_hash
@property
def virtualenv_location(self):
# if VIRTUAL_ENV is set, use that.
if PIPENV_VIRTUALENV:
return PIPENV_VIRTUALENV
# Use cached version, if available.
if self._virtualenv_location:
return self._virtualenv_location
# The user wants the virtualenv in the project.
if not PIPENV_VENV_IN_PROJECT:
c = delegator.run('pew dir "{0}"'.format(self.virtualenv_name))
loc = c.out.strip()
# Default mode.
else:
loc = os.sep.join(self.pipfile_location.split(os.sep)[:-1] + ['.venv'])
self._virtualenv_location = loc
return loc
@property
def virtualenv_src_location(self):
loc = os.sep.join([self.virtualenv_location, 'src'])
mkdir_p(loc)
return loc
@property
def download_location(self):
if self._download_location is None:
loc = os.sep.join([self.virtualenv_location, 'downloads'])
self._download_location = loc
# Create the directory, if it doesn't exist.
mkdir_p(self._download_location)
return self._download_location
@property
def proper_names_location(self):
if self._proper_names_location is None:
loc = os.sep.join([self.virtualenv_location, 'pipenv-proper-names.txt'])
self._proper_names_location = loc
# Create the database, if it doesn't exist.
open(self._proper_names_location, 'a').close()
return self._proper_names_location
@property
def proper_names(self):
with open(self.proper_names_location) as f:
return f.read().splitlines()
def register_proper_name(self, name):
"""Registers a proper name to the database."""
with open(self.proper_names_location, 'a') as f:
f.write('{0}\n'.format(name))
@property
def pipfile_location(self):
if PIPENV_PIPFILE:
return PIPENV_PIPFILE
if self._pipfile_location is None:
try:
loc = pipfile.Pipfile.find(max_depth=PIPENV_MAX_DEPTH)
except RuntimeError:
loc = None
self._pipfile_location = loc
return self._pipfile_location
@property
def requirements_location(self):
if self._requirements_location is None:
try:
loc = find_requirements(max_depth=PIPENV_MAX_DEPTH)
except RuntimeError:
loc = None
self._requirements_location = loc
return self._requirements_location
@property
def parsed_pipfile(self):
# Open the pipfile, read it into memory.
with open(self.pipfile_location) as f:
contents = f.read()
# If any outline tables are present...
if ('[packages.' in contents) or ('[dev-packages.' in contents):
data = toml.loads(contents)
# Convert all outline tables to inline tables.
for section in ('packages', 'dev-packages'):
for package in data.get(section, {}):
# Convert things to inline tables — fancy :)
if hasattr(data[section][package], 'keys'):
_data = data[section][package]
data[section][package] = toml._get_empty_inline_table(dict)
data[section][package].update(_data)
            # We lose comments here, but it's for the best.
try:
return contoml.loads(toml.dumps(data, preserve=True))
except RuntimeError:
return toml.loads(toml.dumps(data, preserve=True))
else:
# Fallback to toml parser, for large files.
try:
return contoml.loads(contents)
except Exception:
return toml.loads(contents)
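    # Sketch of the conversion above, using a hypothetical package: an
    # outline table such as
    #   [packages.requests]
    #   version = "*"
    # is rewritten to the inline form
    #   requests = {version = "*"}
    # before being handed back to contoml/toml.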
@property
def _pipfile(self):
"""Pipfile divided by PyPI and external dependencies."""
pfile = self.parsed_pipfile
for section in ('packages', 'dev-packages'):
p_section = pfile.get(section, {})
for key in list(p_section.keys()):
# Normalize key name to PEP 423.
norm_key = pep423_name(key)
p_section[norm_key] = p_section.pop(key)
return pfile
@property
def settings(self):
"""A dictionary of the settings added to the Pipfile."""
return self.parsed_pipfile.get('pipenv', {})
def update_settings(self, d):
settings = self.settings
changed = False
for new in d:
if new not in settings:
settings[new] = d[new]
changed = True
if changed:
p = self.parsed_pipfile
p['pipenv'] = settings
# Write the changes to disk.
self.write_toml(p)
@property
def _lockfile(self):
"""Pipfile.lock divided by PyPI and external dependencies."""
pfile = pipfile.load(self.pipfile_location)
lockfile = json.loads(pfile.lock())
for section in ('default', 'develop'):
lock_section = lockfile.get(section, {})
for key in list(lock_section.keys()):
norm_key = pep423_name(key)
lockfile[section][norm_key] = lock_section.pop(key)
return lockfile
@property
def lockfile_location(self):
return '{0}.lock'.format(self.pipfile_location)
@property
def lockfile_exists(self):
return os.path.isfile(self.lockfile_location)
@property
def lockfile_content(self):
with open(self.lockfile_location) as lock:
return json.load(lock)
@property
def vcs_packages(self):
"""Returns a list of VCS packages, for not pip-tools to consume."""
ps = {}
for k, v in self.parsed_pipfile.get('packages', {}).items():
if is_vcs(v) or is_vcs(k):
ps.update({k: v})
return ps
@property
def vcs_dev_packages(self):
"""Returns a list of VCS packages, for not pip-tools to consume."""
ps = {}
for k, v in self.parsed_pipfile.get('dev-packages', {}).items():
if is_vcs(v) or is_vcs(k):
ps.update({k: v})
return ps
@property
def all_packages(self):
"""Returns a list of all packages."""
p = dict(self.parsed_pipfile.get('dev-packages', {}))
p.update(self.parsed_pipfile.get('packages', {}))
return p
@property
def packages(self):
"""Returns a list of packages, for pip-tools to consume."""
return self._build_package_list('packages')
@property
def dev_packages(self):
"""Returns a list of dev-packages, for pip-tools to consume."""
return self._build_package_list('dev-packages')
def touch_pipfile(self):
"""Simply touches the Pipfile, for later use."""
with open('Pipfile', 'a'):
os.utime('Pipfile', None)
@property
def pipfile_is_empty(self):
if not self.pipfile_exists:
return True
with open(self.pipfile_location, 'r') as f:
if not f.read():
return True
return False
def create_pipfile(self, python=None):
"""Creates the Pipfile, filled with juicy defaults."""
data = {
# Default source.
u'source': [
{u'url': u'https://pypi.python.org/simple', u'verify_ssl': True, 'name': 'pypi'}
],
# Default packages.
u'packages': {},
u'dev-packages': {},
}
# Default requires.
if python:
data[u'requires'] = {'python_version': python_version(python)[:len('2.7')]}
self.write_toml(data, 'Pipfile')
def write_toml(self, data, path=None):
"""Writes the given data structure out as TOML."""
if path is None:
path = self.pipfile_location
try:
formatted_data = contoml.dumps(data).rstrip()
except Exception:
for section in ('packages', 'dev-packages'):
for package in data[section]:
# Convert things to inline tables — fancy :)
if hasattr(data[section][package], 'keys'):
_data = data[section][package]
data[section][package] = toml._get_empty_inline_table(dict)
data[section][package].update(_data)
formatted_data = toml.dumps(data).rstrip()
formatted_data = cleanup_toml(formatted_data)
with open(path, 'w') as f:
f.write(formatted_data)
@property
def sources(self):
if self.lockfile_exists:
meta_ = self.lockfile_content['_meta']
sources_ = meta_.get('sources')
if sources_:
return sources_
if 'source' in self.parsed_pipfile:
return self.parsed_pipfile['source']
else:
return [{u'url': u'https://pypi.python.org/simple', u'verify_ssl': True, 'name': 'pypi'}]
def get_source(self, name=None, url=None):
for source in self.sources:
if name:
if source.get('name') == name:
return source
elif url:
if source.get('url') in url:
return source
def destroy_lockfile(self):
"""Deletes the lockfile."""
try:
return os.remove(self.lockfile_location)
except OSError:
pass
def remove_package_from_pipfile(self, package_name, dev=False):
# Read and append Pipfile.
p = self._pipfile
package_name = pep423_name(package_name)
key = 'dev-packages' if dev else 'packages'
if key in p and package_name in p[key]:
del p[key][package_name]
# Write Pipfile.
self.write_toml(recase_file(p))
def add_package_to_pipfile(self, package_name, dev=False):
# Read and append Pipfile.
p = self._pipfile
# Don't re-capitalize file URLs or VCSs.
converted = convert_deps_from_pip(package_name)
converted = converted[[k for k in converted.keys()][0]]
if not (is_file(package_name) or is_vcs(converted) or 'path' in converted):
package_name = pep423_name(package_name)
key = 'dev-packages' if dev else 'packages'
# Set empty group if it doesn't exist yet.
if key not in p:
p[key] = {}
package = convert_deps_from_pip(package_name)
package_name = [k for k in package.keys()][0]
# Add the package to the group.
p[key][package_name] = package[package_name]
# Write Pipfile.
self.write_toml(p)
def add_index_to_pipfile(self, index):
"""Adds a given index to the Pipfile."""
# Read and append Pipfile.
p = self._pipfile
source = {'url': index, 'verify_ssl': True}
# Add the package to the group.
if 'source' not in p:
p['source'] = [source]
else:
p['source'].append(source)
# Write Pipfile.
self.write_toml(p)
def recase_pipfile(self):
self.write_toml(recase_file(self._pipfile))
| mit | 562,355,048,727,634,100 | 32.41954 | 101 | 0.562224 | false |
mrcslws/nupic.research | projects/dynamic_sparse/runs/run_test.py | 1 | 1756 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2019, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import os
from nupic.research.frameworks.dynamic_sparse.common.utils import run_ray
# alternative initialization based on configuration
exp_config = dict(
device="cuda",
network="resnet18",
dataset_name="CIFAR10",
input_size=(3, 32, 32),
num_classes=10,
stats_mean=(0.4914, 0.4822, 0.4465),
stats_std=(0.2023, 0.1994, 0.2010),
model="SparseModel",
data_dir="~/nta/data",
on_perc=0.2,
batch_size_train=10,
batch_size_test=10,
)
# run
tune_config = dict(
name=__file__,
num_samples=1,
local_dir=os.path.expanduser("~/nta/results"),
checkpoint_freq=0,
checkpoint_at_end=False,
stop={"training_iteration": 10},
resources_per_trial={"cpu": 1, "gpu": 1},
verbose=2,
)
run_ray(tune_config, exp_config)
| agpl-3.0 | 5,158,335,504,190,792,000 | 31.518519 | 73 | 0.646925 | false |
wendlers/scratch-pynetsense | example-src/WrappedRemoteSensor.py | 1 | 2457 | ##
# This file is part of the Scratch Remote Sensor (SRS) Library project
#
# Copyright (C) 2012 Stefan Wendler <[email protected]>
#
# The SRS Library is free software; you can redistribute
# it and/or modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# SRS Library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with the JSherpa firmware; if not, write to the Free
# Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
# 02111-1307 USA.
##
'''
This file is part of the Scratch Remote Sensor Library project
'''
import time
import socket
import logging
from scratch.remotesensor import RemoteSensor, DEFAULT_HOST, DEFAULT_PORT
class WrappedRemoteSensor(RemoteSensor):
'''
	This example shows how to write a basic wrapped remote sensor. It reads
	"/proc/meminfo" and parses out the values for "memtotal" and "memfree".
	Each time one of these values changes, a sensor-update is sent to the
server.
To start this sensor, pass it as a wrapper to the wrapper daemon:
source setenv.sh
python src/scratch/wrappers/daemon.py --foreground --loglevel DEBUG \
--wrap WrappedRemoteSensor#WrappedRemoteSensor start
'''
__args = None
# name used e.g. for heartbeat
name = "wrap"
def __init__(self, myArgs = {}):
'''
Create a new instance of the monitoring remote sensor.
@param myArgs arguments for the sensor: host and port.
'''
RemoteSensor.__init__(self, args = myArgs)
def worker(self):
'''
Read memory info from proc filesystem (memtotal and memfree). If the
value changed, send a sensor-update message to the server.
'''
try:
f = open('/proc/meminfo', 'r')
lines = f.readlines()
f.close()
changed = False
for l in lines:
w = l.split(':')
k = w[0].strip().lower()
v = int(w[1].strip().split(' ')[0])
				# these are the only fields we are interested in
if k in [ 'memtotal', 'memfree']:
if self.values.set(k, v):
changed = True
if changed:
self.bcastMsg('input-changed')
except Exception as e:
logging.error(e)
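# Note: a /proc/meminfo line has the form "MemTotal:  2048000 kB" (the value
# here is hypothetical); worker() lower-cases the key, keeps the integer kB
# value and reports only 'memtotal' and 'memfree'.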
| lgpl-2.1 | -1,918,675,807,934,093,300 | 26.606742 | 74 | 0.699634 | false |
luotao1/Paddle | python/paddle/tensor/logic.py | 1 | 16431 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..fluid.layer_helper import LayerHelper
from ..fluid.data_feeder import check_type, check_variable_and_dtype
from ..fluid.layers.layer_function_generator import templatedoc
from .. import fluid
from ..fluid.framework import in_dygraph_mode
from paddle.common_ops_import import *
from ..framework import VarBase as Tensor
# TODO: define logic functions of a tensor
from ..fluid.layers import is_empty #DEFINE_ALIAS
from ..fluid.layers import logical_and #DEFINE_ALIAS
from ..fluid.layers import logical_not #DEFINE_ALIAS
from ..fluid.layers import logical_or #DEFINE_ALIAS
from ..fluid.layers import logical_xor #DEFINE_ALIAS
__all__ = [
'equal',
'equal_all',
'greater_equal',
'greater_than',
'is_empty',
'less_equal',
'less_than',
'logical_and',
'logical_not',
'logical_or',
'logical_xor',
'not_equal',
'allclose',
'is_tensor'
# 'isnan'
]
def equal_all(x, y, name=None):
"""
This OP returns the truth value of :math:`x == y`. True if two inputs have the same elements, False otherwise.
**NOTICE**: The output of this OP has no gradient.
Args:
x(Tensor): Tensor, data type is float32, float64, int32, int64.
y(Tensor): Tensor, data type is float32, float64, int32, int64.
name(str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor: output Tensor, data type is bool, value is [False] or [True].
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([1, 2, 3])
y = paddle.to_tensor([1, 2, 3])
z = paddle.to_tensor([1, 4, 3])
result1 = paddle.equal_all(x, y)
print(result1) # result1 = [True ]
result2 = paddle.equal_all(x, z)
print(result2) # result2 = [False ]
"""
helper = LayerHelper("equal_all", **locals())
out = helper.create_variable_for_type_inference(dtype='bool')
helper.append_op(
type='equal_all', inputs={'X': [x],
'Y': [y]}, outputs={'Out': [out]})
return out
@templatedoc()
def allclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False, name=None):
"""
${comment}
Args:
x(Tensor): ${input_comment}.
y(Tensor): ${other_comment}.
rtol(rtoltype, optional): The relative tolerance. Default: :math:`1e-5` .
atol(atoltype, optional): The absolute tolerance. Default: :math:`1e-8` .
equal_nan(equalnantype, optional): ${equal_nan_comment}.
name (str, optional): Name for the operation. For more information, please
refer to :ref:`api_guide_Name`. Default: None.
Returns:
Tensor: ${out_comment}.
Raises:
TypeError: The data type of ``x`` must be one of float32, float64.
TypeError: The data type of ``y`` must be one of float32, float64.
TypeError: The type of ``rtol`` must be float.
TypeError: The type of ``atol`` must be float.
TypeError: The type of ``equal_nan`` must be bool.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([10000., 1e-07])
y = paddle.to_tensor([10000.1, 1e-08])
result1 = paddle.allclose(x, y, rtol=1e-05, atol=1e-08,
equal_nan=False, name="ignore_nan")
np_result1 = result1.numpy()
# [False]
result2 = paddle.allclose(x, y, rtol=1e-05, atol=1e-08,
equal_nan=True, name="equal_nan")
np_result2 = result2.numpy()
# [False]
x = paddle.to_tensor([1.0, float('nan')])
y = paddle.to_tensor([1.0, float('nan')])
result1 = paddle.allclose(x, y, rtol=1e-05, atol=1e-08,
equal_nan=False, name="ignore_nan")
np_result1 = result1.numpy()
# [False]
result2 = paddle.allclose(x, y, rtol=1e-05, atol=1e-08,
equal_nan=True, name="equal_nan")
np_result2 = result2.numpy()
# [True]
"""
if in_dygraph_mode():
return core.ops.allclose(x, y, 'rtol',
str(rtol), 'atol',
str(atol), 'equal_nan', equal_nan)
check_variable_and_dtype(x, "input", ['float32', 'float64'], 'allclose')
check_variable_and_dtype(y, "input", ['float32', 'float64'], 'allclose')
check_type(rtol, 'rtol', float, 'allclose')
check_type(atol, 'atol', float, 'allclose')
check_type(equal_nan, 'equal_nan', bool, 'allclose')
helper = LayerHelper("allclose", **locals())
out = helper.create_variable_for_type_inference(dtype='bool')
inputs = {'Input': x, 'Other': y}
outputs = {'Out': out}
attrs = {'rtol': str(rtol), 'atol': str(atol), 'equal_nan': equal_nan}
helper.append_op(
type='allclose', inputs=inputs, outputs=outputs, attrs=attrs)
return out
@templatedoc()
def equal(x, y, name=None):
"""
This layer returns the truth value of :math:`x == y` elementwise.
**NOTICE**: The output of this OP has no gradient.
Args:
x(Tensor): Tensor, data type is float32, float64, int32, int64.
y(Tensor): Tensor, data type is float32, float64, int32, int64.
name(str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
        Tensor: output Tensor, its shape is the same as the input Tensor's,
and the data type is bool. The result of this op is stop_gradient.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([1, 2, 3])
y = paddle.to_tensor([1, 3, 2])
result1 = paddle.equal(x, y)
print(result1) # result1 = [True False False]
"""
if in_dygraph_mode():
return core.ops.equal(x, y)
check_variable_and_dtype(x, "x", ["float32", "float64", "int32", "int64"],
"equal")
check_variable_and_dtype(y, "y", ["float32", "float64", "int32", "int64"],
"equal")
helper = LayerHelper("equal", **locals())
out = helper.create_variable_for_type_inference(dtype='bool')
out.stop_gradient = True
helper.append_op(
type='equal', inputs={'X': [x],
'Y': [y]}, outputs={'Out': [out]})
return out
@templatedoc()
def greater_equal(x, y, name=None):
"""
This OP returns the truth value of :math:`x >= y` elementwise, which is equivalent function to the overloaded operator `>=`.
**NOTICE**: The output of this OP has no gradient.
Args:
x(Tensor): First input to compare which is N-D tensor. The input data type should be float32, float64, int32, int64.
y(Tensor): Second input to compare which is N-D tensor. The input data type should be float32, float64, int32, int64.
name(str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor, the output data type is bool: The tensor storing the output, the output shape is same as input :attr:`x`.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([1, 2, 3])
y = paddle.to_tensor([1, 3, 2])
result1 = paddle.greater_equal(x, y)
print(result1) # result1 = [True False True]
"""
if in_dygraph_mode():
return core.ops.greater_equal(x, y)
check_variable_and_dtype(x, "x", ["float32", "float64", "int32", "int64"],
"greater_equal")
check_variable_and_dtype(y, "y", ["float32", "float64", "int32", "int64"],
"greater_equal")
helper = LayerHelper("greater_equal", **locals())
out = helper.create_variable_for_type_inference(dtype='bool')
out.stop_gradient = True
helper.append_op(
type='greater_equal',
inputs={'X': [x],
'Y': [y]},
outputs={'Out': [out]})
return out
@templatedoc()
def greater_than(x, y, name=None):
"""
This OP returns the truth value of :math:`x > y` elementwise, which is equivalent function to the overloaded operator `>`.
**NOTICE**: The output of this OP has no gradient.
Args:
x(Tensor): First input to compare which is N-D tensor. The input data type should be float32, float64, int32, int64.
y(Tensor): Second input to compare which is N-D tensor. The input data type should be float32, float64, int32, int64.
name(str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor, the output data type is bool: The tensor storing the output, the output shape is same as input :attr:`x` .
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([1, 2, 3])
y = paddle.to_tensor([1, 3, 2])
result1 = paddle.greater_than(x, y)
print(result1) # result1 = [False False True]
"""
if in_dygraph_mode():
return core.ops.greater_than(x, y)
check_variable_and_dtype(x, "x", ["float32", "float64", "int32", "int64"],
"greater_than")
check_variable_and_dtype(y, "y", ["float32", "float64", "int32", "int64"],
"greater_than")
helper = LayerHelper("greater_than", **locals())
out = helper.create_variable_for_type_inference(dtype='bool')
out.stop_gradient = True
helper.append_op(
type='greater_than',
inputs={'X': [x],
'Y': [y]},
outputs={'Out': [out]})
return out
@templatedoc()
def less_equal(x, y, name=None):
"""
This OP returns the truth value of :math:`x <= y` elementwise, which is equivalent function to the overloaded operator `<=`.
**NOTICE**: The output of this OP has no gradient.
Args:
x(Tensor): First input to compare which is N-D tensor. The input data type should be float32, float64, int32, int64.
y(Tensor): Second input to compare which is N-D tensor. The input data type should be float32, float64, int32, int64.
name(str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor, the output data type is bool: The tensor storing the output, the output shape is same as input :attr:`x`.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([1, 2, 3])
y = paddle.to_tensor([1, 3, 2])
result1 = paddle.less_equal(x, y)
print(result1) # result1 = [True True False]
"""
if in_dygraph_mode():
return core.ops.less_equal(x, y)
check_variable_and_dtype(x, "x", ["float32", "float64", "int32", "int64"],
"less_equal")
check_variable_and_dtype(y, "y", ["float32", "float64", "int32", "int64"],
"less_equal")
helper = LayerHelper("less_equal", **locals())
out = helper.create_variable_for_type_inference(dtype='bool')
out.stop_gradient = True
helper.append_op(
type='less_equal', inputs={'X': [x],
'Y': [y]}, outputs={'Out': [out]})
return out
@templatedoc()
def less_than(x, y, name=None):
"""
This OP returns the truth value of :math:`x < y` elementwise, which is equivalent function to the overloaded operator `<`.
**NOTICE**: The output of this OP has no gradient.
Args:
x(Tensor): First input to compare which is N-D tensor. The input data type should be float32, float64, int32, int64.
y(Tensor): Second input to compare which is N-D tensor. The input data type should be float32, float64, int32, int64.
name(str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor, the output data type is bool: The tensor storing the output, the output shape is same as input :attr:`x`.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([1, 2, 3])
y = paddle.to_tensor([1, 3, 2])
result1 = paddle.less_than(x, y)
print(result1) # result1 = [False True False]
"""
if in_dygraph_mode():
return core.ops.less_than(x, y)
check_variable_and_dtype(x, "x", ["float32", "float64", "int32", "int64"],
"less_than")
check_variable_and_dtype(y, "y", ["float32", "float64", "int32", "int64"],
"less_than")
helper = LayerHelper("less_than", **locals())
out = helper.create_variable_for_type_inference(dtype='bool')
out.stop_gradient = True
helper.append_op(
type='less_than', inputs={'X': [x],
'Y': [y]}, outputs={'Out': [out]})
return out
@templatedoc()
def not_equal(x, y, name=None):
"""
This OP returns the truth value of :math:`x != y` elementwise, which is equivalent function to the overloaded operator `!=`.
**NOTICE**: The output of this OP has no gradient.
Args:
x(Tensor): First input to compare which is N-D tensor. The input data type should be float32, float64, int32, int64.
y(Tensor): Second input to compare which is N-D tensor. The input data type should be float32, float64, int32, int64.
name(str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor, the output data type is bool: The tensor storing the output, the output shape is same as input :attr:`x`.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([1, 2, 3])
y = paddle.to_tensor([1, 3, 2])
result1 = paddle.not_equal(x, y)
print(result1) # result1 = [False True True]
"""
if in_dygraph_mode():
return core.ops.not_equal(x, y)
check_variable_and_dtype(x, "x", ["float32", "float64", "int32", "int64"],
"not_equal")
check_variable_and_dtype(y, "y", ["float32", "float64", "int32", "int64"],
"not_equal")
helper = LayerHelper("not_equal", **locals())
out = helper.create_variable_for_type_inference(dtype='bool')
out.stop_gradient = True
helper.append_op(
type='not_equal', inputs={'X': [x],
'Y': [y]}, outputs={'Out': [out]})
return out
def is_tensor(x):
"""
This function tests whether input object is a paddle.Tensor.
Args:
x (object): Object to test.
Returns:
A boolean value. True if 'x' is a paddle.Tensor, otherwise False.
Examples:
.. code-block:: python
import paddle
input1 = paddle.rand(shape=[2, 3, 5], dtype='float32')
check = paddle.is_tensor(input1)
print(check) #True
input3 = [1, 4]
check = paddle.is_tensor(input3)
print(check) #False
"""
return isinstance(x, Tensor)
| apache-2.0 | 1,783,713,438,296,025,000 | 35.676339 | 128 | 0.585296 | false |
alsrgv/tensorflow | tensorflow/python/ops/math_grad.py | 1 | 57271 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gradients for operators defined in math_ops.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.compat import compat
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import math_ops
def _safe_shape_div(x, y):
"""Divides `x / y` assuming `x, y >= 0`, treating `0 / 0 = 0`."""
return x // math_ops.maximum(y, 1)
@ops.RegisterGradient("ArgMax")
def _ArgMaxGrad(op, grad):
del op, grad
return [None, None]
@ops.RegisterGradient("ArgMin")
def _ArgMinGrad(op, grad):
del op, grad
return [None, None]
# TODO(rmlarsen): Implement gradient.
ops.NotDifferentiable("EuclideanNorm")
_empty_tuple = ()
def _IsScalar(x):
return x._shape_tuple() is _empty_tuple # pylint: disable=protected-access
@ops.RegisterGradient("Sum")
def _SumGrad(op, grad):
"""Gradient for Sum."""
# Fast path for when reducing to a scalar and ndims is known: adds only
# Reshape and Tile ops (and possibly a Shape).
input_0_shape = op.inputs[0]._shape_tuple() # pylint: disable=protected-access
if input_0_shape is not None:
axes = tensor_util.constant_value(op.inputs[1])
if axes is not None:
rank = len(input_0_shape)
if np.array_equal(axes, np.arange(rank)): # Reduce all dims.
if context.executing_eagerly():
ctx = context.context()
new_shape = ctx.ones_rank_cache().get(rank)
if new_shape is None:
new_shape = constant_op.constant([1] * rank, dtype=dtypes.int32)
ctx.ones_rank_cache().put(rank, new_shape)
else:
new_shape = [1] * rank
grad = array_ops.reshape(grad, new_shape)
# If shape is not fully defined (but rank is), we use Shape.
if None not in input_0_shape:
input_shape = constant_op.constant(input_0_shape, dtype=dtypes.int32)
else:
input_shape = array_ops.shape(op.inputs[0])
return [array_ops.tile(grad, input_shape), None]
input_shape = array_ops.shape(op.inputs[0])
# TODO(apassos) remove this once device placement for eager ops makes more
# sense.
with ops.colocate_with(input_shape):
output_shape_kept_dims = math_ops.reduced_shape(input_shape, op.inputs[1])
tile_scaling = _safe_shape_div(input_shape, output_shape_kept_dims)
grad = array_ops.reshape(grad, output_shape_kept_dims)
return [array_ops.tile(grad, tile_scaling), None]
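# Illustrative sketch (hypothetical shapes): for an input of shape [2, 3]
# summed over axis 1, the incoming grad of shape [2] is reshaped to the
# kept-dims shape [2, 1] and tiled by [1, 3] back to [2, 3], so every input
# element receives the gradient of the sum it contributed to.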
def _MinOrMaxGrad(op, grad):
"""Gradient for Min or Max. Amazingly it's precisely the same code."""
input_shape = array_ops.shape(op.inputs[0])
output_shape_kept_dims = math_ops.reduced_shape(input_shape, op.inputs[1])
y = op.outputs[0]
y = array_ops.reshape(y, output_shape_kept_dims)
grad = array_ops.reshape(grad, output_shape_kept_dims)
# Compute the number of selected (maximum or minimum) elements in each
# reduction dimension. If there are multiple minimum or maximum elements
# then the gradient will be divided between them.
indicators = math_ops.cast(math_ops.equal(y, op.inputs[0]), grad.dtype)
num_selected = array_ops.reshape(
math_ops.reduce_sum(indicators, op.inputs[1]), output_shape_kept_dims)
return [math_ops.divide(indicators, num_selected) * grad, None]
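# Illustrative sketch (hypothetical values): for x = [3., 1., 3.] reduced with
# reduce_max over the whole vector, both maxima are "selected", so an incoming
# gradient of 1.0 is split evenly into [0.5, 0.0, 0.5].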
@ops.RegisterGradient("Max")
def _MaxGrad(op, grad):
"""Gradient for Max."""
return _MinOrMaxGrad(op, grad)
@ops.RegisterGradient("Min")
def _MinGrad(op, grad):
return _MinOrMaxGrad(op, grad)
@ops.RegisterGradient("Mean")
def _MeanGrad(op, grad):
"""Gradient for Mean."""
sum_grad = _SumGrad(op, grad)[0]
input_shape = op.inputs[0]._shape_tuple() # pylint: disable=protected-access
output_shape = op.outputs[0]._shape_tuple() # pylint: disable=protected-access
if (input_shape is not None and output_shape is not None and
None not in input_shape and None not in output_shape):
input_size = np.prod(input_shape)
output_size = np.prod(output_shape)
factor = input_size // max(output_size, 1)
factor = constant_op.constant(factor, dtype=sum_grad.dtype)
else:
input_shape = array_ops.shape(op.inputs[0])
output_shape = array_ops.shape(op.outputs[0])
factor = _safe_shape_div(
math_ops.reduce_prod(input_shape), math_ops.reduce_prod(output_shape))
return math_ops.truediv(sum_grad, math_ops.cast(factor, sum_grad.dtype)), None
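# Illustrative sketch (hypothetical shapes): for an input of shape [2, 3]
# reduced to a scalar mean, the summed gradient is broadcast back to [2, 3]
# and factor = 6, so each input element receives grad / 6.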
@ops.RegisterGradient("Prod")
def _ProdGrad(op, grad):
"""Gradient for Prod."""
# The gradient can be expressed by dividing the product by each entry of the
# input tensor, but this approach can't deal with zeros in the input.
# Here, we avoid this problem by composing the output as a product of two
# cumprod operations.
input_shape = array_ops.shape(op.inputs[0])
# Reshape reduction indices for the case where the parameter is a scalar
reduction_indices = array_ops.reshape(op.inputs[1], [-1])
# Expand grad to full input shape
output_shape_kept_dims = math_ops.reduced_shape(input_shape, op.inputs[1])
tile_scaling = _safe_shape_div(input_shape, output_shape_kept_dims)
grad = array_ops.reshape(grad, output_shape_kept_dims)
grad = array_ops.tile(grad, tile_scaling)
# Pack all reduced dimensions into a single one, so we can perform the
# cumprod ops. If the reduction dims list is empty, it defaults to float32,
# so we need to cast here. We put all the shape-related ops on CPU to avoid
# copying back and forth, and since listdiff is CPU only.
with ops.device("/cpu:0"):
rank = array_ops.rank(op.inputs[0])
reduction_indices = (reduction_indices + rank) % rank
reduced = math_ops.cast(reduction_indices, dtypes.int32)
idx = math_ops.range(0, rank)
other, _ = array_ops.setdiff1d(idx, reduced)
perm = array_ops.concat([reduced, other], 0)
reduced_num = math_ops.reduce_prod(array_ops.gather(input_shape, reduced))
other_num = math_ops.reduce_prod(array_ops.gather(input_shape, other))
permuted = array_ops.transpose(op.inputs[0], perm)
permuted_shape = array_ops.shape(permuted)
reshaped = array_ops.reshape(permuted, (reduced_num, other_num))
# Calculate product, leaving out the current entry
left = math_ops.cumprod(reshaped, axis=0, exclusive=True)
right = math_ops.cumprod(reshaped, axis=0, exclusive=True, reverse=True)
# For complex inputs, the gradient is in the conjugate direction.
y = array_ops.reshape(
math_ops.conj(left) * math_ops.conj(right), permuted_shape)
# Invert the transpose and reshape operations.
# Make sure to set the statically known shape information through a reshape.
out = grad * array_ops.transpose(y, array_ops.invert_permutation(perm))
return array_ops.reshape(out, input_shape), None
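# Illustrative sketch of the cumprod decomposition above, assuming a 1-D input
# reduced over its only axis (values are hypothetical):
#   x     = [2., 0., 3.]
#   left  = exclusive cumprod          -> [1., 2., 0.]
#   right = exclusive reverse cumprod  -> [0., 3., 1.]
#   left * right = [0., 6., 0.], i.e. the product of every entry except the
#   current one, which is d(prod(x))/dx_i even when x contains a zero.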
@ops.RegisterGradient("SegmentSum")
def _SegmentSumGrad(op, grad):
"""Gradient for SegmentSum."""
return array_ops.gather(grad, op.inputs[1]), None
@ops.RegisterGradient("SegmentMean")
def _SegmentMeanGrad(op, grad):
"""Gradient for SegmentMean."""
input_rank = array_ops.rank(op.inputs[0])
ones_shape = array_ops.concat([
array_ops.shape(op.inputs[1]),
array_ops.fill(array_ops.expand_dims(input_rank - 1, 0), 1)
], 0)
ones = array_ops.fill(ones_shape, constant_op.constant(1, dtype=grad.dtype))
scaled_grad = math_ops.divide(grad, math_ops.segment_sum(ones, op.inputs[1]))
return array_ops.gather(scaled_grad, op.inputs[1]), None
@ops.RegisterGradient("SparseSegmentSum")
def _SparseSegmentSumGrad(op, grad):
"""Gradient for SparseSegmentSum."""
input_rows = array_ops.shape(op.inputs[0])[0]
return (math_ops.unsorted_segment_sum(
array_ops.gather(grad, op.inputs[2]), op.inputs[1], input_rows), None,
None)
@ops.RegisterGradient("SparseSegmentSumWithNumSegments")
def _SparseSegmentSumWithNumSegmentsGrad(op, grad):
"""Gradient for SparseSegmentSumWithNumSegments."""
input_rows = array_ops.shape(op.inputs[0])[0]
return (math_ops.unsorted_segment_sum(
array_ops.gather(grad, op.inputs[2]), op.inputs[1], input_rows), None,
None, None)
@ops.RegisterGradient("SparseSegmentMean")
def _SparseSegmentMeanGrad(op, grad):
"""Gradient for SparseSegmentMean."""
dim0 = array_ops.shape(op.inputs[0])[0]
return (math_ops.sparse_segment_mean_grad(grad, op.inputs[1], op.inputs[2],
dim0), None, None)
@ops.RegisterGradient("SparseSegmentMeanWithNumSegments")
def _SparseSegmentMeanWithNumSegmentsGrad(op, grad):
"""Gradient for SparseSegmentMeanWithNumSegments."""
dim0 = array_ops.shape(op.inputs[0])[0]
return (math_ops.sparse_segment_mean_grad(grad, op.inputs[1], op.inputs[2],
dim0), None, None, None)
@ops.RegisterGradient("SparseSegmentSqrtN")
def _SparseSegmentSqrtNGrad(op, grad):
"""Gradient for SparseSegmentSqrtN."""
dim0 = array_ops.shape(op.inputs[0])[0]
return (math_ops.sparse_segment_sqrt_n_grad(grad, op.inputs[1], op.inputs[2],
dim0), None, None)
@ops.RegisterGradient("SparseSegmentSqrtNWithNumSegments")
def _SparseSegmentSqrtNWithNumSegmentsGrad(op, grad):
"""Gradient for SparseSegmentSqrtNWithNumSegments."""
dim0 = array_ops.shape(op.inputs[0])[0]
return (math_ops.sparse_segment_sqrt_n_grad(grad, op.inputs[1], op.inputs[2],
dim0), None, None, None)
def _SegmentMinOrMaxGrad(op, grad):
""" Gradient for SegmentMin and SegmentMax. """
zeros = array_ops.zeros_like(op.inputs[0], dtype=op.inputs[0].dtype)
# Get the number of selected (minimum or maximum) elements in each segment.
gathered_outputs = array_ops.gather(op.outputs[0], op.inputs[1])
is_selected = math_ops.equal(op.inputs[0], gathered_outputs)
num_selected = math_ops.segment_sum(
math_ops.cast(is_selected, grad.dtype), op.inputs[1])
# Compute the gradient for each segment. The gradient for the ith segment is
# divided evenly among the selected elements in that segment.
weighted_grads = math_ops.divide(grad, num_selected)
gathered_grads = array_ops.gather(weighted_grads, op.inputs[1])
return array_ops.where(is_selected, gathered_grads, zeros), None
@ops.RegisterGradient("SegmentMin")
def _SegmentMinGrad(op, grad):
"""Gradient for SegmentMin."""
return _SegmentMinOrMaxGrad(op, grad)
@ops.RegisterGradient("SegmentMax")
def _SegmentMaxGrad(op, grad):
"""Gradient for SegmentMax."""
return _SegmentMinOrMaxGrad(op, grad)
def _GatherDropNegatives(params,
ids,
zero_clipped_indices=None,
is_positive=None):
""" Helper function for unsorted segment ops.
Gathers params for
positive segment ids and gathers 0 for inputs with negative segment id.
Also returns the clipped indices and a boolean mask with the same shape
as ids where a positive id is masked as true. With this, the latter two
can be passed as arguments to this function to reuse them.
"""
if zero_clipped_indices is None:
zero_clipped_indices = math_ops.maximum(ids, array_ops.zeros_like(ids))
gathered = array_ops.gather(params, zero_clipped_indices)
if is_positive is None:
is_positive = math_ops.greater_equal(ids, 0)
# tf.where(condition, x, y) requires condition to have the same shape as x
# and y.
# todo(philjd): remove this if tf.where supports broadcasting (#9284)
for _ in range(gathered.shape.ndims - is_positive.shape.ndims):
is_positive = array_ops.expand_dims(is_positive, -1)
is_positive = (
is_positive & array_ops.ones_like(gathered, dtype=dtypes.bool))
# replace gathered params of negative indices with 0
zero_slice = array_ops.zeros_like(gathered)
return (array_ops.where(is_positive, gathered, zero_slice),
zero_clipped_indices, is_positive)
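# Illustrative sketch (hypothetical values): with params = [[1, 2], [3, 4]]
# and ids = [1, -1], _GatherDropNegatives returns [[3, 4], [0, 0]] together
# with the clipped ids [1, 0] and the positive-id mask.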
def _UnsortedSegmentMinOrMaxGrad(op, grad):
""" Gradient for UnsortedSegmentMin and UnsortedSegmentMax. """
# Get the number of selected (minimum or maximum) elements in each segment.
gathered_outputs, zero_clipped_indices, is_positive = \
_GatherDropNegatives(op.outputs[0], op.inputs[1])
is_selected = math_ops.equal(op.inputs[0], gathered_outputs)
is_selected = math_ops.logical_and(is_selected, is_positive)
num_selected = math_ops.unsorted_segment_sum(
math_ops.cast(is_selected, grad.dtype), op.inputs[1], op.inputs[2])
# Compute the gradient for each segment. The gradient for the ith segment is
# divided evenly among the selected elements in that segment.
weighted_grads = math_ops.divide(grad, num_selected)
gathered_grads, _, _ = _GatherDropNegatives(weighted_grads, None,
zero_clipped_indices, is_positive)
zeros = array_ops.zeros_like(gathered_grads)
return array_ops.where(is_selected, gathered_grads, zeros), None, None
@ops.RegisterGradient("UnsortedSegmentSum")
def _UnsortedSegmentSumGrad(op, grad):
"""Gradient for UnsortedSegmentSum."""
return _GatherDropNegatives(grad, op.inputs[1])[0], None, None
@ops.RegisterGradient("UnsortedSegmentMax")
def _UnsortedSegmentMaxGrad(op, grad):
""" Gradient for UnsortedSegmentMax. """
return _UnsortedSegmentMinOrMaxGrad(op, grad)
@ops.RegisterGradient("UnsortedSegmentMin")
def _UnsortedSegmentMinGrad(op, grad):
""" Gradient for UnsortedSegmentMin. """
return _UnsortedSegmentMinOrMaxGrad(op, grad)
@ops.RegisterGradient("UnsortedSegmentProd")
def _UnsortedSegmentProdGrad(op, grad):
""" Gradient for UnsortedSegmentProd.
The gradient can be expressed for each segment by dividing the segment's
product by each element of the segment input tensor, but this approach can't
deal with zeros in the input.
Unlike reduce_prod we can't use cumsum here as individual segments may have
a different number of elements. Therefore we consider three cases:
1) A segment input contains no zeros and we can safely divide by the input
tensor.
2) A segment contains exactly one zero. Then the gradient of each input of
     the segment is zero except for the 0-input, where the gradient is
the product of the remaining segment entries.
3) A segment contains at least two zeros. The gradient is zero for all
segment inputs.
"""
# Note that unsorted_segment_sum will filter out the negative indices,
# so we don't need to do a logical_and with is_positive here
is_zero = math_ops.equal(op.inputs[0], 0)
num_zeros = gen_math_ops.unsorted_segment_sum(
math_ops.cast(is_zero, dtype=dtypes.int32), op.inputs[1], op.inputs[2])
# handle case 3 and set the gradient to 0 for segments with more than one
# 0 as input
grad = array_ops.where(
math_ops.greater(num_zeros, 1), array_ops.zeros_like(grad), grad)
# replace all zeros with ones and compute the unsorted_segment_prod
non_zero_data = array_ops.where(is_zero, array_ops.ones_like(op.inputs[0]),
op.inputs[0])
non_zero_prod = gen_math_ops.unsorted_segment_prod(non_zero_data,
op.inputs[1], op.inputs[2])
# clip the indices for gather to be positive
zero_clipped_indices = math_ops.maximum(op.inputs[1],
array_ops.zeros_like(op.inputs[1]))
gathered_prod = array_ops.gather(op.outputs[0], zero_clipped_indices)
gathered_non_zero_prod = array_ops.gather(non_zero_prod, zero_clipped_indices)
prod_divided_by_el = gathered_prod / op.inputs[0] # May contain nan/inf.
# Now fetch the individual results for segments containing 0 and those that
# don't. is_zero will also fetch results for entries with negative index
# but the following gather_drop_negatives sets the corresponding entry in
# grad to 0 for these
partial_derivative = array_ops.where(is_zero, gathered_non_zero_prod,
prod_divided_by_el)
gathered_grad = _GatherDropNegatives(grad, op.inputs[1],
zero_clipped_indices)[0]
return gathered_grad * partial_derivative, None, None
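# Illustrative sketch of the three cases above (hypothetical values): with
# data = [2., 3., 0., 0., 0.] and segment_ids = [0, 0, 1, 2, 2]:
#   segment 0 has no zeros      -> partial derivatives prod / x = [3., 2.]
#   segment 1 has exactly one 0 -> the derivative at the zero entry is the
#                                  product of the remaining entries (here 1.)
#   segment 2 has two zeros     -> the derivative is 0. for both entries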
@ops.RegisterGradient("Abs")
def _AbsGrad(op, grad):
x = op.inputs[0]
return grad * math_ops.sign(x)
@ops.RegisterGradient("Neg")
def _NegGrad(_, grad):
"""Returns -grad."""
return -grad
@ops.RegisterGradient("Inv")
def _InvGrad(op, grad):
"""Returns -grad * (1 / x^2)."""
y = op.outputs[0] # y = 1 / x
return gen_math_ops.reciprocal_grad(y, grad)
@ops.RegisterGradient("Reciprocal")
def _ReciprocalGrad(op, grad):
"""Returns -grad * (1 / x^2)."""
y = op.outputs[0] # y = 1 / x
return gen_math_ops.reciprocal_grad(y, grad)
@ops.RegisterGradient("InvGrad")
def _InvGradGrad(op, grad):
b = op.inputs[1]
# op.output[0]: y = -b * conj(a)^2
with ops.control_dependencies([grad]):
ca = math_ops.conj(op.inputs[0])
cg = math_ops.conj(grad)
return cg * -2.0 * b * ca, gen_math_ops.reciprocal_grad(ca, grad)
@ops.RegisterGradient("ReciprocalGrad")
def _ReciprocalGradGrad(op, grad):
b = op.inputs[1]
# op.output[0]: y = -b * conj(a)^2
with ops.control_dependencies([grad]):
ca = math_ops.conj(op.inputs[0])
cg = math_ops.conj(grad)
return cg * -2.0 * b * ca, gen_math_ops.reciprocal_grad(ca, grad)
@ops.RegisterGradient("Square")
def _SquareGrad(op, grad):
x = op.inputs[0]
# Added control dependencies to prevent 2*x from being computed too early.
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
y = constant_op.constant(2.0, dtype=x.dtype)
return math_ops.multiply(grad, math_ops.multiply(x, y))
@ops.RegisterGradient("Sqrt")
def _SqrtGrad(op, grad):
y = op.outputs[0] # y = x^(1/2)
return gen_math_ops.sqrt_grad(y, grad)
@ops.RegisterGradient("SqrtGrad")
def _SqrtGradGrad(op, grad):
a = op.inputs[0]
y = op.outputs[0] # y = 0.5 * b / conj(a)
with ops.control_dependencies([grad]):
if compat.forward_compatible(2019, 9, 14):
ga = gen_math_ops.xdivy(grad, a)
return -gen_math_ops.mul_no_nan(y, math_ops.conj(ga)), 0.5 * ga
else:
ga = grad / a
return -math_ops.conj(ga) * y, 0.5 * ga
@ops.RegisterGradient("Rsqrt")
def _RsqrtGrad(op, grad):
"""Returns -0.5 * grad * conj(y)^3."""
y = op.outputs[0] # y = x^(-1/2)
return gen_math_ops.rsqrt_grad(y, grad)
@ops.RegisterGradient("RsqrtGrad")
def _RsqrtGradGrad(op, grad):
"""Returns backprop gradient for f(a,b) = -0.5 * b * conj(a)^3."""
a = op.inputs[0] # a = x^{-1/2}
b = op.inputs[1] # backprop gradient for a
with ops.control_dependencies([grad]):
ca = math_ops.conj(a)
cg = math_ops.conj(grad)
grad_a = -1.5 * cg * b * math_ops.square(ca)
grad_b = gen_math_ops.rsqrt_grad(ca, grad)
return grad_a, grad_b
@ops.RegisterGradient("Exp")
def _ExpGrad(op, grad):
"""Returns grad * exp(x)."""
y = op.outputs[0] # y = e^x
with ops.control_dependencies([grad]):
y = math_ops.conj(y)
if compat.forward_compatible(2019, 9, 14):
return math_ops.mul_no_nan(y, grad)
else:
return grad * y
@ops.RegisterGradient("Expm1")
def _Expm1Grad(op, grad):
"""Returns grad * exp(x)."""
x = op.inputs[0]
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
y = math_ops.exp(x)
if compat.forward_compatible(2019, 9, 14):
return math_ops.mul_no_nan(y, grad)
else:
return grad * y
@ops.RegisterGradient("Log")
def _LogGrad(op, grad):
"""Returns grad * (1/x)."""
x = op.inputs[0]
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
if compat.forward_compatible(2019, 9, 14):
return gen_math_ops.xdivy(grad, x)
else:
return grad * math_ops.reciprocal(x)
@ops.RegisterGradient("Log1p")
def _Log1pGrad(op, grad):
"""Returns grad * (1/(1 + x))."""
x = op.inputs[0]
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
if compat.forward_compatible(2019, 9, 14):
return gen_math_ops.xdivy(grad, 1 + x)
else:
return grad * math_ops.reciprocal(1 + x)
@ops.RegisterGradient("Xlogy")
def _XLogyGrad(op, grad):
"""Returns gradient of xlogy(x, y) with respect to x and y."""
x = op.inputs[0]
y = op.inputs[1]
sx = array_ops.shape(x)
sy = array_ops.shape(y)
rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
with ops.control_dependencies([grad]):
not_zero_x = math_ops.cast(
math_ops.not_equal(x, math_ops.cast(0., dtype=x.dtype)), dtype=x.dtype)
partial_x = gen_math_ops.xlogy(not_zero_x, y)
partial_y = gen_math_ops.xdivy(x, y)
return (array_ops.reshape(math_ops.reduce_sum(partial_x * grad, rx), sx),
array_ops.reshape(math_ops.reduce_sum(partial_y * grad, ry), sy))
@ops.RegisterGradient("Xdivy")
def _XDivyGrad(op, grad):
"""Returns gradient of xdivy(x, y) with respect to x and y."""
x = op.inputs[0]
y = op.inputs[1]
sx = array_ops.shape(x)
sy = array_ops.shape(y)
rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
with ops.control_dependencies([grad]):
not_zero_x = math_ops.cast(
math_ops.not_equal(x, math_ops.cast(0., dtype=x.dtype)), dtype=x.dtype)
partial_x = gen_math_ops.xdivy(not_zero_x, y)
partial_y = gen_math_ops.xdivy(math_ops.negative(x), y**2)
return (array_ops.reshape(math_ops.reduce_sum(partial_x * grad, rx), sx),
array_ops.reshape(math_ops.reduce_sum(partial_y * grad, ry), sy))
@ops.RegisterGradient("Sinh")
def _SinhGrad(op, grad):
"""Returns grad * cosh(x)."""
x = op.inputs[0]
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
return grad * math_ops.cosh(x)
@ops.RegisterGradient("Cosh")
def _CoshGrad(op, grad):
"""Returns grad * sinh(x)."""
x = op.inputs[0]
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
return grad * math_ops.sinh(x)
@ops.RegisterGradient("Tanh")
def _TanhGrad(op, grad):
"""Returns grad * (1 - tanh(x) * tanh(x))."""
y = op.outputs[0] # y = tanh(x)
with ops.control_dependencies([grad]):
y = math_ops.conj(y)
return gen_math_ops.tanh_grad(y, grad)
@ops.RegisterGradient("Asinh")
def _AsinhGrad(op, grad):
"""Returns grad * 1/cosh(y)."""
y = op.outputs[0]
with ops.control_dependencies([grad]):
y = math_ops.conj(y)
return grad / math_ops.cosh(y)
@ops.RegisterGradient("Acosh")
def _AcoshGrad(op, grad):
"""Returns grad * 1/sinh(y)."""
y = op.outputs[0]
with ops.control_dependencies([grad]):
y = math_ops.conj(y)
if compat.forward_compatible(2019, 9, 14):
return math_ops.xdivy(grad, math_ops.sinh(y))
else:
return grad / math_ops.sinh(y)
@ops.RegisterGradient("Atanh")
def _AtanhGrad(op, grad):
"""Returns grad * 1/ (1 - x^2)."""
x = op.inputs[0]
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
x2 = math_ops.square(x)
one = constant_op.constant(1, dtype=grad.dtype)
inv = math_ops.reciprocal(math_ops.subtract(one, x2))
return grad * inv
@ops.RegisterGradient("TanhGrad")
def _TanhGradGrad(op, grad):
with ops.control_dependencies([grad]):
a = math_ops.conj(op.inputs[0])
b = math_ops.conj(op.inputs[1])
return grad * -2.0 * b * a, gen_math_ops.tanh_grad(a, grad)
@ops.RegisterGradient("Erf")
def _ErfGrad(op, grad):
"""Returns grad * 2/sqrt(pi) * exp(-x**2)."""
x = op.inputs[0]
two_over_root_pi = constant_op.constant(2 / np.sqrt(np.pi), dtype=grad.dtype)
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
return grad * two_over_root_pi * math_ops.exp(-math_ops.square(x))
@ops.RegisterGradient("Erfc")
def _ErfcGrad(op, grad):
"""Returns -grad * 2/sqrt(pi) * exp(-x**2)."""
x = op.inputs[0]
minus_two_over_root_pi = constant_op.constant(
-2 / np.sqrt(np.pi), dtype=grad.dtype)
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
return grad * minus_two_over_root_pi * math_ops.exp(-math_ops.square(x))
@ops.RegisterGradient("Lgamma")
def _LgammaGrad(op, grad):
"""Returns grad * digamma(x)."""
x = op.inputs[0]
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
if compat.forward_compatible(2019, 9, 14):
return math_ops.mul_no_nan(math_ops.digamma(x), grad)
else:
return grad * math_ops.digamma(x)
@ops.RegisterGradient("Digamma")
def _DigammaGrad(op, grad):
"""Compute gradient of the digamma function with respect to its argument."""
x = op.inputs[0]
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
partial_x = math_ops.polygamma(array_ops.constant(1, dtype=x.dtype), x)
if compat.forward_compatible(2019, 9, 14):
return math_ops.mul_no_nan(partial_x, grad)
else:
return grad * partial_x
@ops.RegisterGradient("BesselI0e")
def _BesselI0eGrad(op, grad):
"""Compute gradient of bessel_i0e(x) with respect to its argument."""
x = op.inputs[0]
y = op.outputs[0]
with ops.control_dependencies([grad]):
partial_x = (math_ops.bessel_i1e(x) - math_ops.sign(x) * y)
if compat.forward_compatible(2019, 9, 14):
return math_ops.mul_no_nan(partial_x, grad)
else:
return grad * partial_x
@ops.RegisterGradient("BesselI1e")
def _BesselI1eGrad(op, grad):
"""Compute gradient of bessel_i1e(x) with respect to its argument."""
x = op.inputs[0]
y = op.outputs[0]
with ops.control_dependencies([grad]):
# For x = 0, the correct gradient is 0.5.
# However, the main branch gives NaN because of the division by x, so
# we impute the gradient manually.
# An alternative solution is to express the gradient via bessel_i0e and
# bessel_i2e, but the latter is not yet implemented in Eigen.
eps = np.finfo(x.dtype.as_numpy_dtype).eps
zeros = array_ops.zeros_like(x)
x_is_not_tiny = math_ops.abs(x) > eps
safe_x = array_ops.where(x_is_not_tiny, x, eps + zeros)
dy_dx = math_ops.bessel_i0e(safe_x) - y * (
math_ops.sign(safe_x) + math_ops.reciprocal(safe_x))
dy_dx = array_ops.where(x_is_not_tiny, dy_dx, 0.5 + zeros)
if compat.forward_compatible(2019, 9, 14):
return math_ops.mul_no_nan(dy_dx, grad)
else:
return grad * dy_dx
@ops.RegisterGradient("Igamma")
def _IgammaGrad(op, grad):
"""Returns gradient of igamma(a, x) with respect to a and x."""
a = op.inputs[0]
x = op.inputs[1]
sa = array_ops.shape(a)
sx = array_ops.shape(x)
ra, rx = gen_array_ops.broadcast_gradient_args(sa, sx)
with ops.control_dependencies([grad]):
partial_a = gen_math_ops.igamma_grad_a(a, x)
# Perform operations in log space before summing, because Gamma(a)
# and Gamma'(a) can grow large.
partial_x = math_ops.exp(-x + (a - 1) * math_ops.log(x) -
math_ops.lgamma(a))
if compat.forward_compatible(2019, 9, 14):
return (array_ops.reshape(
math_ops.reduce_sum(math_ops.mul_no_nan(partial_a, grad), ra), sa),
array_ops.reshape(
math_ops.reduce_sum(math_ops.mul_no_nan(partial_x, grad), rx),
sx))
else:
return (array_ops.reshape(math_ops.reduce_sum(partial_a * grad, ra), sa),
array_ops.reshape(math_ops.reduce_sum(partial_x * grad, rx), sx))
@ops.RegisterGradient("Igammac")
def _IgammacGrad(op, grad):
"""Returns gradient of igammac(a, x) = 1 - igamma(a, x) w.r.t. a and x."""
igamma_grad_a, igamma_grad_x = _IgammaGrad(op, grad)
return (-igamma_grad_a, -igamma_grad_x)
@ops.RegisterGradient("Betainc")
def _BetaincGrad(op, grad):
"""Returns gradient of betainc(a, b, x) with respect to x."""
# TODO(ebrevdo): Perhaps add the derivative w.r.t. a, b
a, b, x = op.inputs
  # Two cases: x is a scalar and a/b are same-shaped tensors, or vice
  # versa; so it's sufficient to check against shape(a).
sa = array_ops.shape(a)
sx = array_ops.shape(x)
_, rx = gen_array_ops.broadcast_gradient_args(sa, sx)
# Perform operations in log space before summing, because terms
# can grow large.
log_beta = (
gen_math_ops.lgamma(a) + gen_math_ops.lgamma(b) -
gen_math_ops.lgamma(a + b))
partial_x = math_ops.exp((b - 1) * math_ops.log(1 - x) +
(a - 1) * math_ops.log(x) - log_beta)
# TODO(b/36815900): Mark None return values as NotImplemented
if compat.forward_compatible(2019, 9, 14):
return (
None, # da
None, # db
array_ops.reshape(
math_ops.reduce_sum(math_ops.mul_no_nan(partial_x, grad), rx), sx))
else:
return (
None, # da
None, # db
array_ops.reshape(math_ops.reduce_sum(partial_x * grad, rx), sx))
@ops.RegisterGradient("Zeta")
def _ZetaGrad(op, grad):
"""Returns gradient of zeta(x, q) with respect to x and q."""
# TODO(tillahoffmann): Add derivative with respect to x
x = op.inputs[0]
q = op.inputs[1]
# Broadcast gradients
sx = array_ops.shape(x)
sq = array_ops.shape(q)
unused_rx, rq = gen_array_ops.broadcast_gradient_args(sx, sq)
# Evaluate gradient
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
q = math_ops.conj(q)
partial_q = -x * math_ops.zeta(x + 1, q)
# TODO(b/36815900): Mark None return values as NotImplemented
if compat.forward_compatible(2019, 9, 14):
return (None,
array_ops.reshape(
math_ops.reduce_sum(math_ops.mul_no_nan(partial_q, grad), rq),
sq))
else:
return (None,
array_ops.reshape(math_ops.reduce_sum(partial_q * grad, rq), sq))
@ops.RegisterGradient("Polygamma")
def _PolygammaGrad(op, grad):
"""Returns gradient of psi(n, x) with respect to n and x."""
# TODO(tillahoffmann): Add derivative with respect to n
n = op.inputs[0]
x = op.inputs[1]
# Broadcast gradients
sn = array_ops.shape(n)
sx = array_ops.shape(x)
unused_rn, rx = gen_array_ops.broadcast_gradient_args(sn, sx)
# Evaluate gradient
with ops.control_dependencies([grad]):
n = math_ops.conj(n)
x = math_ops.conj(x)
partial_x = math_ops.polygamma(n + 1, x)
# TODO(b/36815900): Mark None return values as NotImplemented
if compat.forward_compatible(2019, 9, 14):
return (None,
array_ops.reshape(
math_ops.reduce_sum(math_ops.mul_no_nan(partial_x, grad), rx),
sx))
else:
return (None,
array_ops.reshape(math_ops.reduce_sum(partial_x * grad, rx), sx))
@ops.RegisterGradient("Sigmoid")
def _SigmoidGrad(op, grad):
"""Returns grad * sigmoid(x) * (1 - sigmoid(x))."""
y = op.outputs[0] # y = sigmoid(x)
with ops.control_dependencies([grad]):
y = math_ops.conj(y)
return gen_math_ops.sigmoid_grad(y, grad)
@ops.RegisterGradient("SigmoidGrad")
def _SigmoidGradGrad(op, grad):
with ops.control_dependencies([grad]):
a = math_ops.conj(op.inputs[0])
b = math_ops.conj(op.inputs[1])
gb = grad * b
return gb - 2.0 * gb * a, gen_math_ops.sigmoid_grad(a, grad)
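# Note added for clarity: SigmoidGrad computes g = dy * y * (1 - y), where in
# the op above y = op.inputs[0] and dy = op.inputs[1].  Differentiating gives
#   dg/dy  = dy * (1 - 2*y)  ->  gb - 2.0 * gb * a   (with gb = grad * b)
#   dg/ddy = y * (1 - y)     ->  sigmoid_grad(a, grad) = grad * a * (1 - a)
# which is exactly the pair returned above.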
@ops.RegisterGradient("Sign")
def _SignGrad(op, _):
"""Returns 0."""
x = op.inputs[0]
return array_ops.zeros(array_ops.shape(x), dtype=x.dtype)
@ops.RegisterGradient("Sin")
def _SinGrad(op, grad):
"""Returns grad * cos(x)."""
x = op.inputs[0]
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
return grad * math_ops.cos(x)
@ops.RegisterGradient("Cos")
def _CosGrad(op, grad):
"""Returns grad * -sin(x)."""
x = op.inputs[0]
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
return -grad * math_ops.sin(x)
@ops.RegisterGradient("Tan")
def _TanGrad(op, grad):
"""Returns grad * 1/sec^2(x)."""
x = op.inputs[0]
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
secx = math_ops.reciprocal(math_ops.cos(x))
secx2 = math_ops.square(secx)
if compat.forward_compatible(2019, 9, 14):
return math_ops.mul_no_nan(secx2, grad)
else:
return secx2 * grad
@ops.RegisterGradient("Asin")
def _AsinGrad(op, grad):
"""Returns grad * 1/sqrt(1-x^2)."""
x = op.inputs[0]
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
x2 = math_ops.square(x)
one = constant_op.constant(1, dtype=grad.dtype)
den = math_ops.sqrt(math_ops.subtract(one, x2))
if compat.forward_compatible(2019, 9, 14):
return math_ops.xdivy(grad, den)
else:
inv = math_ops.reciprocal(den)
return grad * inv
@ops.RegisterGradient("Acos")
def _AcosGrad(op, grad):
"""Returns grad * -1/sqrt(1-x^2)."""
x = op.inputs[0]
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
x2 = math_ops.square(x)
one = constant_op.constant(1, dtype=grad.dtype)
den = math_ops.sqrt(math_ops.subtract(one, x2))
if compat.forward_compatible(2019, 9, 14):
return -math_ops.xdivy(grad, den)
else:
inv = math_ops.reciprocal(den)
return -grad * inv
@ops.RegisterGradient("Atan")
def _AtanGrad(op, grad):
"""Returns grad * 1/ (1 + x^2)."""
x = op.inputs[0]
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
x2 = math_ops.square(x)
one = constant_op.constant(1, dtype=grad.dtype)
inv = math_ops.reciprocal(math_ops.add(one, x2))
return grad * inv
@ops.RegisterGradient("Atan2")
def _Atan2Grad(op, grad):
"""Returns grad * x / (x^2 + y^2), grad * -y / (x^2 + y^2)."""
y = op.inputs[0]
x = op.inputs[1]
with ops.control_dependencies([grad]):
if compat.forward_compatible(2019, 9, 14):
grad_inv = math_ops.xdivy(grad, (math_ops.square(x) + math_ops.square(y)))
else:
grad_inv = grad / (math_ops.square(x) + math_ops.square(y))
return x * grad_inv, -y * grad_inv
@ops.RegisterGradient("AddN")
def _AddNGrad(op, grad):
"""Copies the gradient to all inputs."""
# Not broadcasting.
return [grad] * len(op.inputs)
def _ShapesFullySpecifiedAndEqual(x, y, grad):
# pylint: disable=protected-access
x_shape = x._shape_tuple()
y_shape = y._shape_tuple()
grad_shape = grad._shape_tuple()
# pylint: enable=protected-access
return (x_shape == y_shape and x_shape == grad_shape and
x_shape is not None and None not in x_shape)
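# Illustrative note (not from the original source): for statically known and
# identical shapes, e.g. x: (2, 3), y: (2, 3), grad: (2, 3), the helper above
# returns True and the binary gradients below can skip the broadcast reduction;
# if any shape is unknown or the shapes differ, it returns False and the
# gradients fall back to broadcast_gradient_args plus reduce_sum/reshape.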
@ops.RegisterGradient("Add")
@ops.RegisterGradient("AddV2")
def _AddGrad(op, grad):
"""Gradient for Add."""
y = op.inputs[1]
skip_input_indices = None
try:
skip_input_indices = op.skip_input_indices
if skip_input_indices is not None and 1 in skip_input_indices and _IsScalar(
y):
return grad, None
except AttributeError:
# No gradient skipping, so do the full gradient computation
pass
x = op.inputs[0]
if (isinstance(grad, ops.Tensor) and
_ShapesFullySpecifiedAndEqual(x, y, grad)):
return grad, grad
sx = array_ops.shape(x)
sy = array_ops.shape(y)
rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
if skip_input_indices is not None and 0 in skip_input_indices:
gx = None
else:
gx = array_ops.reshape(math_ops.reduce_sum(grad, rx), sx)
if skip_input_indices is not None and 1 in skip_input_indices:
gy = None
else:
gy = array_ops.reshape(math_ops.reduce_sum(grad, ry), sy)
return (gx, gy)
@ops.RegisterGradient("Sub")
def _SubGrad(op, grad):
"""Gradient for Sub."""
x = op.inputs[0]
y = op.inputs[1]
if (isinstance(grad, ops.Tensor) and
_ShapesFullySpecifiedAndEqual(x, y, grad)):
return grad, -grad
sx = array_ops.shape(x)
sy = array_ops.shape(y)
rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
return (array_ops.reshape(math_ops.reduce_sum(grad, rx), sx),
array_ops.reshape(-math_ops.reduce_sum(grad, ry), sy))
@ops.RegisterGradient("Mul")
def _MulGrad(op, grad):
"""The gradient of scalar multiplication."""
x = op.inputs[0]
y = op.inputs[1]
if (isinstance(grad, ops.Tensor) and
_ShapesFullySpecifiedAndEqual(x, y, grad) and
grad.dtype in (dtypes.int32, dtypes.float32)):
return gen_math_ops.mul(grad, y), gen_math_ops.mul(grad, x)
assert x.dtype.base_dtype == y.dtype.base_dtype, (x.dtype, " vs. ", y.dtype)
sx = array_ops.shape(x)
sy = array_ops.shape(y)
rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
x = math_ops.conj(x)
y = math_ops.conj(y)
return (array_ops.reshape(
math_ops.reduce_sum(gen_math_ops.mul(grad, y), rx), sx),
array_ops.reshape(
math_ops.reduce_sum(gen_math_ops.mul(x, grad), ry), sy))
@ops.RegisterGradient("MulNoNan")
def _MulNoNanGrad(op, grad):
"""The gradient of scalar multiplication with NaN-suppression."""
x = op.inputs[0]
y = op.inputs[1]
if (isinstance(grad, ops.Tensor) and
_ShapesFullySpecifiedAndEqual(x, y, grad)):
return gen_math_ops.mul_no_nan(grad, y), gen_math_ops.mul_no_nan(x, grad)
assert x.dtype.base_dtype == y.dtype.base_dtype, (x.dtype, " vs. ", y.dtype)
sx = array_ops.shape(x)
sy = array_ops.shape(y)
rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
return (array_ops.reshape(
math_ops.reduce_sum(gen_math_ops.mul_no_nan(grad, y), rx), sx),
array_ops.reshape(
math_ops.reduce_sum(gen_math_ops.mul_no_nan(x, grad), ry), sy))
@ops.RegisterGradient("Div")
def _DivGrad(op, grad):
"""The gradient for the Div operator."""
x = op.inputs[0]
y = op.inputs[1]
sx = array_ops.shape(x)
sy = array_ops.shape(y)
rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
x = math_ops.conj(x)
y = math_ops.conj(y)
if compat.forward_compatible(2019, 9, 14):
return (array_ops.reshape(
math_ops.reduce_sum(math_ops.xdivy(grad, y), rx), sx),
array_ops.reshape(
math_ops.reduce_sum(
math_ops.mul_no_nan(
math_ops.divide(math_ops.divide(-x, y), y), grad), ry),
sy))
else:
return (array_ops.reshape(
math_ops.reduce_sum(math_ops.divide(grad, y), rx), sx),
array_ops.reshape(
math_ops.reduce_sum(
grad * math_ops.divide(math_ops.divide(-x, y), y), ry), sy))
@ops.RegisterGradient("FloorDiv")
def _FloorDivGrad(_, unused_grad):
"""The gradient for the FloorDiv operator."""
return None, None
@ops.RegisterGradient("FloorMod")
def _FloorModGrad(op, grad):
"""Returns grad * (1, -floor(x/y))."""
x = math_ops.conj(op.inputs[0])
y = math_ops.conj(op.inputs[1])
sx = array_ops.shape(x)
sy = array_ops.shape(y)
rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
floor_xy = math_ops.floor_div(x, y)
gx = array_ops.reshape(math_ops.reduce_sum(grad, rx), sx)
gy = array_ops.reshape(
math_ops.reduce_sum(grad * math_ops.negative(floor_xy), ry), sy)
return gx, gy
@ops.RegisterGradient("TruncateDiv")
def _TruncateDivGrad(_, unused_grad):
return None, None
@ops.RegisterGradient("RealDiv")
def _RealDivGrad(op, grad):
"""RealDiv op gradient."""
x = op.inputs[0]
y = op.inputs[1]
sx = array_ops.shape(x)
sy = array_ops.shape(y)
rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
x = math_ops.conj(x)
y = math_ops.conj(y)
if compat.forward_compatible(2019, 9, 14):
return (array_ops.reshape(
math_ops.reduce_sum(math_ops.xdivy(grad, y), rx), sx),
array_ops.reshape(
math_ops.reduce_sum(
math_ops.mul_no_nan(
math_ops.realdiv(math_ops.realdiv(-x, y), y), grad),
ry), sy))
else:
return (array_ops.reshape(
math_ops.reduce_sum(math_ops.realdiv(grad, y), rx), sx),
array_ops.reshape(
math_ops.reduce_sum(
grad * math_ops.realdiv(math_ops.realdiv(-x, y), y), ry),
sy))
@ops.RegisterGradient("DivNoNan")
def _DivNoNanGrad(op, grad):
"""DivNoNan op gradient."""
x = op.inputs[0]
y = op.inputs[1]
sx = array_ops.shape(x)
sy = array_ops.shape(y)
rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
x = math_ops.conj(x)
y = math_ops.conj(y)
if compat.forward_compatible(2019, 9, 14):
return (array_ops.reshape(
math_ops.reduce_sum(math_ops.div_no_nan(grad, y), rx), sx),
array_ops.reshape(
math_ops.reduce_sum(
math_ops.mul_no_nan(
math_ops.div_no_nan(math_ops.div_no_nan(-x, y), y),
grad), ry), sy))
else:
return (array_ops.reshape(
math_ops.reduce_sum(math_ops.div_no_nan(grad, y), rx), sx),
array_ops.reshape(
math_ops.reduce_sum(
grad * math_ops.div_no_nan(math_ops.div_no_nan(-x, y), y),
ry), sy))
@ops.RegisterGradient("Pow")
def _PowGrad(op, grad):
"""Returns grad * (y*x^(y-1), z*log(x))."""
x = op.inputs[0]
y = op.inputs[1]
z = op.outputs[0]
sx = array_ops.shape(x)
sy = array_ops.shape(y)
rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
x = math_ops.conj(x)
y = math_ops.conj(y)
z = math_ops.conj(z)
if compat.forward_compatible(2019, 9, 14):
gx = array_ops.reshape(
math_ops.reduce_sum(
gen_math_ops.mul_no_nan(y * math_ops.pow(x, y - 1), grad), rx), sx)
else:
gx = array_ops.reshape(
math_ops.reduce_sum(grad * y * math_ops.pow(x, y - 1), rx), sx)
# Avoid false singularity at x = 0
if x.dtype.is_complex:
# real(x) < 0 is fine for the complex case
mask = math_ops.not_equal(x, 0)
else:
# There's no sensible real value to return if x < 0, so return 0
mask = x > 0
safe_x = array_ops.where(mask, x, array_ops.ones_like(x))
log_x = array_ops.where(mask, math_ops.log(safe_x), array_ops.zeros_like(x))
if compat.forward_compatible(2019, 9, 14):
gy = array_ops.reshape(
math_ops.reduce_sum(gen_math_ops.mul_no_nan(z * log_x, grad), ry), sy)
else:
gy = array_ops.reshape(math_ops.reduce_sum(grad * z * log_x, ry), sy)
return gx, gy
def _MaximumMinimumGradInputOnly(op, grad, selector_op):
x = op.inputs[0]
y = op.inputs[1]
zeros = array_ops.zeros_like(grad)
xmask = selector_op(x, y)
xgrad = array_ops.where(xmask, grad, zeros)
ygrad = None # Return None for ygrad since the config allows that.
return (xgrad, ygrad)
def _MaximumMinimumGrad(op, grad, selector_op):
"""Factor out the code for the gradient of Maximum or Minimum."""
y = op.inputs[1]
skip_input_indices = None
try:
skip_input_indices = op.skip_input_indices
if skip_input_indices is not None and 1 in skip_input_indices and _IsScalar(
y):
# When we want to get gradients for the first input only, and the second
# input tensor is a scalar, we can do a much simpler calculation
return _MaximumMinimumGradInputOnly(op, grad, selector_op)
except AttributeError:
# No gradient skipping, so do the full gradient computation
pass
x = op.inputs[0]
gdtype = grad.dtype
sx = array_ops.shape(x)
sy = array_ops.shape(y)
gradshape = array_ops.shape(grad)
zeros = array_ops.zeros(gradshape, gdtype)
xmask = selector_op(x, y)
rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
if skip_input_indices is not None and 0 in skip_input_indices:
gx = None
else:
xgrad = array_ops.where(xmask, grad, zeros)
gx = array_ops.reshape(math_ops.reduce_sum(xgrad, rx), sx)
if skip_input_indices is not None and 1 in skip_input_indices:
gy = None
else:
ygrad = array_ops.where(xmask, zeros, grad)
gy = array_ops.reshape(math_ops.reduce_sum(ygrad, ry), sy)
return (gx, gy)
@ops.RegisterGradient("Maximum")
def _MaximumGrad(op, grad):
"""Returns grad*(x > y, x <= y) with type of grad."""
return _MaximumMinimumGrad(op, grad, math_ops.greater_equal)
@ops.RegisterGradient("Minimum")
def _MinimumGrad(op, grad):
"""Returns grad*(x < y, x >= y) with type of grad."""
return _MaximumMinimumGrad(op, grad, math_ops.less_equal)
@ops.RegisterGradient("SquaredDifference")
def _SquaredDifferenceGrad(op, grad):
"""Returns the gradient for (x-y)^2."""
x = op.inputs[0]
y = op.inputs[1]
sx = array_ops.shape(x)
sy = array_ops.shape(y)
rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
with ops.control_dependencies([grad]):
# The parens ensure that if grad is IndexedSlices, it'll get multiplied by
# Tensor (not a number like 2.0) which causes it to convert to Tensor.
x_grad = math_ops.scalar_mul(2.0, grad) * (x - y)
return (array_ops.reshape(math_ops.reduce_sum(x_grad, rx), sx),
-array_ops.reshape(math_ops.reduce_sum(x_grad, ry), sy))
# Logical operations have no gradients.
ops.NotDifferentiable("Less")
ops.NotDifferentiable("LessEqual")
ops.NotDifferentiable("Greater")
ops.NotDifferentiable("GreaterEqual")
ops.NotDifferentiable("Equal")
ops.NotDifferentiable("ApproximateEqual")
ops.NotDifferentiable("NotEqual")
ops.NotDifferentiable("LogicalAnd")
ops.NotDifferentiable("LogicalOr")
ops.NotDifferentiable("LogicalNot")
@ops.RegisterGradient("Select")
def _SelectGrad(op, grad):
c = op.inputs[0]
x = op.inputs[1]
zeros = array_ops.zeros_like(x)
return (None, array_ops.where(c, grad, zeros), array_ops.where(
c, zeros, grad))
@ops.RegisterGradient("SelectV2")
def _SelectGradV2(op, grad):
c = op.inputs[0]
x = op.inputs[1]
y = op.inputs[2]
zeros = array_ops.zeros([], dtype=grad.dtype.base_dtype)
gx = array_ops.where_v2(c, grad, zeros)
x_shape = array_ops.shape(x)
output_shape = array_ops.shape(op.outputs[0])
# Reduce away broadcasted leading dims.
reduce_x, _ = gen_array_ops.broadcast_gradient_args(x_shape, output_shape)
gx = math_ops.reduce_sum(gx, keepdims=True, axis=reduce_x)
gx = array_ops.reshape(gx, x_shape)
gy = array_ops.where_v2(c, zeros, grad)
y_shape = array_ops.shape(y)
# Reduce away broadcasted leading dims.
reduce_y, _ = gen_array_ops.broadcast_gradient_args(y_shape, output_shape)
gy = math_ops.reduce_sum(gy, keepdims=True, axis=reduce_y)
gy = array_ops.reshape(gy, y_shape)
return (None, gx, gy)
def _MatMulGradAgainstFirstOnly(op, grad):
"""Gradient for MatMul, only for the first input."""
t_a = op.get_attr("transpose_a")
t_b = op.get_attr("transpose_b")
b = math_ops.conj(op.inputs[1])
if not t_a and not t_b:
grad_a = gen_math_ops.mat_mul(grad, b, transpose_b=True)
elif not t_a and t_b:
grad_a = gen_math_ops.mat_mul(grad, b)
elif t_a and not t_b:
grad_a = gen_math_ops.mat_mul(b, grad, transpose_b=True)
elif t_a and t_b:
grad_a = gen_math_ops.mat_mul(b, grad, transpose_a=True, transpose_b=True)
return grad_a, None
def _MatMulGradAgainstSecondOnly(op, grad):
"""Gradient for MatMul, only for the second input."""
t_a = op.get_attr("transpose_a")
t_b = op.get_attr("transpose_b")
a = math_ops.conj(op.inputs[0])
if not t_a and not t_b:
grad_b = gen_math_ops.mat_mul(a, grad, transpose_a=True)
elif not t_a and t_b:
grad_b = gen_math_ops.mat_mul(grad, a, transpose_a=True)
elif t_a and not t_b:
grad_b = gen_math_ops.mat_mul(a, grad)
elif t_a and t_b:
grad_b = gen_math_ops.mat_mul(grad, a, transpose_a=True, transpose_b=True)
return None, grad_b
@ops.RegisterGradient("MatMul")
def _MatMulGrad(op, grad):
"""Gradient for MatMul."""
try:
skip_input_indices = op.skip_input_indices
if skip_input_indices is not None:
if 1 in skip_input_indices:
return _MatMulGradAgainstFirstOnly(op, grad)
elif 0 in skip_input_indices:
return _MatMulGradAgainstSecondOnly(op, grad)
except AttributeError:
# No gradient skipping, so do the full gradient computation
pass
t_a = op.get_attr("transpose_a")
t_b = op.get_attr("transpose_b")
a = math_ops.conj(op.inputs[0])
b = math_ops.conj(op.inputs[1])
if not t_a and not t_b:
grad_a = gen_math_ops.mat_mul(grad, b, transpose_b=True)
grad_b = gen_math_ops.mat_mul(a, grad, transpose_a=True)
elif not t_a and t_b:
grad_a = gen_math_ops.mat_mul(grad, b)
grad_b = gen_math_ops.mat_mul(grad, a, transpose_a=True)
elif t_a and not t_b:
grad_a = gen_math_ops.mat_mul(b, grad, transpose_b=True)
grad_b = gen_math_ops.mat_mul(a, grad)
elif t_a and t_b:
grad_a = gen_math_ops.mat_mul(b, grad, transpose_a=True, transpose_b=True)
grad_b = gen_math_ops.mat_mul(grad, a, transpose_a=True, transpose_b=True)
return grad_a, grad_b
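# Quick reference for the four cases above (added for clarity), writing dC for
# `grad` and using the conjugated a, b:
#   C = A   @ B    ->  dA = dC @ B^T     dB = A^T @ dC
#   C = A   @ B^T  ->  dA = dC @ B       dB = dC^T @ A
#   C = A^T @ B    ->  dA = B @ dC^T     dB = A @ dC
#   C = A^T @ B^T  ->  dA = B^T @ dC^T   dB = dC^T @ A^T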
@ops.RegisterGradient("SparseMatMul")
def _SparseMatMulGrad(op, grad):
"""Gradient for SparseMatMul."""
t_a = op.get_attr("transpose_a")
t_b = op.get_attr("transpose_b")
is_sparse = {
op.inputs[0]: op.get_attr("a_is_sparse"),
op.inputs[1]: op.get_attr("b_is_sparse"),
# Use heuristic to figure out if grad might be sparse
grad: not context.executing_eagerly() and (grad.op.type == "ReluGrad")
}
def _SparseMatMul(t1, t2, out_dtype, transpose_a=False, transpose_b=False):
"""Helper function to create SparseMatMul op."""
assert t1 in is_sparse and t2 in is_sparse
t1_sparse = is_sparse[t1]
t2_sparse = is_sparse[t2]
if transpose_b:
t2 = array_ops.transpose(t2)
transpose_b = False
prod = math_ops.matmul(
t1,
t2,
transpose_a=transpose_a,
transpose_b=transpose_b,
a_is_sparse=t1_sparse,
b_is_sparse=t2_sparse)
if prod.dtype != out_dtype:
prod = math_ops.cast(prod, out_dtype)
return prod
dtype_a = op.inputs[0].dtype
dtype_b = op.inputs[1].dtype
if not t_a and not t_b:
return (_SparseMatMul(grad, op.inputs[1], dtype_a, transpose_b=True),
_SparseMatMul(op.inputs[0], grad, dtype_b, transpose_a=True))
elif not t_a and t_b:
return (_SparseMatMul(grad, op.inputs[1], dtype_a),
_SparseMatMul(grad, op.inputs[0], dtype_b, transpose_a=True))
elif t_a and not t_b:
return (_SparseMatMul(op.inputs[1], grad, dtype_a, transpose_b=True),
_SparseMatMul(op.inputs[0], grad, dtype_b))
elif t_a and t_b:
return (_SparseMatMul(
op.inputs[1], grad, dtype_a, transpose_a=True, transpose_b=True),
_SparseMatMul(
grad, op.inputs[0], dtype_b, transpose_a=True,
transpose_b=True))
@ops.RegisterGradient("Floor")
def _FloorGrad(_, unused_grad):
return [None]
@ops.RegisterGradient("Ceil")
def _CeilGrad(_, unused_grad):
return [None]
@ops.RegisterGradient("Round")
def _RoundGrad(_, unused_grad):
return [None]
@ops.RegisterGradient("Rint")
def _RintGrad(_, unused_grad):
# the gradient of Rint is zero
return [None]
@ops.RegisterGradient("BatchMatMul")
def _BatchMatMul(op, grad):
"""Returns the gradient of x and y given the gradient of x * y."""
x = op.inputs[0]
y = op.inputs[1]
adj_x = op.get_attr("adj_x")
adj_y = op.get_attr("adj_y")
if not adj_x:
if not adj_y:
grad_x = math_ops.matmul(grad, y, adjoint_a=False, adjoint_b=True)
grad_y = math_ops.matmul(x, grad, adjoint_a=True, adjoint_b=False)
else:
grad_x = math_ops.matmul(grad, y, adjoint_a=False, adjoint_b=False)
grad_y = math_ops.matmul(grad, x, adjoint_a=True, adjoint_b=False)
else:
if not adj_y:
grad_x = math_ops.matmul(y, grad, adjoint_a=False, adjoint_b=True)
grad_y = math_ops.matmul(x, grad, adjoint_a=False, adjoint_b=False)
else:
grad_x = math_ops.matmul(y, grad, adjoint_a=True, adjoint_b=True)
grad_y = math_ops.matmul(grad, x, adjoint_a=True, adjoint_b=True)
return grad_x, grad_y
@ops.RegisterGradient("BatchMatMulV2")
def _BatchMatMulV2(op, grad):
"""Returns the gradient of x and y given the gradient of x * y."""
x = op.inputs[0]
y = op.inputs[1]
adj_x = op.get_attr("adj_x")
adj_y = op.get_attr("adj_y")
if not adj_x:
if not adj_y:
grad_x = math_ops.matmul(grad, y, adjoint_a=False, adjoint_b=True)
grad_y = math_ops.matmul(x, grad, adjoint_a=True, adjoint_b=False)
else:
grad_x = math_ops.matmul(grad, y, adjoint_a=False, adjoint_b=False)
grad_y = math_ops.matmul(grad, x, adjoint_a=True, adjoint_b=False)
else:
if not adj_y:
grad_x = math_ops.matmul(y, grad, adjoint_a=False, adjoint_b=True)
grad_y = math_ops.matmul(x, grad, adjoint_a=False, adjoint_b=False)
else:
grad_x = math_ops.matmul(y, grad, adjoint_a=True, adjoint_b=True)
grad_y = math_ops.matmul(grad, x, adjoint_a=True, adjoint_b=True)
# Reduce along the broadcasted batch dimensions, if broadcasting is required.
shape_x_static = x.get_shape()
shape_y_static = y.get_shape()
if not (shape_x_static.is_fully_defined() and
shape_y_static.is_fully_defined() and
shape_x_static == shape_y_static):
sx = array_ops.shape(x)
sy = array_ops.shape(y)
rx, ry = gen_array_ops.broadcast_gradient_args(sx[:-2], sy[:-2])
grad_x = array_ops.reshape(math_ops.reduce_sum(grad_x, rx), sx)
grad_y = array_ops.reshape(math_ops.reduce_sum(grad_y, ry), sy)
return grad_x, grad_y
ops.NotDifferentiable("Range")
ops.NotDifferentiable("LinSpace")
@ops.RegisterGradient("Complex")
def _ComplexGrad(op, grad):
"""Returns the real and imaginary components of 'grad', respectively."""
x = op.inputs[0]
y = op.inputs[1]
sx = array_ops.shape(x)
sy = array_ops.shape(y)
rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
return (array_ops.reshape(math_ops.reduce_sum(math_ops.real(grad), rx), sx),
array_ops.reshape(math_ops.reduce_sum(math_ops.imag(grad), ry), sy))
@ops.RegisterGradient("Real")
def _RealGrad(_, grad):
"""Returns 'grad' as the real part and set the imaginary part 0."""
zero = constant_op.constant(0, dtype=grad.dtype)
return math_ops.complex(grad, zero)
@ops.RegisterGradient("Imag")
def _ImagGrad(_, grad):
"""Returns 'grad' as the imaginary part and set the real part 0."""
zero = constant_op.constant(0, dtype=grad.dtype)
return math_ops.complex(zero, grad)
@ops.RegisterGradient("Angle")
def _AngleGrad(op, grad):
"""Returns -grad / (Im(x) + iRe(x))"""
x = op.inputs[0]
with ops.control_dependencies([grad]):
re = math_ops.real(x)
im = math_ops.imag(x)
z = math_ops.reciprocal(math_ops.complex(im, re))
zero = constant_op.constant(0, dtype=grad.dtype)
complex_grad = math_ops.complex(grad, zero)
return -complex_grad * z
@ops.RegisterGradient("Conj")
def _ConjGrad(_, grad):
"""Returns the complex conjugate of grad."""
return math_ops.conj(grad)
@ops.RegisterGradient("ComplexAbs")
def _ComplexAbsGrad(op, grad):
"""Returns the gradient of ComplexAbs."""
return math_ops.div_no_nan(
math_ops.complex(
grad, array_ops.zeros_like(grad)) * op.inputs[0],
math_ops.complex(
op.outputs[0], array_ops.zeros_like(op.outputs[0])))
@ops.RegisterGradient("Cast")
def _CastGrad(op, grad):
t = [
dtypes.float16, dtypes.float32, dtypes.float64, dtypes.bfloat16,
dtypes.complex64, dtypes.complex128
]
src_type = op.inputs[0].dtype.base_dtype
dst_type = grad.dtype.base_dtype
if src_type in t and dst_type in t:
return math_ops.cast(grad, src_type)
else:
return None
@ops.RegisterGradient("Cross")
def _CrossGrad(op, grad):
u = op.inputs[0]
v = op.inputs[1]
return (math_ops.cross(v, grad), math_ops.cross(grad, u))
@ops.RegisterGradient("Cumsum")
def _CumsumGrad(op, grad):
axis = op.inputs[1]
exclusive = op.get_attr("exclusive")
reverse = op.get_attr("reverse")
return [
math_ops.cumsum(grad, axis, exclusive=exclusive, reverse=not reverse),
None
]
@ops.RegisterGradient("Cumprod")
def _CumprodGrad(op, grad):
x = op.inputs[0]
axis = op.inputs[1]
exclusive = op.get_attr("exclusive")
reverse = op.get_attr("reverse")
# TODO This fails when x contains 0 and should be fixed
prod = math_ops.cumprod(x, axis, exclusive=exclusive, reverse=reverse)
out = math_ops.cumsum(
prod * grad, axis, exclusive=exclusive, reverse=not reverse)
return [out / x, None]
@ops.RegisterGradient("NextAfter")
def _NextAfterGrad(op, grad):
"""Returns gradient of nextafter(x1, x2) with respect to x1 and x2."""
x1 = op.inputs[0]
x2 = op.inputs[1]
s_x1 = array_ops.shape(x1)
s_x2 = array_ops.shape(x2)
r_x1, r_x2 = gen_array_ops.broadcast_gradient_args(s_x1, s_x2)
with ops.control_dependencies([grad]):
partial_x1 = array_ops.ones(s_x1, dtype=x1.dtype)
partial_x2 = array_ops.zeros(s_x2, dtype=x2.dtype)
return (array_ops.reshape(
math_ops.reduce_sum(partial_x1 * grad, r_x1), s_x1),
array_ops.reshape(
math_ops.reduce_sum(partial_x2 * grad, r_x2), s_x2))
| apache-2.0 | 2,105,415,834,781,362,400 | 33.54222 | 81 | 0.653891 | false |
robled/rocket-depot | setup.py | 1 | 2523 | import subprocess
from distutils.log import warn, info
from setuptools import setup
setup(
name='rocket-depot',
version='1.0.0',
scripts=['rocket-depot'],
# metadata for upload to PyPI
platforms='linux',
author='David Roble',
author_email='[email protected]',
maintainer='David Roble',
maintainer_email='[email protected]',
description='An rdesktop/xfreerdp frontend.',
long_description=open('README.txt').read(),
license='GNU GPLv3',
keywords=['rdesktop', 'freerdp', 'rdp', 'remote desktop',
'terminal server'],
url='https://github.com/robled/rocket-depot',
data_files=[
('/usr/share/applications',
['data/rocket-depot.desktop']),
('/usr/share/icons/hicolor/16x16/apps',
['data/icons/16x16/apps/rocket-depot.png']),
('/usr/share/icons/hicolor/22x22/apps',
['data/icons/22x22/apps/rocket-depot.png']),
('/usr/share/icons/hicolor/24x24/apps',
['data/icons/24x24/apps/rocket-depot.png']),
('/usr/share/icons/hicolor/32x32/apps',
['data/icons/32x32/apps/rocket-depot.png']),
('/usr/share/icons/hicolor/48x48/apps',
['data/icons/48x48/apps/rocket-depot.png']),
('/usr/share/icons/hicolor/64x64/apps',
['data/icons/64x64/apps/rocket-depot.png']),
('/usr/share/icons/hicolor/128x128/apps',
['data/icons/128x128/apps/rocket-depot.png']),
('/usr/share/icons/hicolor/256x256/apps',
['data/icons/256x256/apps/rocket-depot.png']),
('/usr/share/icons/hicolor/scalable/apps',
['data/icons/scalable/apps/rocket-depot.svg']),
],
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: X11 Applications :: GTK',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Information Technology',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Natural Language :: English',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python',
'Topic :: System :: Systems Administration',
'Topic :: Utilities',
],
)
info('running gtk-update-icon-cache')
try:
subprocess.call(['gtk-update-icon-cache', '-q', '-f', '-t',
'/usr/share/icons/hicolor'])
except OSError as e:
warn('updating the GTK icon cache failed: %s' % str(e))
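# Note (added for illustration): the gtk-update-icon-cache call above runs at
# the end of any `python setup.py ...` invocation (e.g. install), since it is
# module-level code that executes after setup() returns.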
| gpl-3.0 | 8,640,305,625,367,821,000 | 38.421875 | 70 | 0.604439 | false |
mjcaley/aiospamc | tests/test_responses.py | 1 | 3390 | #!/usr/bin/env python3
import pytest
import zlib
from aiospamc.exceptions import *
from aiospamc.header_values import CompressValue
from aiospamc.incremental_parser import ResponseParser
from aiospamc.responses import Response
def test_init_version():
r = Response(version="4.2", status_code=0, message="EX_OK")
result = bytes(r).split(b" ")[0]
assert result == b"SPAMD/4.2"
def test_init_status_code():
r = Response(version="1.5", status_code=0, message="EX_OK")
result = bytes(r).split(b" ")[1]
assert result == str(0).encode()
def test_init_message():
r = Response(version="1.5", status_code=0, message="EX_OK")
result = bytes(r).split(b"\r\n")[0]
assert result.endswith("EX_OK".encode())
def test_bytes_status():
r = Response(status_code=999, message="Test message")
result = bytes(r).partition(b"\r\n")[0]
assert b"999 Test message" in result
def test_bytes_headers(x_headers):
r = Response(version="1.5", status_code=0, message="EX_OK", headers=x_headers)
result = bytes(r).partition(b"\r\n")[2]
expected = b"".join(
[
b"%b: %b\r\n" % (key.encode("ascii"), bytes(value))
for key, value in r.headers.items()
]
)
assert result.startswith(expected)
assert result.endswith(b"\r\n\r\n")
def test_bytes_body():
test_input = b"Test body\n"
r = Response(version="1.5", status_code=0, message="EX_OK", body=test_input)
result = bytes(r).rpartition(b"\r\n")[2]
assert result == test_input
def test_bytes_body_compressed():
test_input = b"Test body\n"
r = Response(
version="1.5",
status_code=0,
message="EX_OK",
headers={"Compress": CompressValue()},
body=test_input,
)
result = bytes(r).rpartition(b"\r\n")[2]
assert result == zlib.compress(test_input)
def test_str():
r = Response(status_code=0, message="EX_OK")
result = str(r)
assert result == f"<0 - EX_OK: aiospamc.responses.Response object at {id(r)}>"
def test_eq_other_obj_is_false():
r = Response()
assert False is (r == "")
def test_raise_for_status_ok():
r = Response(version="1.5", status_code=0, message="")
assert r.raise_for_status() is None
@pytest.mark.parametrize(
"status_code, exception",
[
(64, UsageException),
(65, DataErrorException),
(66, NoInputException),
(67, NoUserException),
(68, NoHostException),
(69, UnavailableException),
(70, InternalSoftwareException),
(71, OSErrorException),
(72, OSFileException),
(73, CantCreateException),
(74, IOErrorException),
(75, TemporaryFailureException),
(76, ProtocolException),
(77, NoPermissionException),
(78, ConfigException),
(79, ServerTimeoutException),
],
)
def test_raise_for_status(status_code, exception):
r = Response(version="1.5", status_code=status_code, message="")
with pytest.raises(exception):
r.raise_for_status()
def test_raise_for_undefined_status():
r = Response(version="1.5", status_code=999, message="")
with pytest.raises(ResponseException):
r.raise_for_status()
def test_response_from_parser_result(response_with_body):
p = ResponseParser().parse(response_with_body)
r = Response(**p)
assert r is not None
| mit | 6,483,868,723,979,816,000 | 24.111111 | 82 | 0.623009 | false |
swegener/gruvi | lib/gruvi/address.py | 1 | 3675 | #
# This file is part of Gruvi. Gruvi is free software available under the
# terms of the MIT license. See the file "LICENSE" that was provided
# together with this source file for the licensing terms.
#
# Copyright (c) 2012-2014 the Gruvi authors. See the file "AUTHORS" for a
# complete list.
from __future__ import absolute_import, print_function
import six
import pyuv
from .hub import get_hub, switch_back, switchpoint
__all__ = ['saddr', 'paddr', 'getaddrinfo', 'getnameinfo']
def saddr(address):
"""Return a string representation for an address.
    The *address* parameter can be a pipe name, an IP address tuple, or a
socket address.
The return value is always a ``str`` instance.
"""
if isinstance(address, six.binary_type) and six.PY3:
return address.decode('utf8')
elif isinstance(address, six.string_types):
return address
elif isinstance(address, tuple) and ':' in address[0]:
return '[{}]:{}'.format(address[0], address[1])
elif isinstance(address, tuple):
return '{}:{}'.format(*address)
else:
raise TypeError('illegal address type: {!s}'.format(type(address)))
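# Doctest-style sketch of saddr (added for illustration; values are made up):
#
#   >>> saddr(('127.0.0.1', 5432))
#   '127.0.0.1:5432'
#   >>> saddr(('::1', 8080))
#   '[::1]:8080'
#   >>> saddr('/tmp/gruvi.sock')
#   '/tmp/gruvi.sock'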
def paddr(address):
"""Parse a string representation of an address.
This function is the inverse of :func:`saddr`.
"""
if address.startswith('['):
p1 = address.find(']:')
if p1 == -1:
raise ValueError
return (address[1:p1], int(address[p1+2:]))
elif ':' in address:
p1 = address.find(':')
return (address[:p1], int(address[p1+1:]))
else:
return address
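# Doctest-style sketch of paddr, the inverse of saddr (added for illustration):
#
#   >>> paddr('[::1]:8080')
#   ('::1', 8080)
#   >>> paddr('127.0.0.1:5432')
#   ('127.0.0.1', 5432)
#   >>> paddr('/tmp/gruvi.sock')
#   '/tmp/gruvi.sock'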
@switchpoint
def getaddrinfo(node, service=0, family=0, socktype=0, protocol=0, flags=0, timeout=30):
"""Resolve an Internet *node* name and *service* into a socket address.
The *family*, *socktype* and *protocol* are optional arguments that specify
the address family, socket type and protocol, respectively. The *flags*
argument allows you to pass flags to further modify the resolution process.
See the :func:`socket.getaddrinfo` function for a detailed description of
these arguments.
The return value is a list of ``(family, socktype, proto, canonname,
sockaddr)`` tuples. The fifth element (``sockaddr``) is the socket address.
It will be a 2-tuple ``(addr, port)`` for an IPv4 address, and a 4-tuple
``(addr, port, flowinfo, scopeid)`` for an IPv6 address.
The address resolution is performed in the libuv thread pool.
"""
hub = get_hub()
with switch_back(timeout) as switcher:
request = pyuv.dns.getaddrinfo(hub.loop, node, service, family,
socktype, protocol, flags, callback=switcher)
switcher.add_cleanup(request.cancel)
result = hub.switch()
result, error = result[0]
if error:
message = pyuv.errno.strerror(error)
raise pyuv.error.UVError(error, message)
return result
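# Hedged usage sketch (assumes a running libuv hub; the host name, service and
# the use of the standard socket module are illustrative only):
#
#   import socket
#   for family, socktype, proto, canonname, sockaddr in \
#           getaddrinfo('www.example.com', 'http', family=socket.AF_INET):
#       print(saddr(sockaddr))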
@switchpoint
def getnameinfo(sockaddr, flags=0, timeout=30):
"""Resolve a socket address *sockaddr* back to a ``(node, service)`` tuple.
The *flags* argument can be used to modify the resolution process. See the
:func:`socket.getnameinfo` function for more information.
The address resolution is performed in the libuv thread pool.
"""
hub = get_hub()
with switch_back(timeout) as switcher:
request = pyuv.dns.getnameinfo(hub.loop, sockaddr, flags, callback=switcher)
switcher.add_cleanup(request.cancel)
result = hub.switch()
result, error = result[0]
if error:
message = pyuv.errno.strerror(error)
raise pyuv.error.UVError(error, message)
return result
| mit | -7,268,691,083,574,085,000 | 34.336538 | 88 | 0.661224 | false |
harrystech/arthur-redshift-etl | python/etl/monitor.py | 1 | 36933 | """
Monitoring (and logging) for ETL steps.
This module provides a context manager for the ETL that monitors
the start time of an ETL step along with its successful or
unsuccessful completion. Events for start, finish or failure
may be emitted to a persistence layer.
"""
import http.server
import itertools
import logging
import os
import queue
import random
import socketserver
import sys
import threading
import time
import traceback
import urllib.parse
import uuid
from calendar import timegm
from collections import Counter, OrderedDict
from copy import deepcopy
from datetime import datetime, timedelta
from decimal import Decimal
from http import HTTPStatus
from operator import itemgetter
from typing import Dict, Iterable, List, Optional, Union
import boto3
import botocore.exceptions
import funcy as fy
import simplejson as json
from boto3.dynamodb.types import TypeDeserializer
from tqdm import tqdm
import etl.assets
import etl.config
import etl.text
from etl.errors import ETLRuntimeError
from etl.json_encoder import FancyJsonEncoder
from etl.timer import Timer, elapsed_seconds, utc_now
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
STEP_START = "start"
STEP_FINISH = "finish"
STEP_FAIL = "fail"
_DUMMY_TARGET = "#.dummy"
def trace_key():
"""
Return a "trace key" suitable to track program execution.
It's most likely unique between invocations.
"""
# We will never make a 32-bit operating system.
return uuid.uuid4().hex[:16].upper()
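# For illustration only: a trace key looks like '3F2504E04F891ED8' -- the first
# 16 hex characters (64 bits) of a random UUID, upper-cased.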
class MetaMonitor(type):
"""
Metaclass to implement read-only attributes of our ETL's Monitor.
If you need to find out the current trace key, call Monitor.etl_id.
If you want to know the "environment" (selected by using --prefix or the user's login),
then use Monitor.environment.
If you want to know the runtime environment (EMR, instance, step), use Monitor.cluster_info.
    Behind the scenes, some properties are evaluated lazily.
"""
@property
def etl_id(cls):
if cls._trace_key is None:
cls._trace_key = trace_key()
return cls._trace_key
@property
def environment(cls):
if cls._environment is None:
raise ValueError("value of 'environment' is None")
return cls._environment
@environment.setter
def environment(cls, value):
cls._environment = value
@property
def cluster_info(cls):
if cls._cluster_info is None:
job_flow = "/mnt/var/lib/info/job-flow.json"
if os.path.exists(job_flow):
with open(job_flow) as f:
data = json.load(f)
cluster_info = {"cluster_id": data["jobFlowId"], "instance_id": data["masterInstanceId"]}
parent_dir, current_dir = os.path.split(os.getcwd())
if parent_dir == "/mnt/var/lib/hadoop/steps":
cluster_info["step_id"] = current_dir
else:
cluster_info = {}
cls._cluster_info = cluster_info
return cls._cluster_info
class Monitor(metaclass=MetaMonitor):
"""
Context manager to monitor ETL steps for some target table.
Monitor instances have these properties which will be stored in the event payload:
environment: a description of the source folder (aka prefix)
etl_id: a UUID for each ETL run (All monitors of the same ETL run with the same 'etl_id'.)
target: name of table or view in the data warehouse
        step: command that is running, like 'dump' or 'load'
The payloads will have at least the properties of the Monitor instance and:
event: one of ('start', 'finish', 'fail')
timestamp: UTC timestamp
In case of errors, they are added as an array 'errors'. It is also possible to send
some extra information into monitor payloads. Anything extra must be of type list,
dict, str, or int (or bad things will happen).
Example usage of attributes:
>>> id_ = Monitor.etl_id
>>> isinstance(id_, str)
True
>>> Monitor.etl_id == id_
True
>>> Monitor.environment
Traceback (most recent call last):
...
ValueError: value of 'environment' is None
>>> Monitor.environment = 'saturn'
>>> Monitor.environment
'saturn'
Example use of a monitor instance (with dry_run=True to avoid persistence calls during testing):
>>> m = Monitor('schema.table', 'frobnicate', dry_run=True)
>>> payload = MonitorPayload(m, 'test', utc_now())
>>> payload.step
'frobnicate'
>>> payload.event
'test'
Normally, you would leave the creation of the payload to the context manager:
>>> with Monitor('schema.table', 'frobnicate', dry_run=True):
... pass
"""
# See MetaMonitor class for getters and setters
_trace_key = None
_environment = None
_cluster_info = None
def __init__(self, target: str, step: str, dry_run: bool = False, **kwargs) -> None:
self._monitor_id = trace_key()
self._target = target
self._step = step
self._dry_run = dry_run
# Create a deep copy so that changes made later by the caller don't alter our payload.
self._extra = deepcopy(dict(**kwargs))
self._index = self._extra.get("index")
# Read-only properties (in order of cardinality)
@property
def environment(self):
return Monitor.environment
@property
def cluster_info(self):
return Monitor.cluster_info
@property
def etl_id(self):
return Monitor.etl_id
@property
def target(self):
return self._target
@property
def step(self):
return self._step
@property
def monitor_id(self):
return self._monitor_id
def __enter__(self):
if self._index:
logger.info(
"Starting %s step for '%s' (%d/%d)",
self.step,
self.target,
self._index["current"],
self._index["final"],
)
else:
logger.info("Starting %s step for '%s'", self.step, self.target)
self._start_time = utc_now()
payload = MonitorPayload(self, STEP_START, self._start_time, extra=self._extra)
payload.emit(dry_run=self._dry_run)
return self
def __exit__(self, exc_type, exc_value, tb):
self._end_time = utc_now()
seconds = elapsed_seconds(self._start_time, self._end_time)
if exc_type is None:
event = STEP_FINISH
errors = None
logger.info("Finished %s step for '%s' (%0.2fs)", self._step, self._target, seconds)
else:
event = STEP_FAIL
errors = [
{
"code": (exc_type.__module__ + "." + exc_type.__qualname__).upper(),
"message": traceback.format_exception_only(exc_type, exc_value)[0].strip(),
}
]
logger.warning("Failed %s step for '%s' (%0.2fs)", self._step, self._target, seconds)
payload = MonitorPayload(
self, event, self._end_time, elapsed=seconds, errors=errors, extra=self._extra
)
payload.emit(dry_run=self._dry_run)
def add_extra(self, key, value):
if key in self._extra:
raise KeyError("duplicate key in 'extra' payload")
self._extra[key] = value
@classmethod
def marker_payload(cls, step: str):
monitor = cls(_DUMMY_TARGET, step)
return MonitorPayload(monitor, STEP_FINISH, utc_now(), elapsed=0, extra={"is_marker": True})
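# Hedged usage sketch (the relation name, step and counts below are made up;
# dry_run=True keeps the payloads out of the persistence layer):
#
#   with Monitor('www.orders', 'load',
#                index={'current': 1, 'final': 3, 'name': 'load'},
#                dry_run=True) as monitor:
#       monitor.add_extra('rowcount', 42)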
class InsertTraceKey(logging.Filter):
"""Called as a logging filter: insert the ETL id as the trace key into the logging record."""
def filter(self, record):
record.trace_key = Monitor.etl_id
return True
class PayloadDispatcher:
def store(self, payload):
"""Send payload to persistence layer."""
raise NotImplementedError("PayloadDispatcher failed to implement store method")
class MonitorPayload:
"""
Simple class to encapsulate data for Monitor events which knows how to morph into JSON etc.
You should consider all attributes to be read-only with the possible exception of 'errors'
that may be set to a list of objects (in JSON-terminology) with 'code' and 'message' fields.
(Which is to say: do not modify the payload object!)
"""
# Append instances with a 'store' method here (skipping writing a metaclass this time)
dispatchers: List[PayloadDispatcher] = []
def __init__(self, monitor, event, timestamp, elapsed=None, errors=None, extra=None):
# Basic info
self.environment = monitor.environment
self.etl_id = monitor.etl_id
self.target = monitor.target
self.step = monitor.step
self.monitor_id = monitor.monitor_id
self.event = event
self.timestamp = timestamp
# Premium info (when available)
self.cluster_info = monitor.cluster_info
self.elapsed = elapsed
self.errors = errors
self.extra = extra
def emit(self, dry_run=False):
payload = vars(self)
# Delete entries that are often not present:
for key in ["cluster_info", "elapsed", "extra", "errors"]:
if not payload[key]:
del payload[key]
compact_text = json.dumps(payload, sort_keys=True, separators=(",", ":"), cls=FancyJsonEncoder)
if dry_run:
logger.debug("Dry-run: payload = %s", compact_text)
return
logger.debug("Monitor payload = %s", compact_text)
for d in MonitorPayload.dispatchers:
d.store(payload)
class DynamoDBStorage(PayloadDispatcher):
"""
Store ETL events in a DynamoDB table.
Note the table is created if it doesn't already exist when a payload needs to be stored.
"""
@staticmethod
def factory() -> "DynamoDBStorage":
table_name = "{}-{}".format(etl.config.get_config_value("resource_prefix"), "events")
return DynamoDBStorage(
table_name,
etl.config.get_config_value("etl_events.read_capacity"),
etl.config.get_config_value("etl_events.write_capacity"),
)
def __init__(self, table_name, read_capacity, write_capacity):
self.table_name = table_name
self.initial_read_capacity = read_capacity
self.initial_write_capacity = write_capacity
# Avoid default sessions and have one table reference per thread
self._thread_local_table = threading.local()
def get_table(self, create_if_not_exists=True):
"""Get table reference from DynamoDB or create it (within a new session)."""
session = boto3.session.Session()
logger.debug(f"Started new boto3 session in region '{session.region_name}'")
dynamodb = session.resource("dynamodb")
try:
table = dynamodb.Table(self.table_name)
status = table.table_status
logger.info(f"Found existing events table '{self.table_name}' in DynamoDB (status: {status})")
except botocore.exceptions.ClientError as exc:
# Check whether this is just a ResourceNotFoundException (sadly a 400, not a 404)
if exc.response["ResponseMetadata"]["HTTPStatusCode"] != 400:
raise
# Nullify assignment and start over
table = None
status = None
if not (status == "ACTIVE" or create_if_not_exists):
raise ETLRuntimeError("DynamoDB table '%s' does not exist or is not active" % self.table_name)
if table is None:
logger.info(f"Creating DynamoDB table: '{self.table_name}'")
table = dynamodb.create_table(
TableName=self.table_name,
KeySchema=[
{"AttributeName": "target", "KeyType": "HASH"},
{"AttributeName": "timestamp", "KeyType": "RANGE"},
],
AttributeDefinitions=[
{"AttributeName": "target", "AttributeType": "S"},
{"AttributeName": "timestamp", "AttributeType": "N"},
],
ProvisionedThroughput={
"ReadCapacityUnits": self.initial_read_capacity,
"WriteCapacityUnits": self.initial_write_capacity,
},
)
status = table.table_status
if status != "ACTIVE":
logger.info(f"Waiting for events table '{self.table_name}' to become active")
table.wait_until_exists()
logger.debug(
f"Finished creating or updating events table '{self.table_name}' (arn={table.table_arn})"
)
return table
def store(self, payload: dict, _retry: bool = True):
"""
Actually send the payload to the DynamoDB table.
If this is the first call at all, then get a reference to the table, or even create the
table as necessary.
This method will try to store the payload a second time if there's an error in the first
attempt.
"""
try:
table = getattr(self._thread_local_table, "table", None)
if not table:
table = self.get_table()
self._thread_local_table.table = table
item = dict(payload)
# Cast timestamp (and elapsed seconds) into Decimal since DynamoDB cannot handle float.
            # But decimals may be finicky when instantiated from float, so we make sure to fix the
# number of decimals.
item["timestamp"] = Decimal("%.6f" % item["timestamp"].timestamp())
if "elapsed" in item:
item["elapsed"] = Decimal("%.6f" % item["elapsed"])
table.put_item(Item=item)
except botocore.exceptions.ClientError:
# Something bad happened while talking to the service ... just try one more time.
if _retry:
logger.warning("Trying to store payload a second time after this mishap:", exc_info=True)
self._thread_local_table.table = None
delay = random.uniform(3, 10)
logger.debug("Snoozing for %.1fs", delay)
time.sleep(delay)
self.store(payload, _retry=False)
else:
raise
class _ThreadingSimpleServer(socketserver.ThreadingMixIn, http.server.HTTPServer):
pass
class MemoryStorage(PayloadDispatcher):
"""
Store ETL events in memory and make the events accessible via HTTP.
When the ETL is running for extract, load, or unload, connect to port 8086.
When the ETL is running on a host other than your local computer, say in EC2, then use
    port forwarding to send requests from your host to an address seen on the other host:
ssh -L 8086:localhost:8086 <hostname>
The output should pass validator at https://validator.w3.org/#validate_by_input+with_options
"""
SERVER_HOST = "" # meaning: all that we can bind to locally
SERVER_PORT = 8086
def __init__(self):
self.queue = queue.Queue()
self.events = OrderedDict()
self.start_server()
def store(self, payload: dict):
self.queue.put(payload)
def _drain_queue(self):
try:
while True:
payload = self.queue.get_nowait()
if not payload.get("extra", {}).get("is_marker", False):
# Overwrite earlier events by later ones
key = payload["target"], payload["step"]
self.events[key] = payload
except queue.Empty:
pass
def get_indices(self):
self._drain_queue()
indices = {}
counter = Counter()
for payload in self.events.values():
index = dict(payload.get("extra", {}).get("index", {}))
name = index.setdefault("name", "N/A")
if name not in indices:
indices[name] = index
elif index["current"] > indices[name]["current"]:
indices[name].update(index)
if payload["event"] != STEP_START:
counter[name] += 1
indices[name]["counter"] = counter[name]
indices_as_list = [indices[name] for name in sorted(indices)]
return etl.assets.Content(json=indices_as_list)
def get_events(self, event_id: Optional[str]):
self._drain_queue()
if event_id is None:
events_as_list = sorted(
(self.events[key] for key in self.events),
key=lambda p: (2 if p["event"] == STEP_START else 1, p["timestamp"]),
reverse=True,
)
else:
events_as_list = [event for event in self.events.values() if event["monitor_id"] == event_id]
return etl.assets.Content(json=events_as_list)
def create_handler(self):
"""Return a handler that serves our storage content, used as factory method."""
storage = self
http_logger = logging.getLogger("arthur_http")
class MonitorHTTPHandler(http.server.BaseHTTPRequestHandler):
server_version = "MonitorHTTPServer/1.0"
log_error = http_logger.error
log_message = http_logger.info
def do_GET(self):
"""
Serve a GET (or HEAD) request.
We serve assets or JSON via the API.
If the command is HEAD (and not GET), only the header is sent. Duh.
"""
parts = urllib.parse.urlparse(self.path.rstrip("/"))
path = (parts.path or "/index.html").lstrip("/")
if path == "api/etl-id":
result = etl.assets.Content(json={"id": Monitor.etl_id})
elif path == "api/indices":
result = storage.get_indices()
elif path.startswith("api/events"):
segment = path.replace("api/events", "").strip("/")
result = storage.get_events(segment or None)
elif path == "api/command-line":
result = etl.assets.Content(json={"args": " ".join(sys.argv)})
elif etl.assets.asset_exists(path):
result = etl.assets.get_asset(path)
else:
# self.send_response(HTTPStatus.NOT_FOUND)
self.send_response(HTTPStatus.MOVED_PERMANENTLY)
new_parts = (parts.scheme, parts.netloc, "/", None, None)
new_url = urllib.parse.urlunsplit(new_parts)
self.send_header("Location", new_url)
self.end_headers()
return
self.send_response(HTTPStatus.OK)
self.send_header("Content-Type", result.content_type)
self.send_header("Content-Length", result.content_length)
if result.content_encoding is not None:
self.send_header("Content-Encoding", result.content_encoding)
self.send_header("Last-Modified", result.last_modified)
if result.cache_control is not None:
self.send_header("Cache-Control", result.cache_control)
self.end_headers()
if self.command == "GET":
self.wfile.write(result.content)
do_HEAD = do_GET
return MonitorHTTPHandler
def start_server(self):
"""Start background daemon to serve our events."""
handler_class = self.create_handler()
class BackgroundServer(threading.Thread):
def run(self):
logger.info("Starting background server for monitor on port %d", MemoryStorage.SERVER_PORT)
try:
httpd = _ThreadingSimpleServer(
(MemoryStorage.SERVER_HOST, MemoryStorage.SERVER_PORT), handler_class
)
httpd.serve_forever()
except Exception as exc:
logger.info("Background server stopped: %s", str(exc))
try:
thread = BackgroundServer(daemon=True)
thread.start()
except RuntimeError:
logger.warning("Failed to start monitor server:", exc_info=True)
def start_monitors(environment):
Monitor.environment = environment
memory = MemoryStorage()
MonitorPayload.dispatchers.append(memory)
if etl.config.get_config_value("etl_events.enabled"):
ddb = DynamoDBStorage.factory()
MonitorPayload.dispatchers.append(ddb)
else:
logger.warning("Writing events to a DynamoDB table is disabled in settings.")
def _format_output_column(key: str, value: str) -> str:
if value is None:
return "---"
elif key == "timestamp":
# Make timestamp readable by turning epoch seconds into a date.
return datetime.utcfromtimestamp(float(value)).replace(microsecond=0).isoformat()
elif key == "elapsed":
# Reduce number of decimals to 2.
return "{:6.2f}".format(float(value))
elif key == "rowcount":
return "{:9d}".format(int(value))
else:
return value
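# Examples of the formatting above (input values are made up):
#
#   _format_output_column("timestamp", "1500000000.25")  ->  '2017-07-14T02:40:00'
#   _format_output_column("elapsed", "12.3456")          ->  ' 12.35'
#   _format_output_column("rowcount", "42")              ->  '       42'
#   _format_output_column("event", None)                 ->  '---'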
def _query_for_etls(step=None, hours_ago=0, days_ago=0) -> List[dict]:
"""Search for ETLs by looking for the "marker" event at the start of an ETL command."""
start_time = datetime.utcnow() - timedelta(days=days_ago, hours=hours_ago)
epoch_seconds = timegm(start_time.utctimetuple())
attribute_values = {
":marker": _DUMMY_TARGET,
":epoch_seconds": epoch_seconds,
":finish_event": STEP_FINISH,
}
if step is not None:
attribute_values[":step"] = step
filter_exp = "event = :finish_event"
if step is not None:
filter_exp += " and step = :step"
ddb = DynamoDBStorage.factory()
table = ddb.get_table(create_if_not_exists=False)
response = table.query(
ConsistentRead=True,
ExpressionAttributeNames={"#timestamp": "timestamp"}, # "timestamp" is a reserved word.
ExpressionAttributeValues=attribute_values,
KeyConditionExpression="target = :marker and #timestamp > :epoch_seconds",
FilterExpression=filter_exp,
ProjectionExpression="etl_id, step, #timestamp",
ReturnConsumedCapacity="TOTAL",
)
if "LastEvaluatedKey" in response:
logger.warning("This is is a partial result! Last evaluated key: '%s'", response["LastEvaluatedKey"])
logger.info(
"Query result: count = %d, scanned count = %d, consumed capacity = %f",
response["Count"],
response["ScannedCount"],
response["ConsumedCapacity"]["CapacityUnits"],
)
return response["Items"]
def query_for_etl_ids(hours_ago=0, days_ago=0) -> None:
"""Show recent ETLs with their step and execution start."""
etl_info = _query_for_etls(hours_ago=hours_ago, days_ago=days_ago)
keys = ["etl_id", "step", "timestamp"]
rows = [[_format_output_column(key, info[key]) for key in keys] for info in etl_info]
rows.sort(key=itemgetter(keys.index("timestamp")))
print(etl.text.format_lines(rows, header_row=keys))
def scan_etl_events(etl_id, selected_columns: Optional[Iterable[str]] = None) -> None:
"""
Scan for all events belonging to a specific ETL.
If a list of columns is provided, then the output is limited to those columns.
But note that the target (schema.table) and the event are always present.
"""
ddb = DynamoDBStorage.factory()
table = ddb.get_table(create_if_not_exists=False)
available_columns = ["target", "step", "event", "timestamp", "elapsed", "rowcount"]
if selected_columns is None:
selected_columns = available_columns
# We will always select "target" and "event" to have a meaningful output.
columns = list(fy.filter(frozenset(selected_columns).union(["target", "event"]), available_columns))
keys = ["extra.rowcount" if column == "rowcount" else column for column in columns]
# We need to scan here since the events are stored by "target" and not by "etl_id".
# TODO Try to find all the "known" relations and query on them with a filter on the etl_id.
client = boto3.client("dynamodb")
paginator = client.get_paginator("scan")
response_iterator = paginator.paginate(
TableName=table.name,
ConsistentRead=False,
ExpressionAttributeNames={"#timestamp": "timestamp"},
ExpressionAttributeValues={
":etl_id": {"S": etl_id},
":marker": {"S": _DUMMY_TARGET},
":start_event": {"S": STEP_START},
},
FilterExpression="etl_id = :etl_id and target <> :marker and event <> :start_event",
ProjectionExpression="target, step, event, #timestamp, elapsed, extra.rowcount",
ReturnConsumedCapacity="TOTAL",
# PaginationConfig={
# "PageSize": 100
# }
)
logger.info("Scanning events table '%s' for elapsed times", table.name)
consumed_capacity = 0.0
scanned_count = 0
rows: List[List[str]] = []
deserialize = TypeDeserializer().deserialize
for response in response_iterator:
consumed_capacity += response["ConsumedCapacity"]["CapacityUnits"]
scanned_count += response["ScannedCount"]
# We need to turn something like "'event': {'S': 'finish'}" into "'event': 'finish'".
deserialized = [
{key: deserialize(value) for key, value in item.items()} for item in response["Items"]
]
# Lookup "elapsed" or "extra.rowcount" (the latter as ["extra", "rowcount"]).
items = [{key: fy.get_in(item, key.split(".")) for key in keys} for item in deserialized]
# Scope down to selected keys and format the columns.
rows.extend([_format_output_column(key, item[key]) for key in keys] for item in items)
logger.info("Scan result: scanned count = %d, consumed capacity = %f", scanned_count, consumed_capacity)
if "timestamp" in keys:
rows.sort(key=itemgetter(keys.index("timestamp")))
else:
rows.sort(key=itemgetter(keys.index("target")))
print(etl.text.format_lines(rows, header_row=columns))
class EventsQuery:
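"""Reusable DynamoDB query that returns the latest finish or fail event for a given target."""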
def __init__(self, step: Optional[str] = None) -> None:
self._keys = ["target", "step", "event", "timestamp", "elapsed", "extra.rowcount"]
values = {
":target": None, # will be set when called
":epoch_seconds": None, # will be set when called
":start_event": STEP_START,
}
# Only look for finish or fail events
filter_exp = "event <> :start_event"
if step is not None:
values[":step"] = step
filter_exp += " and step = :step"
base_query = {
"ConsistentRead": False,
"ExpressionAttributeNames": {"#timestamp": "timestamp"},
"ExpressionAttributeValues": values,
"KeyConditionExpression": "target = :target and #timestamp > :epoch_seconds",
"FilterExpression": filter_exp,
"ProjectionExpression": "target, step, event, #timestamp, elapsed, extra.rowcount",
}
self._base_query = base_query
@property
def keys(self):
return self._keys[:]
def __call__(self, table, target, epoch_seconds):
query = deepcopy(self._base_query)
query["ExpressionAttributeValues"][":target"] = target
query["ExpressionAttributeValues"][":epoch_seconds"] = epoch_seconds
response = table.query(**query)
events = [{key: fy.get_in(item, key.split(".")) for key in self.keys} for item in response["Items"]]
# Return latest event or None
if events:
events.sort(key=itemgetter("timestamp"))
return events[-1]
return None
class BackgroundQueriesRunner(threading.Thread):
"""
An instance of this thread will repeatedly try to run queries on a DynamoDB table.
Every time a query returns a result, this result is sent to a queue and the query will no
longer be tried.
"""
def __init__(
self, targets, query, consumer_queue, start_time, update_interval, idle_time_out, **kwargs
) -> None:
super().__init__(**kwargs)
self.targets = list(targets)
self.query = query
self.queue = consumer_queue
self.start_time = start_time
self.update_interval = update_interval
self.idle_time_out = idle_time_out
def run(self):
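"""Poll the events table for the remaining targets until all have reported, the idle time-out expires, or no update interval is set."""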
ddb = DynamoDBStorage.factory()
table = ddb.get_table(create_if_not_exists=False)
targets = self.targets
start_time = self.start_time
idle = Timer()
while targets:
logger.debug(
"Waiting for events for %d target(s), start time = '%s'",
len(targets),
datetime.utcfromtimestamp(start_time).isoformat(),
)
new_start_time = datetime.utcnow() - timedelta(seconds=1) # avoid rounding errors
query_loop = Timer()
retired = set()
for target in targets:
latest_event = self.query(table, target, start_time)
if latest_event:
self.queue.put(latest_event)
retired.add(latest_event["target"])
targets = [t for t in targets if t not in retired]
start_time = timegm(new_start_time.utctimetuple())
if self.update_interval is None or not targets:
break
if retired:
idle = Timer()
elif self.idle_time_out and idle.elapsed > self.idle_time_out:
logger.info(
"Idle time-out: Waited for %d seconds but no events arrived, " "%d target(s) remaining",
self.idle_time_out,
len(targets),
)
break
if query_loop.elapsed < self.update_interval:
time.sleep(self.update_interval - query_loop.elapsed)
logger.info(
"Found events for %d out of %d target(s)", len(self.targets) - len(targets), len(self.targets)
)
self.queue.put(None)
def recently_extracted_targets(source_relations, start_time):
"""
Query the events table for "extract" events on the provided source_relations after start_time.
Waits for up to an hour, sleeping for 30s between checks.
Return the set of targets (ie, relation.identifier or event["target"]) with successful extracts.
"""
targets = [relation.identifier for relation in source_relations]
query = EventsQuery("extract")
consumer_queue = queue.Queue() # type: ignore
start_as_epoch = timegm(start_time.utctimetuple())
timeout = 60 * 60
extract_querying_thread = BackgroundQueriesRunner(
targets, query, consumer_queue, start_as_epoch, update_interval=30, idle_time_out=timeout, daemon=True
)
extract_querying_thread.start()
extracted_targets = set()
while True:
try:
event = consumer_queue.get(timeout=timeout)
if event is None:
break
if event["event"] == STEP_FINISH:
extracted_targets.add(event["target"])
except queue.Empty:
break
return extracted_targets
def summarize_events(relations, step: Optional[str] = None) -> None:
"""Summarize latest ETL step for the given relations by showing elapsed time and row count."""
etl_info = _query_for_etls(step=step, days_ago=7)
if not len(etl_info):
logger.warning("Found no ETLs within the last 7 days")
return
latest_etl = sorted(etl_info, key=itemgetter("timestamp"))[-1]
latest_start = latest_etl["timestamp"]
logger.info("Latest ETL: %s", latest_etl)
ddb = DynamoDBStorage.factory()
table = ddb.get_table(create_if_not_exists=False)
query = EventsQuery(step)
events = []
schema_events: Dict[str, Dict[str, Union[str, Decimal]]] = {}
for relation in tqdm(
desc="Querying for events", disable=None, iterable=relations, leave=False, unit="table"
):
event = query(table, relation.identifier, latest_start)
if event:
# Make the column for row counts easier to read by dropping "extra.".
event["rowcount"] = event.pop("extra.rowcount")
events.append(dict(event, kind=relation.kind))
schema = relation.target_table_name.schema
if schema not in schema_events:
schema_events[schema] = {
"target": schema,
"kind": "---",
"step": event["step"],
"timestamp": Decimal(0),
"event": "complete",
"elapsed": Decimal(0),
"rowcount": Decimal(0),
}
if event["timestamp"] > schema_events[schema]["timestamp"]:
schema_events[schema]["timestamp"] = event["timestamp"]
schema_events[schema]["elapsed"] += event["elapsed"]
schema_events[schema]["rowcount"] += event["rowcount"] if event["rowcount"] else 0
# Add pseudo events to show schemas are done.
events.extend(schema_events.values())
keys = ["target", "kind", "step", "timestamp", "event", "elapsed", "rowcount"]
rows = [[_format_output_column(key, info[key]) for key in keys] for info in events]
rows.sort(key=itemgetter(keys.index("timestamp")))
print(etl.text.format_lines(rows, header_row=keys))
def tail_events(
relations, start_time, update_interval=None, idle_time_out=None, step: Optional[str] = None
) -> None:
"""Tail the events table and show latest finish or fail events coming in."""
targets = [relation.identifier for relation in relations]
query = EventsQuery(step)
consumer_queue = queue.Queue() # type: ignore
epoch_seconds = timegm(start_time.utctimetuple())
thread = BackgroundQueriesRunner(
targets, query, consumer_queue, epoch_seconds, update_interval, idle_time_out, daemon=True
)
thread.start()
events = []
n_printed = 0
done = False
while not done:
progress = Timer()
while progress.elapsed < 10:
try:
event = consumer_queue.get(timeout=10)
if event is None:
done = True
break
event["timestamp"] = datetime.utcfromtimestamp(event["timestamp"]).isoformat()
events.append(event)
except queue.Empty:
break
# Keep printing tail of table that accumulates the events.
if len(events) > n_printed:
lines = etl.text.format_lines(
[[event[header] for header in query.keys] for event in events], header_row=query.keys
).split("\n")
if n_printed:
print("\n".join(lines[n_printed + 2 : -1])) # skip header and final "(x rows)" line
else:
print("\n".join(lines[:-1])) # only skip the "(x rows)" line
n_printed = len(lines) - 3 # header, separator, final = 3 extra rows
if done:
print(lines[-1])
def test_run():
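# Generate fake monitor events so the in-memory HTTP viewer can be tested interactively.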
Monitor.environment = "test" # type: ignore
memory = MemoryStorage()
MonitorPayload.dispatchers.append(memory)
schema_names = ["auburn", "burgundy", "cardinal", "flame", "fuchsia"]
table_names = ["apple", "banana", "cantaloupe", "durian", "fig"]
index = {"current": 0, "final": len(schema_names) * len(table_names)}
host = MemoryStorage.SERVER_HOST if MemoryStorage.SERVER_HOST else "localhost"
print("Creating events ... follow along at http://{}:{}/".format(host, MemoryStorage.SERVER_PORT))
with Monitor("color.fruit", "test", index={"current": 1, "final": 1, "name": "outer"}):
for i, names in enumerate(itertools.product(schema_names, table_names)):
try:
with Monitor(".".join(names), "test", index=dict(index, current=i + 1)):
time.sleep(random.uniform(0.5, 2.0))
# Create an error on one "table" so that highlighting of errors can be tested:
if i == 9:
raise RuntimeError("An error occurred!")
except RuntimeError:
pass
input("Press return (or Ctrl-c) to stop server\n")
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
# This allows to test the HTTP server. When running inside a Docker container, make sure
# that port 8086 is exposed. (bin/run_arthur.sh -w).
# Invoke using "python -m etl.monitor" inside the Docker container and follow along
# with "open http://localhost:8086" from your host.
test_run()
| mit | -3,399,315,863,732,583,000 | 38.123941 | 110 | 0.600601 | false |
att-comdev/armada | armada/tests/unit/handlers/test_manifest.py | 1 | 19376 | # Copyright 2017 The Armada Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import os
import yaml
import testtools
from armada import const
from armada import exceptions
from armada.handlers import manifest
from armada.utils import validate
class ManifestTestCase(testtools.TestCase):
def setUp(self):
super(ManifestTestCase, self).setUp()
examples_dir = os.path.join(
os.getcwd(), 'armada', 'tests', 'unit', 'resources')
with open(os.path.join(examples_dir, 'keystone-manifest.yaml')) as f:
self.documents = list(yaml.safe_load_all(f.read()))
def test_get_documents(self):
armada_manifest = manifest.Manifest(self.documents)
self.assertIsInstance(armada_manifest.charts, list)
self.assertIsInstance(armada_manifest.groups, list)
self.assertIsNotNone(armada_manifest.manifest)
self.assertEqual(4, len(armada_manifest.charts))
self.assertEqual(2, len(armada_manifest.groups))
self.assertEqual([self.documents[x] for x in range(4)],
armada_manifest.charts)
self.assertEqual([self.documents[x] for x in range(4, 6)],
armada_manifest.groups)
self.assertEqual(self.documents[-1], armada_manifest.manifest)
def test_get_documents_with_target_manifest(self):
# Validate that specifying `target_manifest` flag returns the correct
# manifest.
armada_manifest = manifest.Manifest(
self.documents, target_manifest='armada-manifest')
self.assertIsInstance(armada_manifest.charts, list)
self.assertIsInstance(armada_manifest.groups, list)
self.assertIsNotNone(armada_manifest.manifest)
self.assertEqual(4, len(armada_manifest.charts))
self.assertEqual(2, len(armada_manifest.groups))
self.assertEqual([self.documents[x] for x in range(4)],
armada_manifest.charts)
self.assertEqual([self.documents[x] for x in range(4, 6)],
armada_manifest.groups)
self.assertEqual(self.documents[-1], armada_manifest.manifest)
self.assertEqual('armada-manifest',
self.documents[-1]['metadata']['name'])
def test_get_documents_with_multi_manifest_and_target_manifest(self):
# Validate that specifying `target_manifest` flag returns the correct
# manifest even if there are multiple existing manifests. (Only works
# when the manifest names are distinct or else should raise error.)
documents = copy.deepcopy(self.documents)
other_manifest = copy.deepcopy(self.documents[-1])
other_manifest['metadata']['name'] = 'alt-armada-manifest'
documents.append(other_manifest)
# Specify the "original" manifest and verify it works.
armada_manifest = manifest.Manifest(
documents, target_manifest='armada-manifest')
self.assertIsInstance(armada_manifest.charts, list)
self.assertIsInstance(armada_manifest.groups, list)
self.assertIsNotNone(armada_manifest.manifest)
self.assertEqual(4, len(armada_manifest.charts))
self.assertEqual(2, len(armada_manifest.groups))
self.assertEqual([self.documents[x] for x in range(4)],
armada_manifest.charts)
self.assertEqual([self.documents[x] for x in range(4, 6)],
armada_manifest.groups)
self.assertEqual(armada_manifest.manifest, self.documents[-1])
self.assertEqual('armada-manifest',
armada_manifest.manifest['metadata']['name'])
# Specify the alternative manifest and verify it works.
armada_manifest = manifest.Manifest(
documents, target_manifest='alt-armada-manifest')
self.assertIsNotNone(armada_manifest.manifest)
self.assertEqual(other_manifest, armada_manifest.manifest)
self.assertEqual('alt-armada-manifest',
armada_manifest.manifest['metadata']['name'])
def test_get_manifest(self):
armada_manifest = manifest.Manifest(self.documents)
obtained_manifest = armada_manifest.get_manifest()
self.assertIsInstance(obtained_manifest, dict)
self.assertEqual(obtained_manifest['armada'],
armada_manifest.manifest['data'])
def test_find_documents(self):
armada_manifest = manifest.Manifest(self.documents)
chart_documents, chart_groups, manifests = armada_manifest. \
_find_documents()
# checking if all the chart documents are present
self.assertIsInstance(chart_documents, list)
helm_toolkit_chart = armada_manifest. \
find_chart_document('helm-toolkit')
self.assertEqual(chart_documents[0], helm_toolkit_chart)
mariadb_chart = armada_manifest.find_chart_document('mariadb')
self.assertEqual(chart_documents[1], mariadb_chart)
memcached_chart = armada_manifest.find_chart_document('memcached')
self.assertEqual(chart_documents[2], memcached_chart)
keystone_chart = armada_manifest.find_chart_document('keystone')
self.assertEqual(chart_documents[3], keystone_chart)
# checking if all the chart group documents are present
self.assertIsInstance(chart_groups, list)
keystone_infra_services_chart_group = armada_manifest. \
find_chart_group_document('keystone-infra-services')
self.assertEqual(chart_groups[0],
keystone_infra_services_chart_group)
openstack_keystone_chart_group = armada_manifest. \
find_chart_group_document('openstack-keystone')
self.assertEqual(chart_groups[1], openstack_keystone_chart_group)
# verifying the manifests
self.assertIsInstance(manifests, list)
self.assertEqual(manifests[0], armada_manifest.manifest)
def test_verify_chart_documents(self):
armada_manifest = manifest.Manifest(self.documents)
helm_toolkit_chart = armada_manifest. \
find_chart_document('helm-toolkit')
self.assertIsInstance(helm_toolkit_chart, dict)
self.assertEqual(self.documents[0], helm_toolkit_chart)
mariadb_chart = armada_manifest.find_chart_document('mariadb')
self.assertIsInstance(mariadb_chart, dict)
self.assertEqual(self.documents[1], mariadb_chart)
memcached_chart = armada_manifest.find_chart_document('memcached')
self.assertIsInstance(memcached_chart, dict)
self.assertEqual(self.documents[2], memcached_chart)
keystone_chart = armada_manifest.find_chart_document('keystone')
self.assertIsInstance(keystone_chart, dict)
self.assertEqual(self.documents[3], keystone_chart)
def test_verify_chart_group_documents(self):
armada_manifest = manifest.Manifest(self.documents)
ok_chart = armada_manifest. \
find_chart_group_document('openstack-keystone')
self.assertIsInstance(ok_chart, dict)
self.assertEqual(self.documents[-2], ok_chart)
armada_manifest = manifest.Manifest(self.documents)
kis_chart = armada_manifest.find_chart_group_document(
'keystone-infra-services')
self.assertIsInstance(kis_chart, dict)
self.assertEqual(self.documents[-3], kis_chart)
def test_verify_build_armada_manifest(self):
armada_manifest = manifest.Manifest(self.documents)
built_armada_manifest = armada_manifest.build_armada_manifest()
self.assertIsNotNone(built_armada_manifest)
self.assertIsInstance(built_armada_manifest, dict)
# the first chart group in the Armada manifest
keystone_infra_services_chart_group = armada_manifest. \
find_chart_group_document('keystone-infra-services')
keystone_infra_services_chart_group_data = \
keystone_infra_services_chart_group.get('data')
self.assertEqual(keystone_infra_services_chart_group_data,
built_armada_manifest['data']['chart_groups'][0])
# the first chart group in the Armada manifest
openstack_keystone_chart_group = armada_manifest. \
find_chart_group_document('openstack-keystone')
openstack_keystone_chart_group_data = \
openstack_keystone_chart_group.get('data')
self.assertEqual(openstack_keystone_chart_group_data,
built_armada_manifest['data']['chart_groups'][1])
def test_verify_build_chart_group_deps(self):
armada_manifest = manifest.Manifest(self.documents)
# building the deps for openstack-keystone chart group
chart_group = armada_manifest.find_chart_group_document(
'openstack-keystone')
openstack_keystone_chart_group_deps = armada_manifest. \
build_chart_group(chart_group)
openstack_keystone_chart_group_deps_dep_added = \
openstack_keystone_chart_group_deps[
'data']['chart_group'][0]['chart']['dependencies']
# keystone chart dependencies
keystone_chart = armada_manifest.find_chart_document('keystone')
keystone_chart_with_deps = armada_manifest.build_chart_deps(
keystone_chart)
keystone_dependencies = keystone_chart_with_deps[
'data']['dependencies']
self.assertEqual(openstack_keystone_chart_group_deps_dep_added[0],
keystone_dependencies[0])
# building the deps for openstack-keystone chart group
chart_group = armada_manifest.find_chart_group_document(
'keystone-infra-services')
openstack_keystone_chart_group_deps = armada_manifest. \
build_chart_group(chart_group)
keystone_infra_services_dep_added = \
openstack_keystone_chart_group_deps[
'data']['chart_group'][0]['chart']['dependencies']
# building mariadb chart dependencies
mariadb_chart = armada_manifest.find_chart_document('mariadb')
mariadb_chart_with_deps = armada_manifest.build_chart_deps(
mariadb_chart)
mariadb_dependencies = mariadb_chart_with_deps[
'data']['dependencies']
# building memcached chart dependencies
memcached_chart = armada_manifest.find_chart_document('memcached')
memcached_chart_with_deps = armada_manifest.build_chart_deps(
memcached_chart)
memcached_dependencies = memcached_chart_with_deps[
'data']['dependencies']
self.assertEqual(keystone_infra_services_dep_added[0],
mariadb_dependencies[0])
self.assertEqual(keystone_infra_services_dep_added[0],
memcached_dependencies[0])
def test_verify_build_chart_deps(self):
armada_manifest = manifest.Manifest(self.documents)
# helm-toolkit chart
helm_toolkit_chart = armada_manifest.find_chart_document(
'helm-toolkit')
helm_toolkit_original_dependency = helm_toolkit_chart.get('data')
helm_toolkit_chart_with_deps = armada_manifest.build_chart_deps(
helm_toolkit_chart).get('data')
# since not dependent on other charts, the original and modified
# dependencies are the same
self.assertEqual(helm_toolkit_original_dependency,
helm_toolkit_chart_with_deps)
# helm-toolkit dependency, the basis for comparison of
# dependencies in other charts
expected_helm_toolkit_dependency = {'chart': helm_toolkit_chart.get(
'data')}
# keystone chart dependencies
keystone_chart = armada_manifest.find_chart_document('keystone')
original_keystone_chart = copy.deepcopy(keystone_chart)
keystone_chart_with_deps = armada_manifest.build_chart_deps(
keystone_chart)
self.assertNotEqual(original_keystone_chart, keystone_chart_with_deps)
self.assertIn('data', keystone_chart_with_deps)
self.assertIn('dependencies', keystone_chart_with_deps['data'])
keystone_dependencies = keystone_chart_with_deps[
'data']['dependencies']
self.assertIsInstance(keystone_dependencies, list)
self.assertEqual(1, len(keystone_dependencies))
self.assertEqual(expected_helm_toolkit_dependency,
keystone_dependencies[0])
# mariadb chart dependencies
mariadb_chart = armada_manifest.find_chart_document('mariadb')
original_mariadb_chart = copy.deepcopy(mariadb_chart)
mariadb_chart_with_deps = armada_manifest.build_chart_deps(
mariadb_chart)
self.assertNotEqual(original_mariadb_chart, mariadb_chart_with_deps)
self.assertIn('data', mariadb_chart_with_deps)
self.assertIn('dependencies', mariadb_chart_with_deps['data'])
mariadb_dependencies = mariadb_chart_with_deps[
'data']['dependencies']
self.assertIsInstance(mariadb_dependencies, list)
self.assertEqual(1, len(mariadb_dependencies))
self.assertEqual(expected_helm_toolkit_dependency,
mariadb_dependencies[0])
# memcached chart dependencies
memcached_chart = armada_manifest.find_chart_document('memcached')
original_memcached_chart = copy.deepcopy(memcached_chart)
memcached_chart_with_deps = armada_manifest.build_chart_deps(
memcached_chart)
self.assertNotEqual(original_memcached_chart,
memcached_chart_with_deps)
self.assertIn('data', memcached_chart_with_deps)
self.assertIn('dependencies', memcached_chart_with_deps['data'])
memcached_dependencies = memcached_chart_with_deps[
'data']['dependencies']
self.assertIsInstance(memcached_dependencies, list)
self.assertEqual(1, len(memcached_dependencies))
self.assertEqual(expected_helm_toolkit_dependency,
memcached_dependencies[0])
class ManifestNegativeTestCase(testtools.TestCase):
def setUp(self):
super(ManifestNegativeTestCase, self).setUp()
examples_dir = os.path.join(
os.getcwd(), 'armada', 'tests', 'unit', 'resources')
with open(os.path.join(examples_dir, 'keystone-manifest.yaml')) as f:
self.documents = list(yaml.safe_load_all(f.read()))
def test_get_documents_multi_manifests_raises_value_error(self):
# Validates that finding multiple manifests without `target_manifest`
# flag raises exceptions.ManifestException.
documents = copy.deepcopy(self.documents)
documents.append(documents[-1]) # Copy the last manifest.
error_re = r'Multiple manifests are not supported.*'
self.assertRaisesRegexp(
exceptions.ManifestException, error_re, manifest.Manifest,
documents)
def test_get_documents_multi_target_manifests_raises_value_error(self):
# Validates that finding multiple manifests with `target_manifest`
# flag raises exceptions.ManifestException.
documents = copy.deepcopy(self.documents)
documents.append(documents[-1]) # Copy the last manifest.
error_re = r'Multiple manifests are not supported.*'
self.assertRaisesRegexp(
exceptions.ManifestException, error_re, manifest.Manifest,
documents, target_manifest='armada-manifest')
def test_get_documents_missing_manifest(self):
# Validates exceptions.ManifestException is thrown if no manifest is
# found. Manifest is last document in sample YAML.
error_re = ('Documents must be a list of documents with at least one '
'of each of the following schemas: .*')
self.assertRaisesRegexp(
exceptions.ManifestException, error_re, manifest.Manifest,
self.documents[:-1])
def test_get_documents_missing_charts(self):
# Validates exceptions.ManifestException is thrown if no chart is
# found. Charts are first 4 documents in sample YAML.
error_re = ('Documents must be a list of documents with at least one '
'of each of the following schemas: .*')
self.assertRaisesRegexp(
exceptions.ManifestException, error_re, manifest.Manifest,
self.documents[4:])
def test_get_documents_missing_chart_groups(self):
# Validates exceptions.ManifestException is thrown if no chart is
# found. ChartGroups are 5-6 documents in sample YAML.
documents = self.documents[:4] + [self.documents[-1]]
error_re = ('Documents must be a list of documents with at least one '
'of each of the following schemas: .*')
self.assertRaisesRegexp(
exceptions.ManifestException, error_re, manifest.Manifest,
documents)
def test_find_chart_document_negative(self):
armada_manifest = manifest.Manifest(self.documents)
error_re = r'Could not find a %s named "%s"' % (
const.DOCUMENT_CHART, 'invalid')
self.assertRaisesRegexp(exceptions.ManifestException, error_re,
armada_manifest.find_chart_document, 'invalid')
def test_find_group_document_negative(self):
armada_manifest = manifest.Manifest(self.documents)
error_re = r'Could not find a %s named "%s"' % (
const.DOCUMENT_GROUP, 'invalid')
self.assertRaisesRegexp(exceptions.ManifestException, error_re,
armada_manifest.find_chart_group_document,
'invalid')
def test_build_chart_deps_with_missing_dependency_fails(self):
"""Validate that attempting to build a chart that points to
a missing dependency fails.
"""
self.documents[1]['data']['dependencies'] = ['missing-dependency']
valid, details = validate.validate_armada_documents(self.documents)
self.assertFalse(valid)
def test_build_chart_group_with_missing_chart_grp_fails(self):
"""Validate that attempting to build a chart group document with
missing chart group fails.
"""
self.documents[5]['data']['chart_group'] = ['missing-chart-group']
valid, details = validate.validate_armada_documents(self.documents)
self.assertFalse(valid)
def test_build_armada_manifest_with_missing_chart_grps_fails(self):
"""Validate that attempting to build a manifest with missing
chart groups fails.
"""
self.documents[6]['data']['chart_groups'] = ['missing-chart-groups']
valid, details = validate.validate_armada_documents(self.documents)
self.assertFalse(valid)
| apache-2.0 | 8,929,328,809,343,949,000 | 43.748268 | 79 | 0.658805 | false |
linuxrocks123/MailTask | mt_chronos.py | 1 | 1986 | #! /usr/bin/env python
# MailTask Alpha: The Email Manager
# Copyright (C) 2015 Patrick Simmons
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#Note: This library should have no dependencies on other parts of MailTask.
#This is to allow Chronos-Ananke messages to be generated and parsed
#from external software.
#Just to be clear, the license to this file is still GPLv3, though.
##Given a string representing the body of an email message,
# return a four-tuple with the information necessary to create
# a calendar event from it using the MT-CHRONOS-ANANKE format.
# The format of the return tuple:
# ("summary","description",epoch-starttime,epoch-endtime)
def extract_calendar_event(email_body):
inside_calendar=False
lines = email_body.splitlines()
i=0
while i<len(lines):
if lines[i].find("MT-CHRONOS-ANANKE")!=-1:
try:
to_return=(lines[i+1],lines[i+2],int(lines[i+3]),int(lines[i+4]))
return to_return
except:
pass
i+=1
return None
##Generate the MT-CHRONOS-ANANKE event string to put in the body of an email message
def gen_calendar_event(summary,description,starttime,endtime):
to_return="MT-CHRONOS-ANANKE\n"
to_return+=summary+"\n"
to_return+=description+"\n"
to_return+=repr(starttime)+"\n"
to_return+=repr(endtime)+"\n"
return to_return
| gpl-3.0 | -4,021,911,731,758,143,000 | 37.192308 | 84 | 0.703424 | false |
delcypher/klee-runner | tools/show-klee-dir.py | 1 | 5821 | #!/usr/bin/env python
# vim: set sw=4 ts=4 softtabstop=4 expandtab:
"""
Perform verification of a klee-runner result yaml file and associated working
directory.
"""
import argparse
from enum import Enum
import logging
import os
# pylint: disable=wrong-import-position
from load_klee_analysis import add_kleeanalysis_to_module_search_path
from load_klee_runner import add_KleeRunner_to_module_search_path
add_kleeanalysis_to_module_search_path()
add_KleeRunner_to_module_search_path()
import KleeRunner.ResultInfo
import KleeRunner.DriverUtil as DriverUtil
from kleeanalysis.kleedir.kleedir import KleeDir
_logger = logging.getLogger(__name__)
def main(argv):
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("klee_dir", default=None)
parser.add_argument("--messages",
default=False,
action="store_true")
parser.add_argument("--warnings",
default=False,
action="store_true")
parser.add_argument("--show-invalid",
dest="show_invalid",
default=False,
action="store_true")
parser.add_argument("--show-error-locations",
dest="show_error_locations",
default=False,
action="store_true")
DriverUtil.parserAddLoggerArg(parser)
args = parser.parse_args(args=argv)
DriverUtil.handleLoggerArgs(args, parser)
if not os.path.exists(args.klee_dir):
_logger.error("Klee directory \"{}\" does not exist".format(args.klee_dir))
return 1
if not os.path.isdir(args.klee_dir):
_logger.error("\"{}\" is not a directory".format(args.klee_dir))
return 1
_logger.info('Reading KLEE directory "{}"'.format(args.klee_dir))
klee_dir = KleeDir(args.klee_dir)
_logger.info('Finished reading KLEE directory')
# Report stuff about the KLEE directory
if not klee_dir.is_valid:
if args.show_invalid:
_logger.warning("\n")
_logger.warning('KLEE directory is invalid. Showing all available information.')
_logger.warning("\n")
else:
_logger.error('KLEE directory is invalid (use --show-invalid to show anyway)')
return 1
_logger.info('Total # of test cases: {}'.format(len(klee_dir.tests)))
_logger.info('#'*70)
abort_errors = list(klee_dir.abort_errors)
_logger.info('# of abort errors: {}'.format(len(abort_errors)))
show_error_locations(abort_errors, args.show_error_locations)
assert_errors = list(klee_dir.assertion_errors)
_logger.info('# of assert errors: {}'.format(len(assert_errors)))
show_error_locations(assert_errors, args.show_error_locations)
division_errors = list(klee_dir.division_errors)
_logger.info('# of division errors: {}'.format(len(division_errors)))
show_error_locations(division_errors, args.show_error_locations)
execution_errors = list(klee_dir.execution_errors)
_logger.info('# of execution errors: {}'.format(len(execution_errors)))
show_error_locations(execution_errors, args.show_error_locations)
free_errors = list(klee_dir.free_errors)
_logger.info('# of free errors: {}'.format(len(free_errors)))
show_error_locations(free_errors, args.show_error_locations)
overflow_errors = list(klee_dir.overflow_errors)
_logger.info('# of overflow errors: {}'.format(len(overflow_errors)))
show_error_locations(overflow_errors, args.show_error_locations)
overshift_errors = list(klee_dir.overshift_errors)
_logger.info('# of overshift errors: {}'.format(len(overshift_errors)))
ptr_errors = list(klee_dir.ptr_errors)
_logger.info('# of ptr errors: {}'.format(len(ptr_errors)))
read_only_errors = list(klee_dir.read_only_errors)
_logger.info('# of read only errors: {}'.format(len(read_only_errors)))
user_errors = list(klee_dir.user_errors)
_logger.info('# of user errors: {}'.format(len(user_errors)))
misc_errors = list(klee_dir.misc_errors)
_logger.info('# of misc errors: {}'.format(len(misc_errors)))
_logger.info('#'*70)
successful_terminations = list(klee_dir.successful_terminations)
_logger.info('# of successful terminations: {}'.format(len(successful_terminations)))
_logger.info('#'*70)
early_terminations = list(klee_dir.early_terminations)
_logger.info('# of early terminations: {}'.format(len(early_terminations)))
# Show the reason for early termination by count
reasonCounts = dict()
for t in early_terminations:
msg = ' '.join(t.early.message).strip()
if msg not in reasonCounts:
reasonCounts[msg] = 1
else:
reasonCounts[msg] += 1
for reason, count in sorted(reasonCounts.items(), key=lambda i: i[0]):
_logger.info("\"{}\": {}".format(reason, count))
if args.messages:
_logger.info('#'*70)
msgs = ''.join(klee_dir.messages)
_logger.info('KLEE messages:\n{}'.format(msgs))
if args.warnings:
_logger.info('#'*70)
warnings = ''.join(klee_dir.warnings)
_logger.info('KLEE warnings:\n{}'.format(warnings))
def show_error_locations(tests, enabled):
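# Print error location, assembly line and stack trace for each failing test case (when enabled).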
assert isinstance(tests, list)
if not enabled:
return
for test in tests:
error = test.error
msg = "{msg}: {file}:{line}\n".format(
file=error.file,
line=error.line,
msg=error.message)
msg += "assembly line: {}\n".format(error.assembly_line)
if len(error.stack) > 0:
msg += "stack:\n"
for l in error.stack:
msg += l
_logger.info(msg)
if __name__ == '__main__':
import sys
sys.exit(main(sys.argv[1:]))
| mit | -734,669,082,042,310,700 | 36.076433 | 92 | 0.633912 | false |
codesy/codesy | payments/migrations/0001_initial.py | 1 | 1590 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-08-03 23:59
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='StripeAccount',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('account_id', models.CharField(blank=True, max_length=100)),
('secret_key', models.CharField(blank=True, max_length=100)),
('public_key', models.CharField(blank=True, max_length=100)),
('available_balance', models.DecimalField(blank=True, decimal_places=2, default=0, max_digits=6)),
('verification', models.TextField(blank=True, default=b'')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='StripeEvent',
fields=[
('event_id', models.CharField(blank=True, max_length=100, primary_key=True, serialize=False)),
('type', models.CharField(blank=True, max_length=100)),
('message_text', models.TextField()),
('processed', models.BooleanField(default=False)),
],
),
]
| agpl-3.0 | -4,125,141,983,997,128,000 | 38.75 | 118 | 0.597484 | false |
Dutchj/pbtweeter | pbtweeter/speedrun.py | 1 | 1678 | import config as cfg
import json
from datetime import datetime
from urllib2 import urlopen, quote
def get_lb():
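"""Fetch the records leaderboard for the configured game from the speedrun.com API."""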
try:
response = urlopen('http://www.speedrun.com/api_records.php?amount=999&game='+quote(cfg.game))
return json.load(response)
except Exception, e:
print datetime.now().strftime('[%Y-%m-%d %H:%M:%S]'), 'Error getting leaderboard data:', e
return
def get_twitter_handle(user):
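"""Look up a speedrun.com user by name and return their Twitter handle as '@name', or '' if unavailable."""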
try:
response = urlopen('http://www.speedrun.com/api/v1/users?max=200&name='+user)
users = json.load(response)
except Exception, e:
print datetime.now().strftime('[%Y-%m-%d %H:%M:%S]'), 'Error getting user search:', e
return
else:
if users['data'] is []:
print "Unable to retrieve Twitter handle: No data, user most likely doesn't exist"
return ''
for entry in users['data']:
if entry['names']['international'].lower() == user.lower():
identifier = entry['id']
break
else:
print "Unable to retrieve Twitter handle: User doesn't exist"
return ''
try:
response = urlopen('http://www.speedrun.com/api/v1/users/'+str(identifier))
except Exception, e:
print datetime.now().strftime('[%Y-%m-%d %H:%M:%S]'), 'Error getting user data:', e
return
else:
user_data = json.load(response)
if user_data['data']['twitter'] is None:
return ''
twitter_link = user_data['data']['twitter']['uri']
return '@' + twitter_link.replace('http://www.twitter.com/', '').replace('%40', '')
| gpl-2.0 | -8,179,702,171,126,471,000 | 37.136364 | 102 | 0.564958 | false |
wpoely86/easybuild-easyblocks | easybuild/easyblocks/r/rserve.py | 1 | 1691 | ##
# Copyright 2009-2016 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing the Bioconductor R library, implemented as an easyblock
@author: Stijn De Weirdt (Ghent University)
@author: Dries Verdegem (Ghent University)
@author: Kenneth Hoste (Ghent University)
@author: Jens Timmerman (Ghent University)
@author: Toon Willems (Ghent University)
"""
from easybuild.easyblocks.generic.rpackage import RPackage
class EB_Rserve(RPackage):
"""Build and install Rserve R library."""
def run(self):
"""Set LIBS environment variable correctly prior to building."""
self.configurevars = ['LIBS="$LIBS -lpthread"']
super(EB_Rserve, self).run()
| gpl-2.0 | -6,841,785,805,231,180,000 | 37.431818 | 101 | 0.740982 | false |
mjirik/lisa | lisa/vessels_segmentation.py | 1 | 3265 | #! /usr/bin/python
# -*- coding: utf-8 -*-
# import functions from another directory
import sys
import os.path
import io3d
path_to_script = os.path.dirname(os.path.abspath(__file__))
#import featurevector
import unittest
from loguru import logger
import logging
# import apdb
# apdb.set_trace();
# import scipy.io
import numpy as np
# ----------------- my scripts --------
import argparse
from . import segmentation_general
class VesselSegmentation:
def __init__(self):
pass
def set_params(self):
pass
def run(self):
pass
def get_output(self):
pass
if __name__ == "__main__":
# Configure the standard logging module (the loguru logger needs no handler setup).
logging.basicConfig()
logging.getLogger().setLevel(logging.WARNING)
#logger.debug('input params')
# input parser
parser = argparse.ArgumentParser(description='Segment vessels from liver')
parser.add_argument('-dd','--dcmdir',
default=None,
help='path to data dir')
parser.add_argument('-d', '--debug', action='store_true',
help='run in debug mode')
parser.add_argument('-i', '--inputfile', default=None,
help='input file or directory with data')
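# The following arguments are assumed additions: the script later references
# args.outputfile and args.biggest, which the original parser did not define.
parser.add_argument('-o', '--outputfile', default=None,
help='output file for the segmentation result')
parser.add_argument('-b', '--biggest', action='store_true',
help='keep only the biggest objects during vessel segmentation')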
args = parser.parse_args()
if args.debug:
logging.getLogger().setLevel(logging.DEBUG)
defaultoutputfile = "vessels.pkl"
if args.outputfile is None:
args.outputfile = defaultoutputfile
#else:
#dcm_read_from_dir('/home/mjirik/data/medical/data_orig/46328096/')
#data3d, metadata = dcmreaddata.dcm_read_from_dir()
datap = io3d.read(args.inputfile)
import sed3
# pyed = sed3.sed3(oseg.orig_scale_segmentation)
#pyed.show()
# information about crop
#cri = oseg.crinfo
#oseg.data3d = oseg.data3d[cri[0][0]:cri[0][1],cri[1][0]:cri[1][1],cri[2][0]:cri[2][1]]
#pyed = sed3.sed3(oseg.data3d, contour = oseg.orig_scale_segmentation)
print('slab', datap['slab'])
#import ipdb; ipdb.set_trace() # BREAKPOINT
#pyed = sed3.sed3(data['data3d'], contour = data['segmentation'])
#pyed.show()
#import pdb; pdb.set_trace()
outputTmp = segmentation_general.vesselSegmentation(
datap['data3d'],
segmentation = datap['segmentation'],
#segmentation = oseg.orig_scale_segmentation,
threshold = -1,
inputSigma = 0.15,
dilationIterations = 2,
nObj = 1,
biggestObjects = args.biggest,
# dataFiltering = True,
interactivity = True,
binaryClosingIterations = 2,
binaryOpeningIterations = 0)
datap['slab']['none'] = 0
datap['slab']['liver'] = 1
datap['slab']['porta'] = 2
#print np.max(output)
#import pdb; pdb.set_trace()
#data = {}
#data['data3d'] = oseg.data3d
#data['crinfo'] = oseg.crinfo
#data['segmentation'] = oseg.segmentation
datap['segmentation'][outputTmp] = datap['slab']['porta']
#data['slab'] = slab
pyed = sed3.sed3(datap['data3d'], contour=datap['segmentation'] == datap['slab']['porta'])
pyed.show()
#pyed = sed3.sed3(data['segmentation'])
#pyed.show()
# Free memory
if args.outputfile is not None:
io3d.write(datap, args.outputfile)
| bsd-3-clause | -3,153,561,909,537,196,000 | 23.89313 | 94 | 0.616069 | false |
voilet/cmdb | assets/ztree/api.py | 1 | 11656 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# =============================================================================
# FileName: api.py
# Desc: 2015-15/4/16: 5:54 PM
# Author: 苦咖啡
# Email: [email protected]
# HomePage: http://blog.kukafei520.net
# History:
# =============================================================================
from django.shortcuts import render_to_response, HttpResponseRedirect, HttpResponse
from django.template import RequestContext
from django.contrib.auth.decorators import login_required
import commands, json, yaml
from assets.models import Project
from mysite.settings import auth_key
from assets.models import Host, IDC
import hashlib, time
from django.views.decorators.csrf import csrf_exempt
from django.shortcuts import redirect
# Login
from users.models import CustomUser
from assets.models import project_swan
from assets.ztree.service import ztree_tag
from django.shortcuts import get_object_or_404
from assets.models import Host, IDC, Service, Line, Project, HostRecord
from cmdb_auth.models import AuthNode
# songxs add
@login_required
def ztree_project(request):
line_list = Line.objects.filter()
business = Project.objects.filter(line__isnull=False)
no_business = Project.objects.filter(line__isnull=True)
ztree_data = ztree_tag(request.user.username)
return render_to_response('default/default.html', locals(), context_instance=RequestContext(request))
@login_required
def ztree_business(request):
"""
Tree request validation.
:param request:
:return:
"""
business_name = request.GET.get("uuid", False)
get_token = str(request.GET.get("token", False))
ztree_data = ztree_tag(request.user.username)
try:
sum_token = str(hashlib.sha1(request.user.username + auth_key + business_name +
time.strftime('%Y-%m-%d', time.localtime(time.time()))).hexdigest())
except TypeError:
sum_token = False
if request.GET.get("options") == "host":
uuid = request.GET.get('uuid', '')
ip = request.GET.get('ip', '')
if uuid:
host = get_object_or_404(Host, uuid=uuid)
elif ip:
host = get_object_or_404(Host, eth1=ip)
host_record = HostRecord.objects.filter(host=host).order_by('-time')
user_audit = AuthNode.objects.filter(node=host)
audit_count = user_audit.count()
return render_to_response('ztree/host_detail.html', locals(), context_instance=RequestContext(request))
content_status = True
idle = request.GET.get("idle", False)
if get_token != sum_token:
content_status = False
return render_to_response('ztree/ztree_service.html', locals(), context_instance=RequestContext(request))
if business_name != u"未分类":
try:
bus_data = Project.objects.get(uuid=request.GET.get("uuid"))
if not idle:
server_list = Host.objects.filter(business=bus_data, idle=True).order_by("create_time")
else:
server_list = Host.objects.filter(business=bus_data, idle=False).order_by("create_time")
except:
pass
else:
bus_data = u'未分类'
idc_data = IDC.objects.filter(type=1)
if not idle:
server_list = Host.objects.filter(business__isnull=True, idc=idc_data, idle=True).order_by("create_time")
else:
server_list = Host.objects.filter(business__isnull=True, idc=idc_data, idle=False).order_by("create_time")
if request.GET.get("options") == "swan_push":
s = Ztree_class(business_name, request.user.first_name)
rst = s.swan()
rst_data = rst.get("swan_name")
status = len(rst_data)
return render_to_response('ztree/swan.html', locals(), context_instance=RequestContext(request))
if request.GET.get("options") == "doc":
data = Project.objects.get(pk=business_name)
# return render_to_response('ztree/swan.html', locals(), context_instance=RequestContext(request))
return render_to_response('markdown/index.html', locals(), context_instance=RequestContext(request))
if request.GET.get("options") == "highstate":
project = Project.objects.get(uuid=business_name)
host_list = Host.objects.filter(business=project)
return render_to_response('ztree/highstate.html', locals(), context_instance=RequestContext(request))
if request.GET.get("options") == "monitor":
return render_to_response('ztree/zabbix_count.html', locals(), context_instance=RequestContext(request))
if request.GET.get("options") == "salt":
return render_to_response('ztree/saltstack.html', locals(), context_instance=RequestContext(request))
if request.GET.get("options") == "project":
ip_list = []
server_list = {}
line_name = Line.objects.get(pk=business_name)
business_data = Project.objects.filter(line=business_name)
for i in business_data:
node = Host.objects.filter(business=i, idle=True)
for k in node:
if k.eth1 not in ip_list:
ip_list.append(k.eth1)
server_list[str(k.uuid)] = k.eth1
count = len(ip_list)
return render_to_response('ztree/project.html', locals(), context_instance=RequestContext(request))
if request.GET.get("options") == "types":
get_env = request.GET.get("name")
business_data = Project.objects.filter(pk=business_name)
server_list = Host.objects.filter(business=business_data, env=get_env).order_by("-create_time")
count = server_list.count()
return render_to_response('ztree/ztree.html', locals(), context_instance=RequestContext(request))
if request.GET.get("options") == "service":
s = []
bus_data = Project.objects.get(uuid=business_name)
server_list = Host.objects.filter(business=bus_data, idle=True).order_by("create_time")
for i in server_list:
t = i.service.all()
for b in t:
if b not in s:
s.append(b)
tag = request.GET.get("tgt", False)
if tag:
service_all = Service.objects.get(name=tag)
server_list = Host.objects.filter(service=service_all, business=bus_data)
return render_to_response('ztree/ztree_service.html', locals(), context_instance=RequestContext(request))
count = server_list.count()
return render_to_response('ztree/ztree.html', locals(), context_instance=RequestContext(request))
@login_required
def CdnCache(request):
"""
Tree request validation.
:param request:
:return:
"""
service = request.GET.get("services")
get_token = str(request.GET.get("token"))
uuid = str(request.GET.get("uuid"))
sum_token = str(hashlib.sha1(request.user.username + auth_key + service + time.strftime('%Y-%m-%d', time.localtime(
time.time()))).hexdigest())
content_status = True
if get_token != sum_token:
content_status = False
idc_data = IDC.objects.get(uuid=uuid)
service_all = Service.objects.get(name=service)
server_list = Host.objects.filter(idc=idc_data, service=service_all)
business_name = idc_data.name
service_tag = service
return render_to_response('ztree/service.html', locals(), context_instance=RequestContext(request))
@login_required
def CdnIdc(request):
"""
Tree request validation.
:param request:
:return:
"""
get_token = str(request.GET.get("token"))
uuid = str(request.GET.get("uuid"))
idc_data = IDC.objects.get(uuid=uuid)
sum_token = str(hashlib.sha1(request.user.username + auth_key + idc_data.name + time.strftime('%Y-%m-%d',
time.localtime(
time.time()))).hexdigest())
content_status = True
if get_token != sum_token:
content_status = False
server_list = Host.objects.filter(idc=idc_data)
business_name = idc_data.name
return render_to_response('ztree/idc.html', locals(), context_instance=RequestContext(request))
class Ztree_class(object):
"""
ztree helper class
"""
def __init__(self, project_name, user):
self.project_name = project_name
self.user = user
def monitor(self):
return True
def swan(self):
rst_data = {}
user_info = CustomUser.objects.get(first_name=self.user)
myform_rst = Project.objects.get(uuid=self.project_name)
rst = project_swan.objects.filter(project_name_id=myform_rst.uuid)
"""
Collect all release names of the current project into a list.
"""
swan_name_list = [i.swan_name for i in rst]
swan_push = user_info.project_swan_set.all()
user = CustomUser.objects.get(first_name=self.user)
if user.is_superuser:
for i in rst:
rst_data[str(i.uuid)] = i.swan_name
else:
swan_push = user_info.project_swan_set.all()
for i in swan_push:
if i.swan_name in swan_name_list:
rst_data[str(i.uuid)] = i.swan_name
host_list = myform_rst.host_set.all()
content = {"swan_name": rst_data, "host": host_list}
return content
def highstate(self):
project = Project.objects.get(service_name=self.project_name)
# server_list = project.host_set
host_list = Host.objects.filter(business=project)
return True
@csrf_exempt
def ZtreeIndex(request):
"""
:param request:
:return:
"""
if request.method == 'POST':
otherParam = request.POST.get("otherParam")
status = request.POST.get("status")
line_id = request.POST.get("line_id")
try:
name = request.POST.get("name")
id = request.POST.get("id")
except:
name = False
if not name:
ztree = ztree_tag(request.user.username)
return HttpResponse(json.dumps(ztree, ensure_ascii=False, indent=4))
elif int(status[0]) == 1:
ztree = []
return HttpResponse(json.dumps(ztree, ensure_ascii=False, indent=4))
else:
ztree = []
bus_data = Project.objects.get(service_name=name)
server_list = Host.objects.filter(business=bus_data).order_by("create_time")
s = []
for i in server_list:
t = i.service.all().values()
for b in t:
if b not in s:
s.append(b)
tree_id = 0
for i in s:
tree_id += 1
token = hashlib.sha1(request.user.username + auth_key + i.get("name") + time.strftime('%Y-%m-%d',
time.localtime(
time.time()))).hexdigest()
ztree.append({"id": tree_id, "status": 3, "line_id": line_id, "name": i.get("name"), "token": token,
"t": i.get("name"), "business": bus_data.service_name})
return HttpResponse(json.dumps(ztree, ensure_ascii=False, indent=4))
content = {"status": 403, "message": "auth error"}
return HttpResponse(json.dumps(content, ensure_ascii=False, indent=4))
| agpl-3.0 | 1,779,326,221,662,780,700 | 34.814241 | 136 | 0.585927 | false |
Cygnus-Inc/Cygnet-Adapter | docs/conf.py | 1 | 1240 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import sys
sys.path.append(os.path.abspath('src/cygnet_adapter'))
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinxcontrib.napoleon'
]
if os.getenv('SPELLCHECK'):
extensions += 'sphinxcontrib.spelling',
spelling_show_suggestions = True
spelling_lang = 'en_US'
source_suffix = '.rst'
master_doc = 'index'
project = u'cygnet_adapter'
year = u'2015'
author = u'Cygnus'
copyright = '{0}, {1}'.format(year, author)
version = release = u'0.1.0'
# on_rtd is whether we are on readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
pygments_style = 'trac'
templates_path = ['.']
html_use_smartypants = True
html_last_updated_fmt = '%b %d, %Y'
html_split_index = True
html_sidebars = {'**': ['searchbox.html', 'globaltoc.html', 'sourcelink.html'], }
html_short_title = '%s-%s' % (project, version)
| apache-2.0 | -935,034,756,308,814,500 | 27.181818 | 81 | 0.670161 | false |
qpiel/python_estimation_source | Script/AllSourcesFit_Ctools.py | 1 | 3157 | import os,sys
from os.path import join
import ctools
from ctoolsAnalysis.config import get_config,get_default_config
from ctoolsAnalysis.LikeFit import CTA_ctools_analyser
from Common_Functions import *
Astropy = True
try :
import Coordinates.CoordHandler as CH
import Coordinates.CoordUtilities as CU
from astropy.coordinates import FK5
except :
Astropy = False
def DoFit(source,area,simutime):
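# Run a ctools likelihood fit for one source, selecting the IRF that matches the requested observation time.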
#define source name. This also allows to have the coordinate if you have astropy
#source = "AP Librae"
ra = None
dec = None
if Astropy:
coord = CH.CoordinatesHandler.fromName(source,FK5)
ra, dec = CU.GetCoordInDegrees(coord)
#default work and out path. Not use if you provide a config file in the command line
out = join(os.getcwd(), "out")
work = join(os.getcwd(), "work")
# setup : Time, Energy and IRFS. Not use if you provide a config file in the command line
tmin = 0
#tmax = 3600
tmaxtmp=simutime*3600
tmax = int(tmaxtmp)
irfTime=IrfChoice(simutime)
emin = 0.2
emax = 10
irf = str(area)+"_"+str(irfTime)+"h"
caldb = "prod2"
#irf = "South_0.5h"
#quentin
fitsFile = out+"/"+source.replace(" ","")+"_"+str(simutime)+"h"+"_event00001.fits"
#fitsFile="/Users/gateflorian/Documents/CTAtools/Script/out/Mkn180_event00001.fits"
#fitsFile = "/Users/gateflorian/Documents/CTAtools/Script/out/table_20161213.fits"
try: #conf file provided
config_tmp = get_config(sys.argv[-1])
config = MakeconfigFromFile(out,work,source,ra,dec,config_tmp)
#config["file"]["inobs"] = sys.argv[-2]
config["file"]["inobs"] = fitsFile
except: #Not use if you provide a config file in the command line
config = MakeconfigFromDefault(out,work,source,ra,dec)
config["file"]["inobs"] = fitsFile
#config["file"]["inobs"] = sys.argv[-1]
config.write(open(work+"/Fit_"+source.replace(" ","")+"_"+str(simutime)+"h"+".conf", 'w'))
Analyse = CTA_ctools_analyser.fromConfig(config)
#set up if there is no config file provided
if len(sys.argv) == 1 :
Analyse.SetTimeRange(tmin,tmax)
Analyse.SetEnergyRange(emin,emax)
Analyse.SetIRFs(caldb,irf)
Analyse.create_fit(log = True,debug = False)
Analyse.fit()
Analyse.PrintResults()
def IrfChoice(simutime):
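# Pick the closest available IRF duration (0.5, 5 or 50 h) to the simulated observation time.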
attempt1=abs(simutime-0.5)
attempt2=abs(simutime-5)
attempt3=abs(simutime-50)
mini=min(attempt1,attempt2,attempt3)
if mini==attempt1:
irfResult=0.5
elif mini==attempt2:
irfResult=5
else:
irfResult=50
return irfResult
#DoFit("AP Librae","South",0.5)
with open("Source2.txt") as f:
for line in f:
# print (line.split("\t")[0])
DoFit(line.split("\t")[0],line.split("\t")[5],0.3)
#raw_input("Press Enter to terminate.")
#DoFit(line.split("\t")[0],line.split("\t")[5],3)
#raw_input("Press Enter to terminate.")
#DoFit(line.split("\t")[0],line.split("\t")[5],10)
#DoFit(line.split("\t")[0],line.split("\t")[5],30)
#DoFit(line.split("\t")[0],line.split("\t")[5],100)
| gpl-3.0 | -4,762,860,779,201,160,000 | 31.214286 | 98 | 0.638581 | false |
Aeronautics/aero | aero/commands/install.py | 1 | 1362 | # -*- coding: utf-8 -*-
from aero.__version__ import __version_info__
__author__ = 'nickl-'
from .base import CommandProcessor as CommandProcessor
class InstallCommand(CommandProcessor):
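"""Coroutine pipeline that reports whether the adapter's install call succeeded, based on its exit code."""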
from .base import coroutine
package = ''
adapter = ''
def wiring(self):
self.out = self.write()
self.ticker.routine(self.progress(None))
return self.each(self.spacing(self.call(self.res())))
def seen(self, command, adapter, package, result=False):
self.package = package
self.adapter = adapter
return result
@coroutine
def res(self):
while True:
res = (yield)
if res[1] == 0:
print 'Successfully installed package: {} with {}'.format(self.package, self.adapter)
else:
print 'Aborted: Error while installing package: {} {} returned exit code {}'.format(
self.package, self.adapter, res[1]
)
@coroutine
def write(self):
import sys
out = sys.stdout
while True:
text = (yield)
out.write(text)
@coroutine
def spacing(self, target):
while True:
payload = (yield)
print u'\n'
target.send(payload)
@coroutine
def progress(self, responder):
while True: (yield)
| bsd-3-clause | -6,355,324,622,234,375,000 | 24.698113 | 101 | 0.556535 | false |
pmghalvorsen/gramps_branch | gramps/gui/editors/displaytabs/groupembeddedlist.py | 1 | 14240 | #
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2006 Donald N. Allingham
# 2009 Benny Malengier
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# python
#
#-------------------------------------------------------------------------
import sys
if sys.version_info[0] < 3:
import cPickle as pickle
else:
import pickle
#-------------------------------------------------------------------------
#
# GTK libraries
#
#-------------------------------------------------------------------------
from gi.repository import Gdk
from gi.repository import Gtk
from gi.repository import Pango
from gi.repository import GObject
from gi.repository import GLib
#-------------------------------------------------------------------------
#
# GRAMPS classes
#
#-------------------------------------------------------------------------
from ...utils import is_right_click
from .embeddedlist import EmbeddedList, TEXT_COL, MARKUP_COL, ICON_COL
#-------------------------------------------------------------------------
#
# Classes
#
#-------------------------------------------------------------------------
class GroupEmbeddedList(EmbeddedList):
"""
This class provides the base class for all the list tabs that show
grouped data.
It maintains a Gtk.TreeView, including the selection and button sensitivity.
"""
_WORKGROUP = 0
def __init__(self, dbstate, uistate, track, name, build_model,
share_button=False, move_buttons=False, jump_button=False, **kwargs):
"""
Create a new list, using the passed build_model to populate the list.
"""
self.kwargs = kwargs
EmbeddedList.__init__(self, dbstate, uistate, track, name, build_model,
share_button, move_buttons, jump_button)
#connect click on the first column
self.columns[0].connect('clicked', self.groupcol_click)
for col in self.columns[1:]:
col.connect('clicked', self.col_click)
self.dbsort = True
def construct_model(self):
"""
        Method that creates the model using the passed build_model parameter.
        Overrides the EmbeddedList calling sequence by adding the different
        groups.
"""
return self.build_model(self.get_data(), self.dbstate.db,
self.groups(), **self.kwargs)
def groups(self):
"""
Return the (group key, group name)s in the order as given by get_data()
"""
raise NotImplementedError
def groupcol_click(self, obj):
"""
The group column is clicked, sort it as it was
"""
self.columns[0].set_sort_order(Gtk.SortType.ASCENDING)
self.rebuild()
self.dbsort = True
def col_click(self, obj):
self.dbsort = False
def _on_button_press(self, obj, event):
"""
        Handle button press (not double-click; that is handled in init_interface).
"""
self._select_row_at_coords(event.x, event.y)
if is_right_click(event):
obj = self.get_selected()
if obj and obj[1]:
self._tmpgroup = obj[0]
self.right_click(obj[1], event)
return True
elif event.type == Gdk.EventType.BUTTON_PRESS and event.button == 2:
fun = self.get_middle_click()
if fun:
fun()
return True
return False
def is_empty(self):
"""
        Return True if the working group returned by get_data() contains no
        entries. Typically, get_data returns the list of associated data.
"""
return len(self.get_data()[self._WORKGROUP]) == 0
def drag_data_get(self, widget, context, sel_data, info, time):
"""
Provide the drag_data_get function, which passes a tuple consisting of:
1) Drag type defined by the .drag_type field specified by the value
assigned to _DND_TYPE
2) The id value of this object, used for the purpose of determining
the source of the object. If the source of the object is the same
as the object, we are doing a reorder instead of a normal drag
and drop
3) Pickled data. The pickled version of the selected object
4) Source row. Used for a reorder to determine the original position
of the object
"""
        # get the selected object, returning if none is defined
obj = self.get_selected()
if not obj or obj[1] is None:
#nothing selected or a grouping selected
return
# pickle the data, and build the tuple to be passed
value = (self._DND_TYPE.drag_type, id(self), obj[1],
self.find_index(obj))
data = pickle.dumps(value)
# pass as a string (8 bits)
sel_data.set(self._DND_TYPE.atom_drag_type, 8, data)
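        # Illustrative shape of the dragged payload (object value hypothetical):
        #   (self._DND_TYPE.drag_type, id(self), <dragged object>, (groupindex, index))
        # i.e. drag type, id of the source widget, the object itself, and the
        # source path as returned by find_index().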
def drag_data_received(self, widget, context, x, y, sel_data, info, time):
"""
Handle the standard gtk interface for drag_data_received.
        If the selection data is defined, extract the value from sel_data.data,
and decide if this is a move or a reorder.
"""
if sel_data and sel_data.get_data():
# make sure data = 1 row
# pickle.loads(sel_data.data)[3] = 0
try:
(mytype, selfid, obj, row_from) = pickle.loads(sel_data.get_data())
except ValueError:
return
# make sure this is the correct DND type for this object
if mytype == self._DND_TYPE.drag_type:
# determine the destination row
row = self._find_row(x, y)
                # if this is the same object, we have a move; otherwise,
                # it is a standard drag-n-drop
if id(self) == selfid and self.get_selected() is not None:
self._move(row_from, row, obj)
else:
self._handle_drag(row, obj)
self.rebuild()
elif self._DND_EXTRA and mytype == self._DND_EXTRA.drag_type:
self.handle_extra_type(mytype, obj)
def tree_drag_motion(self, *args):
"""
        On drag motion we want the list to show the database representation,
        so it is clear how saving will change the data.
"""
if not self.dbsort:
self.columns[0].clicked()
def find_index(self, obj):
"""
Returns the index of the object within the associated data.
This will be a path (groupindex, index)
"""
data = self.get_data()
groupindex = None
index = None
for groupindex, group in enumerate(data):
try:
index = group.index(obj[1])
break
except ValueError:
pass
return (groupindex, index)
def _find_row(self, x, y):
"""
Return a path as [groupindex, index] of the row on x,y.
If no row, then a new line in the working group is returned
"""
dest = self.tree.get_dest_row_at_pos(x, y)
if dest is None:
# Below last item in list
if self.is_empty():
return [self._WORKGROUP, 0]
else:
return [self._WORKGROUP, len(self.get_data()[self._WORKGROUP])]
else:
path = dest[0].get_indices()
wgroup = path[0]
if len(path) == 1:
# On a heading
if dest[1] == Gtk.TreeViewDropPosition.BEFORE:
if wgroup != 0:
# If before then put at end of previous group
return (wgroup-1, len(self.get_data()[wgroup-1]))
else:
# unless it is the first group
return (wgroup, 0)
else:
return (wgroup, 0)
else:
if dest[1] in (Gtk.TreeViewDropPosition.BEFORE,
Gtk.TreeViewDropPosition.INTO_OR_BEFORE):
return (wgroup, path[1])
else:
return (wgroup, path[1]+1)
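    # Illustrative results of _find_row() (coordinates hypothetical): a drop
    # below the last item resolves to the end of the working group, a drop on
    # the top edge of a later group heading resolves to the end of the previous
    # group, and a drop on a row resolves to that row (or the one after it).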
def _handle_drag(self, row, obj):
"""
        Drag from an external place to the row of obj.
"""
if row[0] == self._WORKGROUP:
self.get_data()[self._WORKGROUP].insert(row[1], obj)
self.changed = True
else:
self.dropnotworkgroup(row, obj)
def dropnotworkgroup(self, row, obj):
"""
        Drop of obj on a row that is not in the workgroup.
"""
pass
def _move(self, row_from, row_to, obj):
"""
        Drag and drop move of the ordering. Allowed within the workgroup.
"""
if row_from[0] == row_to[0] and row_from[0] == self._WORKGROUP:
dlist = self.get_data()[self._WORKGROUP]
if row_from[1] < row_to[1]:
dlist.insert(row_to[1], obj)
del dlist[row_from[1]]
else:
del dlist[row_from[1]]
dlist.insert(row_to[1], obj)
self.changed = True
elif row_from[0] == self._WORKGROUP:
self.move_away_work(row_from, row_to, obj)
elif row_to[0] == self._WORKGROUP:
self.move_to_work(row_from, row_to, obj)
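    # Example (illustrative): moving row (0, 3) onto (0, 1) inside the
    # workgroup deletes index 3 and re-inserts the object at index 1; moves
    # that cross a group boundary are delegated to move_away_work() /
    # move_to_work(), which do nothing unless overridden.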
def move_away_work(self, row_from, row_to, obj):
"""
        Move from the workgroup to a non-workgroup.
        Handle in an inherited class; by default nothing changes.
"""
pass
def move_to_work(self, row_from, row_to, obj):
"""
        Move from a non-workgroup to the workgroup.
        Handle in an inherited class; by default nothing changes.
"""
pass
def _move_up(self, row_from, obj, selmethod=None):
"""
Move the item a position up in the EmbeddedList.
Eg: 0,1,2,3 needs to become 0,2,1,3, here row_from = 2
"""
if row_from[0] == self._WORKGROUP:
if selmethod :
dlist = selmethod()
else :
dlist = self.get_data()[self._WORKGROUP]
del dlist[row_from[1]]
dlist.insert(row_from[1]-1, obj)
self.changed = True
self.rebuild()
#select the row
path = (self._WORKGROUP, row_from[1]-1)
self.tree.get_selection().select_path(path)
GLib.idle_add(self.tree.scroll_to_cell, path)
else:
self._move_up_notwork(row_from, obj, selmethod)
def _move_up_notwork(self, row_from, obj, selmethod=None):
"""
move up outside of workgroup
"""
pass
def _move_up_group(self, groupindex):
"""
        Move up was pressed on a group heading.
"""
pass
def _move_down(self, row_from, obj, selmethod=None):
"""
Move the item a position down in the EmbeddedList.
Eg: 0,1,2,3 needs to become 0,2,1,3, here row_from = 1
"""
if row_from[0] == self._WORKGROUP:
if selmethod :
dlist = selmethod()
else :
dlist = self.get_data()[self._WORKGROUP]
del dlist[row_from[1]]
dlist.insert(row_from[1]+1, obj)
self.changed = True
self.rebuild()
#select the row
path = (self._WORKGROUP, row_from[1]+1)
self.tree.get_selection().select_path(path)
GLib.idle_add(self.tree.scroll_to_cell, path)
else:
self._move_down_notwork(row_from, obj, selmethod)
def _move_down_notwork(self, row_from, obj, selmethod=None):
"""
move down outside of workgroup
"""
pass
def _move_down_group(self, groupindex):
"""
        Move down was pressed on a group heading.
"""
pass
def get_icon_name(self):
"""
Specifies the basic icon used for a generic list. Typically,
a derived class will override this. The icon chosen is the
STOCK_JUSTIFY_FILL icon, which in the default GTK style
looks kind of like a list.
"""
return Gtk.STOCK_JUSTIFY_FILL
def del_button_clicked(self, obj):
ref = self.get_selected()
if ref and ref[1] is not None:
if ref[0]==self._WORKGROUP:
ref_list = self.get_data()[self._WORKGROUP]
ref_list.remove(ref[1])
self.changed = True
self.rebuild()
else:
self.del_notwork(ref)
def del_notwork(self, ref):
"""
        Delete was requested for a ref that is not part of the workgroup.
"""
pass
def up_button_clicked(self, obj):
ref = self.get_selected()
if ref and ref[1] is not None:
pos = self.find_index(ref)
if pos[1] > 0 :
self._move_up(pos, ref[1])
elif ref and ref[1] is None:
self._move_up_group(ref[0])
def down_button_clicked(self, obj):
ref = self.get_selected()
if ref and ref[1] is not None:
pos = self.find_index(ref)
if pos[1] >=0 and pos[1] < len(self.get_data()[pos[0]])-1:
self._move_down(pos, ref[1])
elif ref and ref[1] is None:
self._move_down_group(ref[0])
| gpl-2.0 | -4,655,648,685,835,140,000 | 34.073892 | 86 | 0.523596 | false |
mdevaev/emonoda | emonoda/plugins/confetti/pushover.py | 1 | 3447 | """
Emonoda -- A set of tools to organize and manage your torrents
Copyright (C) 2015 Devaev Maxim <[email protected]>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import urllib.parse
from typing import List
from typing import Dict
from typing import Any
from ...optconf import Option
from ...optconf import SecretOption
from ...optconf.converters import as_string_list
from ...optconf.converters import as_path_or_empty
from . import STATUSES
from . import ResultsType
from . import WithWeb
from . import WithStatuses
from . import templated
# =====
class Plugin(WithWeb, WithStatuses):
PLUGIN_NAMES = ["pushover"]
def __init__( # pylint: disable=super-init-not-called
self,
user_key: str,
api_key: str,
devices: List[str],
title: str,
template: str,
**kwargs: Any,
) -> None:
self._init_bases(**kwargs)
self._init_opener()
self.__user_key = user_key
self.__api_key = api_key
self.__devices = devices
self.__title = title
self.__template_path = template
@classmethod
def get_options(cls) -> Dict[str, Option]:
return cls._get_merged_options({
"user_key": SecretOption(default="CHANGE_ME", help="User key"),
"api_key": SecretOption(default="CHANGE_ME", help="API/Application key"),
"devices": Option(default=[], type=as_string_list, help="Devices list (empty for all)"),
"title": Option(default="Emonoda ({source})", help="Message title"),
"template": Option(default="", type=as_path_or_empty, help="Mako template file name"),
})
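    # Illustrative configuration sketch (layout assumed, option names taken
    # from get_options() above):
    #   user_key = "<pushover user key>"
    #   api_key  = "<pushover application key>"
    #   devices  = ["phone"]
    #   title    = "Emonoda ({source})"
    #   template = ""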
def send_results(self, source: str, results: ResultsType) -> None:
for status in self._statuses:
for (file_name, result) in results[status].items():
post = {
"token": self.__api_key,
"user": self.__user_key,
"html": "1",
"title": self.__title.format(source=source),
"message": templated(
name=(self.__template_path if self.__template_path else "pushover.{source}.mako").format(source=source),
built_in=(not self.__template_path),
source=source,
file_name=file_name,
status=status,
status_msg=STATUSES[status],
result=result,
),
}
if self.__devices:
post["device"] = ",".join(self.__devices)
self._read_url(
url="https://api.pushover.net/1/messages.json",
data=urllib.parse.urlencode(post).encode("utf-8"),
)
| gpl-3.0 | 4,823,935,925,674,717,000 | 35.670213 | 128 | 0.577314 | false |
yusukemurayama/ppytrading | ppyt/filters/historical_filters.py | 1 | 1741 | # coding: utf-8
import logging
from sqlalchemy.sql import func
from ppyt.filters import FilterBase
from ppyt.models.orm import start_session, History
logger = logging.getLogger(__name__)
class AverageVolumeFilter(FilterBase):
"""平均出来形で銘柄を絞り込むクラスです。"""
_findkey = '平均出来高フィルタ' # フィルタを一意に特定できる名前をつけます。
def _setup(self, volume=None):
"""初期化処理を行います。
Args:
volume: 平均出来高の閾値
Raises:
ArgumentError: 引数チェックに引っかかった場合に発生します。
"""
self._is_valid_argument('volume', volume, int)
self.volume = float(volume)
def _filter_stocks(self, stocks):
"""銘柄を絞り込みます。
絞り込み条件:
- 過去の平均出来高が規定の値を上回っている。
Args:
stocks: 絞り込み前の銘柄のリスト
Returns:
絞り込み後の銘柄のリスト
"""
filtered_stocks = []
with start_session() as session:
for s in stocks:
avg_volume = session.query(func.avg(History.volume)) \
.filter_by(symbol=s.symbol).scalar()
logger.debug('symbol - avg_volume: {} - {}'.format(
s.symbol, avg_volume))
if avg_volume is not None and float(avg_volume) >= self.volume:
                    # If the historical average volume is at or above the
                    # threshold, add the stock to the filtered list.
filtered_stocks.append(s)
return filtered_stocks
| mit | -2,557,690,650,821,079,000 | 24.240741 | 79 | 0.567865 | false |
Magic-Translater/Pwntools.Doc.In.Zh-cn | source/conf.py | 1 | 11053 | # -*- coding: utf-8 -*-
#
# pwntools documentation build configuration file, created by
# sphinx-quickstart on Wed May 28 15:00:52 2014.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import subprocess
import sys
build_dash = tags.has('dash')
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../..'))
import pwnlib
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'pwnlib.internal.dochelper',
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.linkcode',
'sphinx.ext.autosummary',
'sphinx.ext.coverage',
'sphinx.ext.todo',
'sphinx.ext.intersphinx',
'sphinxcontrib.autoprogram',
'sphinxcontrib.napoleon'
]
doctest_global_setup = '''
import sys, os
os.environ['PWNLIB_NOTERM'] = '1'
os.environ['PWNLIB_RANDOMIZE'] = '0'
import pwnlib
pwnlib.context.context.reset_local()
pwnlib.context.ContextType.defaults['log_level'] = 'ERROR'
pwnlib.context.ContextType.defaults['randomize'] = False
pwnlib.term.text.when = 'never'
pwnlib.log.install_default_handler()
pwnlib.log.rootlogger.setLevel(1)
# Sphinx modifies sys.stdout, and context.log_terminal has
# a reference to the original instance. We need to update
# it for logging to be captured.
class stdout(object):
def __getattr__(self, name):
return getattr(sys.stdout, name)
def __setattr__(self, name, value):
return setattr(sys.stdout, name, value)
pwnlib.context.ContextType.defaults['log_console'] = stdout()
'''
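# (Illustrative note: doctest_global_setup above is assumed to run as setup
# code for each doctest group, silencing pwnlib logging and pinning context
# defaults so doctest output stays deterministic; the stdout proxy keeps
# pwnlib logging pointed at whatever sys.stdout Sphinx has installed, as the
# snippet's own comments explain.)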
autodoc_member_order = 'alphabetical'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
doctest_test_doctest_blocks = True
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pwntools'
copyright = u'2016, Gallopsled et al.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
release = pwnlib.__version__
version = release.rsplit('.', 1)[0]
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = "zh_CN"
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = not build_dash
# If false, no index is generated.
html_use_index = not build_dash
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pwntoolsdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'pwntools.tex', u'pwntools Documentation',
u'2016, Gallopsled et al.', 'manual'),
]
intersphinx_mapping = {'python': ('https://docs.python.org/2.7', None),
'paramiko': ('https://paramiko-docs.readthedocs.org/en/1.15/', None)}
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pwntools', u'pwntools Documentation',
[u'2016, Gallopsled et al.'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'pwntools', u'pwntools Documentation',
u'', 'pwntools', 'CTF exploit writing toolkit.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
branch = release
try:
git_branch = subprocess.check_output('git describe --tags', shell = True)
except subprocess.CalledProcessError:
git_branch = '-'
try:
if '-' in git_branch:
branch = subprocess.check_output('git rev-parse HEAD', shell = True).strip()[:10]
except subprocess.CalledProcessError:
pass
def linkcode_resolve(domain, info):
if domain != 'py':
return None
if not info['module']:
return None
import importlib, inspect, types
mod = importlib.import_module(info['module'])
# Try to find the value
val = mod
for k in info['fullname'].split('.'):
val = getattr(val, k, None)
if val == None:
break
# Special case for shellcraft
if info['module'].startswith('pwnlib.shellcraft.'):
filename = 'pwnlib/shellcraft/templates/%s' % val._relpath
# Case for everything else
else:
filename = info['module'].replace('.', '/') + '.py'
if isinstance(val, (types.ModuleType, types.ClassType, types.MethodType, types.FunctionType, types.TracebackType, types.FrameType, types.CodeType)):
try:
lines, first = inspect.getsourcelines(val)
filename += '#L%d-%d' % (first, first + len(lines) - 1)
except IOError:
pass
return "https://github.com/Gallopsled/pwntools/blob/%s/%s" % (branch, filename)
# The readthedocs theme is used by the Dash generator. (Can be used for HTML too.)
if build_dash:
# on_rtd is whether we are on readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# otherwise, readthedocs.org uses their theme by default, so no need to specify it
| mit | -6,300,908,279,611,668,000 | 30.670487 | 156 | 0.689406 | false |
auready/django | tests/auth_tests/test_forms.py | 2 | 34292 | import datetime
import re
from unittest import mock
from django import forms
from django.contrib.auth.forms import (
AdminPasswordChangeForm, AuthenticationForm, PasswordChangeForm,
PasswordResetForm, ReadOnlyPasswordHashField, ReadOnlyPasswordHashWidget,
SetPasswordForm, UserChangeForm, UserCreationForm,
)
from django.contrib.auth.models import User
from django.contrib.auth.signals import user_login_failed
from django.contrib.sites.models import Site
from django.core import mail
from django.core.mail import EmailMultiAlternatives
from django.forms.fields import CharField, Field, IntegerField
from django.test import SimpleTestCase, TestCase, override_settings
from django.utils import translation
from django.utils.text import capfirst
from django.utils.translation import ugettext as _
from .models.custom_user import (
CustomUser, CustomUserWithoutIsActiveField, ExtensionUser,
)
from .models.with_custom_email_field import CustomEmailField
from .models.with_integer_username import IntegerUsernameUser
from .settings import AUTH_TEMPLATES
class TestDataMixin:
@classmethod
def setUpTestData(cls):
cls.u1 = User.objects.create_user(username='testclient', password='password', email='[email protected]')
cls.u2 = User.objects.create_user(username='inactive', password='password', is_active=False)
cls.u3 = User.objects.create_user(username='staff', password='password')
cls.u4 = User.objects.create(username='empty_password', password='')
cls.u5 = User.objects.create(username='unmanageable_password', password='$')
cls.u6 = User.objects.create(username='unknown_password', password='foo$bar')
class UserCreationFormTest(TestDataMixin, TestCase):
def test_user_already_exists(self):
data = {
'username': 'testclient',
'password1': 'test123',
'password2': 'test123',
}
form = UserCreationForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form["username"].errors,
[str(User._meta.get_field('username').error_messages['unique'])])
def test_invalid_data(self):
data = {
'username': 'jsmith!',
'password1': 'test123',
'password2': 'test123',
}
form = UserCreationForm(data)
self.assertFalse(form.is_valid())
validator = next(v for v in User._meta.get_field('username').validators if v.code == 'invalid')
self.assertEqual(form["username"].errors, [str(validator.message)])
def test_password_verification(self):
# The verification password is incorrect.
data = {
'username': 'jsmith',
'password1': 'test123',
'password2': 'test',
}
form = UserCreationForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form["password2"].errors,
[str(form.error_messages['password_mismatch'])])
def test_both_passwords(self):
# One (or both) passwords weren't given
data = {'username': 'jsmith'}
form = UserCreationForm(data)
required_error = [str(Field.default_error_messages['required'])]
self.assertFalse(form.is_valid())
self.assertEqual(form['password1'].errors, required_error)
self.assertEqual(form['password2'].errors, required_error)
data['password2'] = 'test123'
form = UserCreationForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form['password1'].errors, required_error)
self.assertEqual(form['password2'].errors, [])
@mock.patch('django.contrib.auth.password_validation.password_changed')
def test_success(self, password_changed):
# The success case.
data = {
'username': '[email protected]',
'password1': 'test123',
'password2': 'test123',
}
form = UserCreationForm(data)
self.assertTrue(form.is_valid())
form.save(commit=False)
self.assertEqual(password_changed.call_count, 0)
u = form.save()
self.assertEqual(password_changed.call_count, 1)
self.assertEqual(repr(u), '<User: [email protected]>')
def test_unicode_username(self):
data = {
'username': '宝',
'password1': 'test123',
'password2': 'test123',
}
form = UserCreationForm(data)
self.assertTrue(form.is_valid())
u = form.save()
self.assertEqual(u.username, '宝')
def test_normalize_username(self):
# The normalization happens in AbstractBaseUser.clean() and ModelForm
# validation calls Model.clean().
ohm_username = 'testΩ' # U+2126 OHM SIGN
data = {
'username': ohm_username,
'password1': 'pwd2',
'password2': 'pwd2',
}
form = UserCreationForm(data)
self.assertTrue(form.is_valid())
user = form.save()
self.assertNotEqual(user.username, ohm_username)
self.assertEqual(user.username, 'testΩ') # U+03A9 GREEK CAPITAL LETTER OMEGA
def test_duplicate_normalized_unicode(self):
"""
        To prevent almost identical usernames, visually identical but differing
        by their unicode code points only, Unicode NFKC normalization should
        make them appear equal to Django.
"""
omega_username = 'iamtheΩ' # U+03A9 GREEK CAPITAL LETTER OMEGA
ohm_username = 'iamtheΩ' # U+2126 OHM SIGN
self.assertNotEqual(omega_username, ohm_username)
User.objects.create_user(username=omega_username, password='pwd')
data = {
'username': ohm_username,
'password1': 'pwd2',
'password2': 'pwd2',
}
form = UserCreationForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors['username'], ["A user with that username already exists."]
)
@override_settings(AUTH_PASSWORD_VALIDATORS=[
{'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator'},
{'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', 'OPTIONS': {
'min_length': 12,
}},
])
def test_validates_password(self):
data = {
'username': 'testclient',
'password1': 'testclient',
'password2': 'testclient',
}
form = UserCreationForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(len(form['password2'].errors), 2)
self.assertIn('The password is too similar to the username.', form['password2'].errors)
self.assertIn(
'This password is too short. It must contain at least 12 characters.',
form['password2'].errors
)
def test_custom_form(self):
class CustomUserCreationForm(UserCreationForm):
class Meta(UserCreationForm.Meta):
model = ExtensionUser
fields = UserCreationForm.Meta.fields + ('date_of_birth',)
data = {
'username': 'testclient',
'password1': 'testclient',
'password2': 'testclient',
'date_of_birth': '1988-02-24',
}
form = CustomUserCreationForm(data)
self.assertTrue(form.is_valid())
def test_custom_form_with_different_username_field(self):
class CustomUserCreationForm(UserCreationForm):
class Meta(UserCreationForm.Meta):
model = CustomUser
fields = ('email', 'date_of_birth')
data = {
'email': '[email protected]',
'password1': 'testclient',
'password2': 'testclient',
'date_of_birth': '1988-02-24',
}
form = CustomUserCreationForm(data)
self.assertTrue(form.is_valid())
def test_custom_form_hidden_username_field(self):
class CustomUserCreationForm(UserCreationForm):
class Meta(UserCreationForm.Meta):
model = CustomUserWithoutIsActiveField
fields = ('email',) # without USERNAME_FIELD
data = {
'email': '[email protected]',
'password1': 'testclient',
'password2': 'testclient',
}
form = CustomUserCreationForm(data)
self.assertTrue(form.is_valid())
def test_password_whitespace_not_stripped(self):
data = {
'username': 'testuser',
'password1': ' testpassword ',
'password2': ' testpassword ',
}
form = UserCreationForm(data)
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['password1'], data['password1'])
self.assertEqual(form.cleaned_data['password2'], data['password2'])
@override_settings(AUTH_PASSWORD_VALIDATORS=[
{'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator'},
])
def test_password_help_text(self):
form = UserCreationForm()
self.assertEqual(
form.fields['password1'].help_text,
'<ul><li>Your password can't be too similar to your other personal information.</li></ul>'
)
# To verify that the login form rejects inactive users, use an authentication
# backend that allows them.
@override_settings(AUTHENTICATION_BACKENDS=['django.contrib.auth.backends.AllowAllUsersModelBackend'])
class AuthenticationFormTest(TestDataMixin, TestCase):
def test_invalid_username(self):
# The user submits an invalid username.
data = {
'username': 'jsmith_does_not_exist',
'password': 'test123',
}
form = AuthenticationForm(None, data)
self.assertFalse(form.is_valid())
self.assertEqual(
form.non_field_errors(), [
form.error_messages['invalid_login'] % {
'username': User._meta.get_field('username').verbose_name
}
]
)
def test_inactive_user(self):
# The user is inactive.
data = {
'username': 'inactive',
'password': 'password',
}
form = AuthenticationForm(None, data)
self.assertFalse(form.is_valid())
self.assertEqual(form.non_field_errors(), [str(form.error_messages['inactive'])])
def test_login_failed(self):
signal_calls = []
def signal_handler(**kwargs):
signal_calls.append(kwargs)
user_login_failed.connect(signal_handler)
fake_request = object()
try:
form = AuthenticationForm(fake_request, {
'username': 'testclient',
'password': 'incorrect',
})
self.assertFalse(form.is_valid())
self.assertIs(signal_calls[0]['request'], fake_request)
finally:
user_login_failed.disconnect(signal_handler)
def test_inactive_user_i18n(self):
with self.settings(USE_I18N=True), translation.override('pt-br', deactivate=True):
# The user is inactive.
data = {
'username': 'inactive',
'password': 'password',
}
form = AuthenticationForm(None, data)
self.assertFalse(form.is_valid())
self.assertEqual(form.non_field_errors(), [str(form.error_messages['inactive'])])
def test_custom_login_allowed_policy(self):
# The user is inactive, but our custom form policy allows them to log in.
data = {
'username': 'inactive',
'password': 'password',
}
class AuthenticationFormWithInactiveUsersOkay(AuthenticationForm):
def confirm_login_allowed(self, user):
pass
form = AuthenticationFormWithInactiveUsersOkay(None, data)
self.assertTrue(form.is_valid())
# If we want to disallow some logins according to custom logic,
# we should raise a django.forms.ValidationError in the form.
class PickyAuthenticationForm(AuthenticationForm):
def confirm_login_allowed(self, user):
if user.username == "inactive":
raise forms.ValidationError("This user is disallowed.")
raise forms.ValidationError("Sorry, nobody's allowed in.")
form = PickyAuthenticationForm(None, data)
self.assertFalse(form.is_valid())
self.assertEqual(form.non_field_errors(), ['This user is disallowed.'])
data = {
'username': 'testclient',
'password': 'password',
}
form = PickyAuthenticationForm(None, data)
self.assertFalse(form.is_valid())
self.assertEqual(form.non_field_errors(), ["Sorry, nobody's allowed in."])
def test_success(self):
# The success case
data = {
'username': 'testclient',
'password': 'password',
}
form = AuthenticationForm(None, data)
self.assertTrue(form.is_valid())
self.assertEqual(form.non_field_errors(), [])
def test_unicode_username(self):
User.objects.create_user(username='Σαρα', password='pwd')
data = {
'username': 'Σαρα',
'password': 'pwd',
}
form = AuthenticationForm(None, data)
self.assertTrue(form.is_valid())
self.assertEqual(form.non_field_errors(), [])
def test_username_field_label(self):
class CustomAuthenticationForm(AuthenticationForm):
username = CharField(label="Name", max_length=75)
form = CustomAuthenticationForm()
self.assertEqual(form['username'].label, "Name")
def test_username_field_label_not_set(self):
class CustomAuthenticationForm(AuthenticationForm):
username = CharField()
form = CustomAuthenticationForm()
username_field = User._meta.get_field(User.USERNAME_FIELD)
self.assertEqual(form.fields['username'].label, capfirst(username_field.verbose_name))
def test_username_field_label_empty_string(self):
class CustomAuthenticationForm(AuthenticationForm):
username = CharField(label='')
form = CustomAuthenticationForm()
self.assertEqual(form.fields['username'].label, "")
def test_password_whitespace_not_stripped(self):
data = {
'username': 'testuser',
'password': ' pass ',
}
form = AuthenticationForm(None, data)
        form.is_valid()  # Not necessary to have valid credentials for the test.
self.assertEqual(form.cleaned_data['password'], data['password'])
@override_settings(AUTH_USER_MODEL='auth_tests.IntegerUsernameUser')
def test_integer_username(self):
class CustomAuthenticationForm(AuthenticationForm):
username = IntegerField()
user = IntegerUsernameUser.objects.create_user(username=0, password='pwd')
data = {
'username': 0,
'password': 'pwd',
}
form = CustomAuthenticationForm(None, data)
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['username'], data['username'])
self.assertEqual(form.cleaned_data['password'], data['password'])
self.assertEqual(form.errors, {})
self.assertEqual(form.user_cache, user)
class SetPasswordFormTest(TestDataMixin, TestCase):
def test_password_verification(self):
# The two new passwords do not match.
user = User.objects.get(username='testclient')
data = {
'new_password1': 'abc123',
'new_password2': 'abc',
}
form = SetPasswordForm(user, data)
self.assertFalse(form.is_valid())
self.assertEqual(
form["new_password2"].errors,
[str(form.error_messages['password_mismatch'])]
)
@mock.patch('django.contrib.auth.password_validation.password_changed')
def test_success(self, password_changed):
user = User.objects.get(username='testclient')
data = {
'new_password1': 'abc123',
'new_password2': 'abc123',
}
form = SetPasswordForm(user, data)
self.assertTrue(form.is_valid())
form.save(commit=False)
self.assertEqual(password_changed.call_count, 0)
form.save()
self.assertEqual(password_changed.call_count, 1)
@override_settings(AUTH_PASSWORD_VALIDATORS=[
{'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator'},
{'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', 'OPTIONS': {
'min_length': 12,
}},
])
def test_validates_password(self):
user = User.objects.get(username='testclient')
data = {
'new_password1': 'testclient',
'new_password2': 'testclient',
}
form = SetPasswordForm(user, data)
self.assertFalse(form.is_valid())
self.assertEqual(len(form["new_password2"].errors), 2)
self.assertIn('The password is too similar to the username.', form["new_password2"].errors)
self.assertIn(
'This password is too short. It must contain at least 12 characters.',
form["new_password2"].errors
)
def test_password_whitespace_not_stripped(self):
user = User.objects.get(username='testclient')
data = {
'new_password1': ' password ',
'new_password2': ' password ',
}
form = SetPasswordForm(user, data)
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['new_password1'], data['new_password1'])
self.assertEqual(form.cleaned_data['new_password2'], data['new_password2'])
@override_settings(AUTH_PASSWORD_VALIDATORS=[
{'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator'},
{'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', 'OPTIONS': {
'min_length': 12,
}},
])
def test_help_text_translation(self):
french_help_texts = [
'Votre mot de passe ne peut pas trop ressembler à vos autres informations personnelles.',
'Votre mot de passe doit contenir au minimum 12 caractères.',
]
form = SetPasswordForm(self.u1)
with translation.override('fr'):
html = form.as_p()
for french_text in french_help_texts:
self.assertIn(french_text, html)
class PasswordChangeFormTest(TestDataMixin, TestCase):
def test_incorrect_password(self):
user = User.objects.get(username='testclient')
data = {
'old_password': 'test',
'new_password1': 'abc123',
'new_password2': 'abc123',
}
form = PasswordChangeForm(user, data)
self.assertFalse(form.is_valid())
self.assertEqual(form["old_password"].errors, [str(form.error_messages['password_incorrect'])])
def test_password_verification(self):
# The two new passwords do not match.
user = User.objects.get(username='testclient')
data = {
'old_password': 'password',
'new_password1': 'abc123',
'new_password2': 'abc',
}
form = PasswordChangeForm(user, data)
self.assertFalse(form.is_valid())
self.assertEqual(form["new_password2"].errors, [str(form.error_messages['password_mismatch'])])
@mock.patch('django.contrib.auth.password_validation.password_changed')
def test_success(self, password_changed):
# The success case.
user = User.objects.get(username='testclient')
data = {
'old_password': 'password',
'new_password1': 'abc123',
'new_password2': 'abc123',
}
form = PasswordChangeForm(user, data)
self.assertTrue(form.is_valid())
form.save(commit=False)
self.assertEqual(password_changed.call_count, 0)
form.save()
self.assertEqual(password_changed.call_count, 1)
def test_field_order(self):
# Regression test - check the order of fields:
user = User.objects.get(username='testclient')
self.assertEqual(list(PasswordChangeForm(user, {}).fields), ['old_password', 'new_password1', 'new_password2'])
def test_password_whitespace_not_stripped(self):
user = User.objects.get(username='testclient')
user.set_password(' oldpassword ')
data = {
'old_password': ' oldpassword ',
'new_password1': ' pass ',
'new_password2': ' pass ',
}
form = PasswordChangeForm(user, data)
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['old_password'], data['old_password'])
self.assertEqual(form.cleaned_data['new_password1'], data['new_password1'])
self.assertEqual(form.cleaned_data['new_password2'], data['new_password2'])
class UserChangeFormTest(TestDataMixin, TestCase):
def test_username_validity(self):
user = User.objects.get(username='testclient')
data = {'username': 'not valid'}
form = UserChangeForm(data, instance=user)
self.assertFalse(form.is_valid())
validator = next(v for v in User._meta.get_field('username').validators if v.code == 'invalid')
self.assertEqual(form["username"].errors, [str(validator.message)])
def test_bug_14242(self):
        # A regression test, introduced by adding an optimization for the
# UserChangeForm.
class MyUserForm(UserChangeForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['groups'].help_text = 'These groups give users different permissions'
class Meta(UserChangeForm.Meta):
fields = ('groups',)
# Just check we can create it
MyUserForm({})
def test_unusable_password(self):
user = User.objects.get(username='empty_password')
user.set_unusable_password()
user.save()
form = UserChangeForm(instance=user)
self.assertIn(_("No password set."), form.as_table())
def test_bug_17944_empty_password(self):
user = User.objects.get(username='empty_password')
form = UserChangeForm(instance=user)
self.assertIn(_("No password set."), form.as_table())
def test_bug_17944_unmanageable_password(self):
user = User.objects.get(username='unmanageable_password')
form = UserChangeForm(instance=user)
self.assertIn(_("Invalid password format or unknown hashing algorithm."), form.as_table())
def test_bug_17944_unknown_password_algorithm(self):
user = User.objects.get(username='unknown_password')
form = UserChangeForm(instance=user)
self.assertIn(_("Invalid password format or unknown hashing algorithm."), form.as_table())
def test_bug_19133(self):
"The change form does not return the password value"
# Use the form to construct the POST data
user = User.objects.get(username='testclient')
form_for_data = UserChangeForm(instance=user)
post_data = form_for_data.initial
# The password field should be readonly, so anything
# posted here should be ignored; the form will be
# valid, and give back the 'initial' value for the
# password field.
post_data['password'] = 'new password'
form = UserChangeForm(instance=user, data=post_data)
self.assertTrue(form.is_valid())
# original hashed password contains $
self.assertIn('$', form.cleaned_data['password'])
def test_bug_19349_bound_password_field(self):
user = User.objects.get(username='testclient')
form = UserChangeForm(data={}, instance=user)
# When rendering the bound password field,
# ReadOnlyPasswordHashWidget needs the initial
# value to render correctly
self.assertEqual(form.initial['password'], form['password'].value())
def test_custom_form(self):
class CustomUserChangeForm(UserChangeForm):
class Meta(UserChangeForm.Meta):
model = ExtensionUser
fields = ('username', 'password', 'date_of_birth',)
user = User.objects.get(username='testclient')
data = {
'username': 'testclient',
'password': 'testclient',
'date_of_birth': '1998-02-24',
}
form = CustomUserChangeForm(data, instance=user)
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(form.cleaned_data['username'], 'testclient')
self.assertEqual(form.cleaned_data['date_of_birth'], datetime.date(1998, 2, 24))
@override_settings(TEMPLATES=AUTH_TEMPLATES)
class PasswordResetFormTest(TestDataMixin, TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
# This cleanup is necessary because contrib.sites cache
# makes tests interfere with each other, see #11505
Site.objects.clear_cache()
def create_dummy_user(self):
"""
Create a user and return a tuple (user_object, username, email).
"""
username = 'jsmith'
email = '[email protected]'
user = User.objects.create_user(username, email, 'test123')
return (user, username, email)
def test_invalid_email(self):
data = {'email': 'not valid'}
form = PasswordResetForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form['email'].errors, [_('Enter a valid email address.')])
def test_nonexistent_email(self):
"""
Test nonexistent email address. This should not fail because it would
expose information about registered users.
"""
data = {'email': '[email protected]'}
form = PasswordResetForm(data)
self.assertTrue(form.is_valid())
self.assertEqual(len(mail.outbox), 0)
def test_cleaned_data(self):
(user, username, email) = self.create_dummy_user()
data = {'email': email}
form = PasswordResetForm(data)
self.assertTrue(form.is_valid())
form.save(domain_override='example.com')
self.assertEqual(form.cleaned_data['email'], email)
self.assertEqual(len(mail.outbox), 1)
def test_custom_email_subject(self):
data = {'email': '[email protected]'}
form = PasswordResetForm(data)
self.assertTrue(form.is_valid())
# Since we're not providing a request object, we must provide a
# domain_override to prevent the save operation from failing in the
# potential case where contrib.sites is not installed. Refs #16412.
form.save(domain_override='example.com')
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].subject, 'Custom password reset on example.com')
def test_custom_email_constructor(self):
data = {'email': '[email protected]'}
class CustomEmailPasswordResetForm(PasswordResetForm):
def send_mail(self, subject_template_name, email_template_name,
context, from_email, to_email,
html_email_template_name=None):
EmailMultiAlternatives(
"Forgot your password?",
"Sorry to hear you forgot your password.",
None, [to_email],
['[email protected]'],
headers={'Reply-To': '[email protected]'},
alternatives=[
("Really sorry to hear you forgot your password.", "text/html")
],
).send()
form = CustomEmailPasswordResetForm(data)
self.assertTrue(form.is_valid())
# Since we're not providing a request object, we must provide a
# domain_override to prevent the save operation from failing in the
# potential case where contrib.sites is not installed. Refs #16412.
form.save(domain_override='example.com')
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].subject, 'Forgot your password?')
self.assertEqual(mail.outbox[0].bcc, ['[email protected]'])
self.assertEqual(mail.outbox[0].content_subtype, "plain")
def test_preserve_username_case(self):
"""
Preserve the case of the user name (before the @ in the email address)
when creating a user (#5605).
"""
user = User.objects.create_user('forms_test2', '[email protected]', 'test')
self.assertEqual(user.email, '[email protected]')
user = User.objects.create_user('forms_test3', 'tesT', 'test')
self.assertEqual(user.email, 'tesT')
def test_inactive_user(self):
"""
Inactive user cannot receive password reset email.
"""
(user, username, email) = self.create_dummy_user()
user.is_active = False
user.save()
form = PasswordResetForm({'email': email})
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(len(mail.outbox), 0)
def test_unusable_password(self):
user = User.objects.create_user('testuser', '[email protected]', 'test')
data = {"email": "[email protected]"}
form = PasswordResetForm(data)
self.assertTrue(form.is_valid())
user.set_unusable_password()
user.save()
form = PasswordResetForm(data)
# The form itself is valid, but no email is sent
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(len(mail.outbox), 0)
def test_save_plaintext_email(self):
"""
Test the PasswordResetForm.save() method with no html_email_template_name
parameter passed in.
Test to ensure original behavior is unchanged after the parameter was added.
"""
(user, username, email) = self.create_dummy_user()
form = PasswordResetForm({"email": email})
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(len(mail.outbox), 1)
message = mail.outbox[0].message()
self.assertFalse(message.is_multipart())
self.assertEqual(message.get_content_type(), 'text/plain')
self.assertEqual(message.get('subject'), 'Custom password reset on example.com')
self.assertEqual(len(mail.outbox[0].alternatives), 0)
self.assertEqual(message.get_all('to'), [email])
self.assertTrue(re.match(r'^http://example.com/reset/[\w+/-]', message.get_payload()))
def test_save_html_email_template_name(self):
"""
        Test the PasswordResetForm.save() method with html_email_template_name
parameter specified.
Test to ensure that a multipart email is sent with both text/plain
and text/html parts.
"""
(user, username, email) = self.create_dummy_user()
form = PasswordResetForm({"email": email})
self.assertTrue(form.is_valid())
form.save(html_email_template_name='registration/html_password_reset_email.html')
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(len(mail.outbox[0].alternatives), 1)
message = mail.outbox[0].message()
self.assertEqual(message.get('subject'), 'Custom password reset on example.com')
self.assertEqual(len(message.get_payload()), 2)
self.assertTrue(message.is_multipart())
self.assertEqual(message.get_payload(0).get_content_type(), 'text/plain')
self.assertEqual(message.get_payload(1).get_content_type(), 'text/html')
self.assertEqual(message.get_all('to'), [email])
self.assertTrue(re.match(r'^http://example.com/reset/[\w/-]+', message.get_payload(0).get_payload()))
self.assertTrue(re.match(
r'^<html><a href="http://example.com/reset/[\w/-]+/">Link</a></html>$',
message.get_payload(1).get_payload()
))
@override_settings(AUTH_USER_MODEL='auth_tests.CustomEmailField')
def test_custom_email_field(self):
email = '[email protected]'
CustomEmailField.objects.create_user('test name', 'test password', email)
form = PasswordResetForm({'email': email})
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(form.cleaned_data['email'], email)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].to, [email])
class ReadOnlyPasswordHashTest(SimpleTestCase):
def test_bug_19349_render_with_none_value(self):
# Rendering the widget with value set to None
# mustn't raise an exception.
widget = ReadOnlyPasswordHashWidget()
html = widget.render(name='password', value=None, attrs={})
self.assertIn(_("No password set."), html)
def test_readonly_field_has_changed(self):
field = ReadOnlyPasswordHashField()
self.assertFalse(field.has_changed('aaa', 'bbb'))
class AdminPasswordChangeFormTest(TestDataMixin, TestCase):
@mock.patch('django.contrib.auth.password_validation.password_changed')
def test_success(self, password_changed):
user = User.objects.get(username='testclient')
data = {
'password1': 'test123',
'password2': 'test123',
}
form = AdminPasswordChangeForm(user, data)
self.assertTrue(form.is_valid())
form.save(commit=False)
self.assertEqual(password_changed.call_count, 0)
form.save()
self.assertEqual(password_changed.call_count, 1)
def test_password_whitespace_not_stripped(self):
user = User.objects.get(username='testclient')
data = {
'password1': ' pass ',
'password2': ' pass ',
}
form = AdminPasswordChangeForm(user, data)
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['password1'], data['password1'])
self.assertEqual(form.cleaned_data['password2'], data['password2'])
| bsd-3-clause | -8,254,887,055,535,520,000 | 38.944056 | 119 | 0.616743 | false |