repo_name stringlengths 5-92 | path stringlengths 4-232 | copies stringclasses 19 values | size stringlengths 4-7 | content stringlengths 721-1.04M | license stringclasses 15 values | hash int64 -9,223,277,421,539,062,000 to 9,223,102,107B | line_mean float64 6.51-99.9 | line_max int64 15-997 | alpha_frac float64 0.25-0.97 | autogenerated bool 1 class
---|---|---|---|---|---|---|---|---|---|---|
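Each row below follows this schema, with the full file inlined in the `content` column. A minimal sketch of how such rows could be consumed, assuming the dump has been exported as a JSON-lines file (the file name and the filtering threshold are illustrative assumptions, not part of the dataset):

import json

def iter_source_rows(dump_path="code_dump.jsonl", min_alpha_frac=0.25):
    """Yield (repo_name, path, content) for rows that look like hand-written source files."""
    with open(dump_path) as fh:
        for line in fh:
            row = json.loads(line)
            # Skip auto-generated files and rows dominated by non-alphabetic characters.
            if row["autogenerated"] or row["alpha_frac"] < min_alpha_frac:
                continue
            yield row["repo_name"], row["path"], row["content"]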
mpi2/PhenotypeData | external_tools/src/main/python/images/DeleteFilesNotInSolr.py | 1 | 8714 |
#!/usr/bin/python
"""Delete file not in Solr ..."""
import os
import requests
import json
import sys
import os.path
import sys
import argparse
import shutil
from common import splitString
from DirectoryWalker import DirectoryWalker
import itertools
from OmeroPropertiesParser import OmeroPropertiesParser
responseFailed=0
numberOfImageDownloadAttemps=0
totalNumberOfImagesWeHave=0
numFoundInSolr=0
uniquePathsFromSolr=set()
uniqueFilesWithPathsFromNfs=set()
def main(argv):
parser = argparse.ArgumentParser(
description='Delete files not in Solr'
)
parser.add_argument('-d', '--rootDestinationDir', dest='rootDestinationDir',
help='Directory for root of destination to store images' )
parser.add_argument('-s', '--rootSolrUrl', dest='rootSolrUrl',
help='URL to root of solr index'
)
parser.add_argument('-H', '--host', dest='komp2Host',
help='Hostname for server hosting komp2 db'
)
parser.add_argument('-p', '--port', dest='komp2Port',
help='Port by which to connect to komp2 db'
)
parser.add_argument('-u', '--user', dest='komp2User',
help='Username for connecting to komp2 db'
)
parser.add_argument('-db', '--database', dest='komp2Db',
help='Database to connect to for komp2db'
)
parser.add_argument('--pass', dest='komp2Pass',
help='Password for komp2db'
)
parser.add_argument('--profile', dest='profile', default='dev',
help='profile from which to read config: dev, prod, live, ...')
args = parser.parse_args()
# Get values from property file and use as defaults that can be overridden
# by command line parameters
try:
pp = OmeroPropertiesParser(args.profile)
omeroProps = pp.getOmeroProps()
except:
omeroProps = {}
rootSolrUrl = args.rootSolrUrl if args.rootSolrUrl is not None else omeroProps['solrurl']
komp2Host = args.komp2Host if args.komp2Host is not None else omeroProps['komp2host']
komp2Port = args.komp2Port if args.komp2Port is not None else omeroProps['komp2port']
komp2db = args.komp2Db if args.komp2Db is not None else omeroProps['komp2db']
komp2User = args.komp2User if args.komp2User is not None else omeroProps['komp2user']
komp2Pass = args.komp2Pass if args.komp2Pass is not None else omeroProps['komp2pass']
rootDestinationDir = args.rootDestinationDir if args.rootDestinationDir is not None else omeroProps['rootdestinationdir']
# note: can't split this url over a few lines as that puts newlines into the url, which doesn't work
#solrQuery="""experiment/select?q=observation_type:image_record&fq=download_file_path:(download_file_path:*bhjlk01.jax.org/images/IMPC_ALZ_001/*%20AND%20!download_file_path:*.mov)&fl=id,download_file_path,phenotyping_center,pipeline_stable_id,procedure_stable_id,datasource_name,parameter_stable_id&wt=json&indent=on&rows=10000000"""
solrQuery="""experiment/select?q=observation_type:image_record&fq=(download_file_path:*mousephenotype.org*%20AND%20!download_file_path:*.mov)&fl=id,download_file_path,phenotyping_center,pipeline_stable_id,procedure_stable_id,datasource_name,parameter_stable_id&wt=json&indent=on&rows=1000000"""
print("running python image copy script for impc images")
print 'rootDestinationDir is "', rootDestinationDir
solrUrl=rootSolrUrl+solrQuery;
print 'solrUrl', solrUrl
getPathsFromSolr(solrUrl, rootDestinationDir)
#for solrPath in itertools.islice(uniquePathsFromSolr, 3):
#print "solrPath="+solrPath
dirWalker=DirectoryWalker()
rel_directory_to_filenames_map=dirWalker.getFilesFromDir(rootDestinationDir)
n=10
for key, value in rel_directory_to_filenames_map.items():
count=0
for name in value:
filePath=rootDestinationDir+key+"/"+name
uniqueFilesWithPathsFromNfs.add(filePath)
count+=1
#if count<= n: print "filePath="+str(filePath)
print "uniquePathsFromSolr length=",len(uniquePathsFromSolr)
print "uniqueFilesWithPathsFromNfs=", len(uniqueFilesWithPathsFromNfs)
filesNotInSolrAnymore=uniqueFilesWithPathsFromNfs.difference(uniquePathsFromSolr)
print "files not in solr anymore size=",len(filesNotInSolrAnymore)
for currentPath in filesNotInSolrAnymore:
print "trying to move this file="+currentPath
destinationPath=currentPath.replace("/nfs/komp2/web/images/clean/impc/", "/nfs/komp2/web/images/impc/")
print "destPath="+destinationPath
moveFile(currentPath, destinationPath)
#os.remove(filePath) maybe move to another directory rather than delete
#runWithSolrAsDataSource(solrUrl, cnx, rootDestinationDir)
def getPathsFromSolr(solrUrl, rootDestinationDir):
"""
need to get these passed in as arguments - the host and db name etc for jenkins to run
first get the list of download urls and the data source, experiment, procedure and parameter and observation id for the images
"""
v = json.loads(requests.get(solrUrl).text)
docs=v['response']['docs']
numFoundInSolr=v['response']['numFound']
for doc in docs:
download_file_path=doc['download_file_path']
datasource_id=doc['datasource_name']
phenotyping_center=doc['phenotyping_center']
#experiment=doc['experiment']
pipeline_stable_id=doc['pipeline_stable_id']
observation_id=doc['id']
procedure_stable_id=doc['procedure_stable_id']
parameter_stable_id=doc['parameter_stable_id']
global rootDesitinationdir
getUniqueFilePathsFromSolr(observation_id, rootDestinationDir,phenotyping_center,pipeline_stable_id, procedure_stable_id, parameter_stable_id, download_file_path)
print 'number found in solr='+str(numFoundInSolr)+' number of failed responses='+str(responseFailed)+' number of requests='+str(numberOfImageDownloadAttemps)+' total totalNumberOfImagesWeHave='+str(totalNumberOfImagesWeHave)
def createDestinationFilePath(rootDestinationDir, phenotyping_center, pipeline_stable_id, procedure, parameter, download_file_path):
directory="/".join([rootDestinationDir,phenotyping_center, pipeline_stable_id,procedure,parameter])
return directory
def getUniqueFilePathsFromSolr(observation_id, rootDestinationDir, phenotyping_center,pipeline_stable_id, procedure, parameter, downloadFilePath):
global totalNumberOfImagesWeHave
global responseFailed
global numberOfImageDownloadAttemps
directory = createDestinationFilePath(rootDestinationDir, phenotyping_center, pipeline_stable_id, procedure,parameter, downloadFilePath)
#print "directory "+str(directory)
dstfilename=directory+"/"+str(downloadFilePath.split('/')[-1])
#print "dstfilename="+str(dstfilename)
#/nfs/komp2/web/images/impc/MRC Harwell/HRWL_001/IMPC_XRY_001/IMPC_XRY_034_001/114182.dcm
# new file paths are /nfs/public/ro/pheno-archive-images/images/impc
if dstfilename in uniquePathsFromSolr:
print '---------------------!!!!!!!!!!error the filePath is not unique and has been specified before:'+dstfilename
uniquePathsFromSolr.add(dstfilename.replace("impc//", "impc/"))#hack to remove the extra slash after impc but don't want to effect other module used by other code
#destDirectory=os.path.dirname(destPath)
#print "destination directory for copy is "+destDirectory
#if not os.path.exists(destDirectory):
# os.makedirs(destDirectory)
#print 'saving file to '+destPath
#if not os.path.isfile(destPath):
# try:
# shutil.copyfile(dstfilename,destPath)
# except IOError:
# print "file does not exist "+str(dstfilename)+" continuing"
#totalNumberOfImagesWeHave=totalNumberOfImagesWeHave+1
#if totalNumberOfImagesWeHave%1000==0 :
# print "totalNumber of images we have="+str(totalNumberOfImagesWeHave)
def moveFile(currentPath, destinationPath):
filename=str(destinationPath.split('/')[-1])
destDirectory=destinationPath.replace(filename, "")
print "making directory="+destDirectory
if not os.path.exists(destDirectory):
os.makedirs(destDirectory)
print 'moving file to '+destinationPath
if not os.path.isfile(destinationPath):
try:
shutil.move(currentPath,destinationPath)
except IOError:
print "file does not exist "+str(currentPath)+" continuing"
if __name__ == "__main__":
main(sys.argv[1:])
| apache-2.0 | -614,075,374,023,222,100 | 48.511364 | 337 | 0.697269 | false |
sajeeshcs/nested_projects_keystone | keystone/token/controllers.py | 1 | 20208 |
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import sys
from keystoneclient.common import cms
from oslo.utils import timeutils
import six
from keystone.common import controller
from keystone.common import dependency
from keystone.common import wsgi
from keystone import config
from keystone import exception
from keystone.i18n import _
from keystone.models import token_model
from keystone.openstack.common import jsonutils
from keystone.openstack.common import log
from keystone.token import provider
CONF = config.CONF
LOG = log.getLogger(__name__)
class ExternalAuthNotApplicable(Exception):
"""External authentication is not applicable."""
pass
@dependency.requires('assignment_api', 'catalog_api', 'identity_api',
'token_provider_api', 'trust_api')
class Auth(controller.V2Controller):
@controller.v2_deprecated
def ca_cert(self, context, auth=None):
ca_file = open(CONF.signing.ca_certs, 'r')
data = ca_file.read()
ca_file.close()
return data
@controller.v2_deprecated
def signing_cert(self, context, auth=None):
cert_file = open(CONF.signing.certfile, 'r')
data = cert_file.read()
cert_file.close()
return data
@controller.v2_deprecated
def authenticate(self, context, auth=None):
"""Authenticate credentials and return a token.
Accept auth as a dict that looks like::
{
"auth":{
"passwordCredentials":{
"username":"test_user",
"password":"mypass"
},
"tenantName":"customer-x"
}
}
In this case, tenant is optional; if not provided, the token will be
considered "unscoped" and can later be used to get a scoped token.
Alternatively, this call accepts auth with only a token and tenant
that will return a token that is scoped to that tenant.
"""
if auth is None:
raise exception.ValidationError(attribute='auth',
target='request body')
if "token" in auth:
# Try to authenticate using a token
auth_info = self._authenticate_token(
context, auth)
else:
# Try external authentication
try:
auth_info = self._authenticate_external(
context, auth)
except ExternalAuthNotApplicable:
# Try local authentication
auth_info = self._authenticate_local(
context, auth)
user_ref, tenant_ref, metadata_ref, expiry, bind, audit_id = auth_info
# Validate that the auth info is valid and nothing is disabled
try:
self.identity_api.assert_user_enabled(
user_id=user_ref['id'], user=user_ref)
self.assignment_api.assert_domain_enabled(
domain_id=user_ref['domain_id'])
if tenant_ref:
self.assignment_api.assert_project_enabled(
project_id=tenant_ref['id'], project=tenant_ref)
except AssertionError as e:
six.reraise(exception.Unauthorized, exception.Unauthorized(e),
sys.exc_info()[2])
# NOTE(morganfainberg): Make sure the data is in correct form since it
# might be consumed external to Keystone and this is a v2.0 controller.
# The user_ref is encoded into the auth_token_data which is returned as
# part of the token data. The token provider doesn't care about the
# format.
user_ref = self.v3_to_v2_user(user_ref)
if tenant_ref:
tenant_ref = self.filter_domain_id(tenant_ref)
auth_token_data = self._get_auth_token_data(user_ref,
tenant_ref,
metadata_ref,
expiry,
audit_id)
if tenant_ref:
catalog_ref = self.catalog_api.get_catalog(
user_ref['id'], tenant_ref['id'], metadata_ref)
else:
catalog_ref = {}
auth_token_data['id'] = 'placeholder'
if bind:
auth_token_data['bind'] = bind
roles_ref = []
for role_id in metadata_ref.get('roles', []):
role_ref = self.assignment_api.get_role(role_id)
roles_ref.append(dict(name=role_ref['name']))
(token_id, token_data) = self.token_provider_api.issue_v2_token(
auth_token_data, roles_ref=roles_ref, catalog_ref=catalog_ref)
# NOTE(wanghong): We consume a trust use only when we are using trusts
# and have successfully issued a token.
if CONF.trust.enabled and 'trust_id' in auth:
self.trust_api.consume_use(auth['trust_id'])
return token_data
def _authenticate_token(self, context, auth):
"""Try to authenticate using an already existing token.
Returns auth_token_data, (user_ref, tenant_ref, metadata_ref)
"""
if 'token' not in auth:
raise exception.ValidationError(
attribute='token', target='auth')
if "id" not in auth['token']:
raise exception.ValidationError(
attribute="id", target="token")
old_token = auth['token']['id']
if len(old_token) > CONF.max_token_size:
raise exception.ValidationSizeError(attribute='token',
size=CONF.max_token_size)
try:
token_model_ref = token_model.KeystoneToken(
token_id=old_token,
token_data=self.token_provider_api.validate_token(old_token))
except exception.NotFound as e:
raise exception.Unauthorized(e)
wsgi.validate_token_bind(context, token_model_ref)
# A trust token cannot be used to get another token
if token_model_ref.trust_scoped:
raise exception.Forbidden()
user_id = token_model_ref.user_id
tenant_id = self._get_project_id_from_auth(auth)
if not CONF.trust.enabled and 'trust_id' in auth:
raise exception.Forbidden('Trusts are disabled.')
elif CONF.trust.enabled and 'trust_id' in auth:
trust_ref = self.trust_api.get_trust(auth['trust_id'])
if trust_ref is None:
raise exception.Forbidden()
if user_id != trust_ref['trustee_user_id']:
raise exception.Forbidden()
if (trust_ref['project_id'] and
tenant_id != trust_ref['project_id']):
raise exception.Forbidden()
if ('expires' in trust_ref) and (trust_ref['expires']):
expiry = trust_ref['expires']
if expiry < timeutils.parse_isotime(timeutils.isotime()):
raise exception.Forbidden()
user_id = trust_ref['trustor_user_id']
trustor_user_ref = self.identity_api.get_user(
trust_ref['trustor_user_id'])
if not trustor_user_ref['enabled']:
raise exception.Forbidden()
trustee_user_ref = self.identity_api.get_user(
trust_ref['trustee_user_id'])
if not trustee_user_ref['enabled']:
raise exception.Forbidden()
if trust_ref['impersonation'] is True:
current_user_ref = trustor_user_ref
else:
current_user_ref = trustee_user_ref
else:
current_user_ref = self.identity_api.get_user(user_id)
metadata_ref = {}
tenant_ref, metadata_ref['roles'] = self._get_project_roles_and_ref(
user_id, tenant_id)
expiry = token_model_ref.expires
if CONF.trust.enabled and 'trust_id' in auth:
trust_id = auth['trust_id']
trust_roles = []
for role in trust_ref['roles']:
if 'roles' not in metadata_ref:
raise exception.Forbidden()
if role['id'] in metadata_ref['roles']:
trust_roles.append(role['id'])
else:
raise exception.Forbidden()
if 'expiry' in trust_ref and trust_ref['expiry']:
trust_expiry = timeutils.parse_isotime(trust_ref['expiry'])
if trust_expiry < expiry:
expiry = trust_expiry
metadata_ref['roles'] = trust_roles
metadata_ref['trustee_user_id'] = trust_ref['trustee_user_id']
metadata_ref['trust_id'] = trust_id
bind = token_model_ref.bind
audit_id = token_model_ref.audit_chain_id
return (current_user_ref, tenant_ref, metadata_ref, expiry, bind,
audit_id)
def _authenticate_local(self, context, auth):
"""Try to authenticate against the identity backend.
Returns auth_token_data, (user_ref, tenant_ref, metadata_ref)
"""
if 'passwordCredentials' not in auth:
raise exception.ValidationError(
attribute='passwordCredentials', target='auth')
if "password" not in auth['passwordCredentials']:
raise exception.ValidationError(
attribute='password', target='passwordCredentials')
password = auth['passwordCredentials']['password']
if password and len(password) > CONF.identity.max_password_length:
raise exception.ValidationSizeError(
attribute='password', size=CONF.identity.max_password_length)
if ("userId" not in auth['passwordCredentials'] and
"username" not in auth['passwordCredentials']):
raise exception.ValidationError(
attribute='username or userId',
target='passwordCredentials')
user_id = auth['passwordCredentials'].get('userId')
if user_id and len(user_id) > CONF.max_param_size:
raise exception.ValidationSizeError(attribute='userId',
size=CONF.max_param_size)
username = auth['passwordCredentials'].get('username', '')
if username:
if len(username) > CONF.max_param_size:
raise exception.ValidationSizeError(attribute='username',
size=CONF.max_param_size)
try:
user_ref = self.identity_api.get_user_by_name(
username, CONF.identity.default_domain_id)
user_id = user_ref['id']
except exception.UserNotFound as e:
raise exception.Unauthorized(e)
try:
user_ref = self.identity_api.authenticate(
context,
user_id=user_id,
password=password)
except AssertionError as e:
raise exception.Unauthorized(e.args[0])
metadata_ref = {}
tenant_id = self._get_project_id_from_auth(auth)
tenant_ref, metadata_ref['roles'] = self._get_project_roles_and_ref(
user_id, tenant_id)
expiry = provider.default_expire_time()
bind = None
audit_id = None
return (user_ref, tenant_ref, metadata_ref, expiry, bind, audit_id)
def _authenticate_external(self, context, auth):
"""Try to authenticate an external user via REMOTE_USER variable.
Returns auth_token_data, (user_ref, tenant_ref, metadata_ref)
"""
environment = context.get('environment', {})
if not environment.get('REMOTE_USER'):
raise ExternalAuthNotApplicable()
# NOTE(jamielennox): xml and json differ and get confused about what
# empty auth should look like so just reset it.
if not auth:
auth = {}
username = environment['REMOTE_USER']
try:
user_ref = self.identity_api.get_user_by_name(
username, CONF.identity.default_domain_id)
user_id = user_ref['id']
except exception.UserNotFound as e:
raise exception.Unauthorized(e)
metadata_ref = {}
tenant_id = self._get_project_id_from_auth(auth)
tenant_ref, metadata_ref['roles'] = self._get_project_roles_and_ref(
user_id, tenant_id)
expiry = provider.default_expire_time()
bind = None
if ('kerberos' in CONF.token.bind and
environment.get('AUTH_TYPE', '').lower() == 'negotiate'):
bind = {'kerberos': username}
audit_id = None
return (user_ref, tenant_ref, metadata_ref, expiry, bind, audit_id)
def _get_auth_token_data(self, user, tenant, metadata, expiry, audit_id):
return dict(user=user,
tenant=tenant,
metadata=metadata,
expires=expiry,
parent_audit_id=audit_id)
def _get_project_id_from_auth(self, auth):
"""Extract tenant information from auth dict.
Returns a valid tenant_id if it exists, or None if not specified.
"""
tenant_id = auth.get('tenantId')
if tenant_id and len(tenant_id) > CONF.max_param_size:
raise exception.ValidationSizeError(attribute='tenantId',
size=CONF.max_param_size)
tenant_name = auth.get('tenantName')
if tenant_name and len(tenant_name) > CONF.max_param_size:
raise exception.ValidationSizeError(attribute='tenantName',
size=CONF.max_param_size)
if tenant_name:
try:
tenant_ref = self.assignment_api.get_project_by_name(
tenant_name, CONF.identity.default_domain_id)
tenant_id = tenant_ref['id']
except exception.ProjectNotFound as e:
raise exception.Unauthorized(e)
return tenant_id
def _get_project_roles_and_ref(self, user_id, tenant_id):
"""Returns the project roles for this user, and the project ref."""
tenant_ref = None
role_list = []
if tenant_id:
try:
tenant_ref = self.assignment_api.get_project(tenant_id)
role_list = self.assignment_api.get_roles_for_user_and_project(
user_id, tenant_id)
except exception.ProjectNotFound:
pass
if not role_list:
msg = _('User %(u_id)s is unauthorized for tenant %(t_id)s')
msg = msg % {'u_id': user_id, 't_id': tenant_id}
LOG.warning(msg)
raise exception.Unauthorized(msg)
return (tenant_ref, role_list)
def _get_token_ref(self, token_id, belongs_to=None):
"""Returns a token if a valid one exists.
Optionally, limited to a token owned by a specific tenant.
"""
token_ref = token_model.KeystoneToken(
token_id=token_id,
token_data=self.token_provider_api.validate_token(token_id))
if belongs_to:
if not token_ref.project_scoped:
raise exception.Unauthorized(
_('Token does not belong to specified tenant.'))
if token_ref.project_id != belongs_to:
raise exception.Unauthorized(
_('Token does not belong to specified tenant.'))
return token_ref
@controller.v2_deprecated
@controller.protected()
def validate_token_head(self, context, token_id):
"""Check that a token is valid.
Optionally, also ensure that it is owned by a specific tenant.
Identical to ``validate_token``, except does not return a response.
The code in ``keystone.common.wsgi.render_response`` will remove
the content body.
"""
# TODO(ayoung) validate against revocation API
belongs_to = context['query_string'].get('belongsTo')
return self.token_provider_api.validate_v2_token(token_id, belongs_to)
@controller.v2_deprecated
@controller.protected()
def validate_token(self, context, token_id):
"""Check that a token is valid.
Optionally, also ensure that it is owned by a specific tenant.
Returns metadata about the token along any associated roles.
"""
belongs_to = context['query_string'].get('belongsTo')
# TODO(ayoung) validate against revocation API
return self.token_provider_api.validate_v2_token(token_id, belongs_to)
@controller.v2_deprecated
def delete_token(self, context, token_id):
"""Delete a token, effectively invalidating it for authz."""
# TODO(termie): this stuff should probably be moved to middleware
self.assert_admin(context)
self.token_provider_api.revoke_token(token_id)
@controller.v2_deprecated
@controller.protected()
def revocation_list(self, context, auth=None):
if not CONF.token.revoke_by_id:
raise exception.Gone()
tokens = self.token_provider_api.list_revoked_tokens()
for t in tokens:
expires = t['expires']
if expires and isinstance(expires, datetime.datetime):
t['expires'] = timeutils.isotime(expires)
data = {'revoked': tokens}
json_data = jsonutils.dumps(data)
signed_text = cms.cms_sign_text(json_data,
CONF.signing.certfile,
CONF.signing.keyfile)
return {'signed': signed_text}
@controller.v2_deprecated
def endpoints(self, context, token_id):
"""Return a list of endpoints available to the token."""
self.assert_admin(context)
token_ref = self._get_token_ref(token_id)
catalog_ref = None
if token_ref.project_id:
catalog_ref = self.catalog_api.get_catalog(
token_ref.user_id,
token_ref.project_id,
token_ref.metadata)
return Auth.format_endpoint_list(catalog_ref)
@classmethod
def format_endpoint_list(cls, catalog_ref):
"""Formats a list of endpoints according to Identity API v2.
The v2.0 API wants an endpoint list to look like::
{
'endpoints': [
{
'id': $endpoint_id,
'name': $SERVICE[name],
'type': $SERVICE,
'tenantId': $tenant_id,
'region': $REGION,
}
],
'endpoints_links': [],
}
"""
if not catalog_ref:
return {}
endpoints = []
for region_name, region_ref in six.iteritems(catalog_ref):
for service_type, service_ref in six.iteritems(region_ref):
endpoints.append({
'id': service_ref.get('id'),
'name': service_ref.get('name'),
'type': service_type,
'region': region_name,
'publicURL': service_ref.get('publicURL'),
'internalURL': service_ref.get('internalURL'),
'adminURL': service_ref.get('adminURL'),
})
return {'endpoints': endpoints, 'endpoints_links': []}
| apache-2.0 | 5,148,647,474,886,089,000 | 37.491429 | 79 | 0.566657 | false |
alshedivat/kgp | kgp/models.py | 1 | 8412 |
"""Gaussian Process models for Keras 2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras.models import Model as KerasModel
from keras.engine.topology import _to_list
from keras.engine.training import _standardize_input_data
from .callbacks import UpdateGP
class Model(KerasModel):
"""Model that supports arbitrary structure with GP output layers.
This class extends `keras.models.Model` and allows using Gaussian Processes
as output layers. The model completely inherits the function interface
of Keras and is constructed in a standard way.
On training, GPs are optimized using empirical Bayes (log marginal
likelihood maximization) using a semi-stochastic alternating scheme with
delayed kernel matrix updates [1]. Non-GP output layers can use one of the
standard Keras objectives, e.g., the mean squared error.
"""
def __init__(self, inputs, outputs, name=None):
super(Model, self).__init__(inputs, outputs, name)
# List all output GP layers
self.output_gp_layers = [layer for layer in self.output_layers
if layer.name.startswith('gp')]
def compile(self, optimizer, loss, metrics=None, loss_weights=None,
sample_weight_mode=None, **kwargs):
super(Model, self).compile(optimizer, loss, metrics, loss_weights,
sample_weight_mode, **kwargs)
# Remove the metrics meaningless for GP output layers
self.metrics_tensors = [
mt for mt, mn in zip(self.metrics_tensors, self.metrics_names[1:])
if not (mn.startswith('gp') and mn.endswith('loss'))
]
self.metrics_names = [
mn for mn in self.metrics_names
if not (mn.startswith('gp') and mn.endswith('loss'))
]
# Add MSE and NLML metrics for each output GP
for gp in self.output_gp_layers:
self.metrics_tensors.extend([gp.mse, gp.nlml])
self.metrics_names.extend([gp.name + '_mse', gp.name + '_nlml'])
# Add cumulative MSE & NLML metrics
self.mse = sum([gp.mse for gp in self.output_gp_layers])
self.nlml = sum([gp.nlml for gp in self.output_gp_layers])
self.metrics_tensors.extend([self.mse, self.nlml])
self.metrics_names.extend(['mse', 'nlml'])
def transform(self, x, batch_size=32, learning_phase=0., verbose=0):
h = super(Model, self).predict(x, batch_size, learning_phase, verbose)
return _to_list(h)
def fit(self, X, Y,
batch_size=32,
epochs=1,
gp_n_iter=1,
verbose=1,
callbacks=None,
validation_split=0.,
validation_data=None,
shuffle=True,
class_weight=None,
sample_weight=None,
initial_epoch=0,
**kwargs):
"""Trains the model for a fixed number of epochs (iterations on a dataset).
For argument details, refer to `keras.engine.training.Model.fit`.
Notes:
The following arguments are currently unsupported by models with GP
output layers:
- validation_split
- class_weight
- sample_weight
"""
# Validate user data
X, Y, _ = self._standardize_user_data(
X, Y,
sample_weight=None,
class_weight=None,
check_batch_axis=False,
batch_size=batch_size)
if validation_data is not None:
X_val, Y_val, _ = self._standardize_user_data(
*validation_data,
sample_weight=None,
class_weight=None,
check_batch_axis=False,
batch_size=batch_size)
validation_data = (X_val, Y_val)
# Setup GP updates
update_gp = UpdateGP(ins=(X, Y),
val_ins=validation_data,
batch_size=batch_size,
gp_n_iter=gp_n_iter,
verbose=verbose)
callbacks = [update_gp] + (callbacks or [])
return super(Model, self).fit(
X, Y,
batch_size=batch_size,
epochs=epochs,
verbose=verbose,
callbacks=callbacks,
shuffle=shuffle,
initial_epoch=initial_epoch,
**kwargs)
def finetune(self, X, Y, batch_size=32, gp_n_iter=1, verbose=1):
"""Finetune the output GP layers assuming the network is pre-trained.
Arguments:
----------
X : np.ndarray or list of np.ndarrays
Y : np.ndarray or list of np.ndarrays
batch_size : uint (default: 128)
Batch size used for data streaming through the network.
gp_n_iter : uint (default: 100)
Number of iterations for GP training.
verbose : uint (default: 1)
Verbosity mode, 0 or 1.
"""
# Validate user data
X = _standardize_input_data(
X, self._feed_input_names, self._feed_input_shapes,
check_batch_axis=False, exception_prefix='input')
H = self.transform(X, batch_size=batch_size)
if verbose:
print("Finetuning output GPs...")
for gp, h, y in zip(self.output_gp_layers, H, Y):
# Update GP data (and grid if necessary)
gp.backend.update_data('tr', h, y)
if gp.update_grid:
gp.backend.update_grid('tr')
# Train GP
gp.hyp = gp.backend.train(gp_n_iter, verbose=verbose)
if verbose:
print("Done.")
def evaluate(self, X, Y, batch_size=32, verbose=0):
"""Compute NLML on the given data.
Arguments:
----------
X : np.ndarray or list of np.ndarrays
Y : np.ndarray or list of np.ndarrays
batch_size : uint (default: 128)
verbose : uint (default: 0)
Verbosity mode, 0 or 1.
Returns:
--------
nlml : float
"""
# Validate user data
X, Y, _ = self._standardize_user_data(
X, Y,
sample_weight=None,
class_weight=None,
check_batch_axis=False,
batch_size=batch_size)
H = self.transform(X, batch_size=batch_size)
nlml = 0.
for gp, h, y in zip(self.output_gp_layers, H, Y):
nlml += gp.backend.evaluate('tmp', h, y)
return nlml
def predict(self, X, X_tr=None, Y_tr=None,
batch_size=32, return_var=False, verbose=0):
"""Generate output predictions for the input samples batch by batch.
Arguments:
----------
X : np.ndarray or list of np.ndarrays
batch_size : uint (default: 128)
return_var : bool (default: False)
Whether predictive variance is returned.
verbose : uint (default: 0)
Verbosity mode, 0 or 1.
Returns:
--------
preds : a list or a tuple of lists
Lists of output predictions and variance estimates.
"""
# Update GP data if provided (and grid if necessary)
if X_tr is not None and Y_tr is not None:
X_tr, Y_tr, _ = self._standardize_user_data(
X_tr, Y_tr,
sample_weight=None,
class_weight=None,
check_batch_axis=False,
batch_size=batch_size)
H_tr = self.transform(X_tr, batch_size=batch_size)
for gp, h, y in zip(self.output_gp_layers, H_tr, Y_tr):
gp.backend.update_data('tr', h, y)
if gp.update_grid:
gp.backend.update_grid('tr')
# Validate user data
X = _standardize_input_data(
X, self._feed_input_names, self._feed_input_shapes,
check_batch_axis=False, exception_prefix='input')
H = self.transform(X, batch_size=batch_size)
preds = []
for gp, h in zip(self.output_gp_layers, H):
preds.append(gp.backend.predict(h, return_var=return_var))
if return_var:
preds = map(list, zip(*preds))
return preds
# Apply tweaks
from . import tweaks
| mit | -4,385,918,913,147,216,400 | 34.344538 | 83 | 0.551236 | false |
dstlmrk/catcher | catcher/api/middleware.py | 1 | 3340 |
#!/usr/bin/python
# coding=utf-8
import falcon
import ujson
import json
from catcher import models
import datetime
from playhouse.shortcuts import model_to_dict
import logging
from catcher.models import User, NullUser
import peewee
class PeeweeConnection(object):
def process_request(self, req, resp):
models.db.connect()
def process_response(self, req, resp, resource):
if not models.db.is_closed():
models.db.close()
class Crossdomain(object):
def process_request(self, req, resp):
resp.append_header(
"Access-Control-Allow-Origin", "*"
)
resp.append_header(
"Access-Control-Allow-Headers",
"Content-Type,Authorization,X-Name"
)
resp.append_header(
"Access-Control-Allow-Methods",
"PUT,POST,DELETE,GET"
)
class Authorization(object):
def process_request(self, req, resp):
user = NullUser()
try:
if req.auth:
user = User.get(api_key=req.auth)
except User.DoesNotExist:
pass
# debug
print "LOGGED:", user
req.context["user"] = user
class RequireJSON(object):
def process_request(self, req, resp):
if not req.client_accepts_json:
raise falcon.HTTPNotAcceptable(
'This API only supports responses encoded as JSON.'
)
if req.method in ('POST', 'PUT'):
if not req.content_type or 'application/json' not in req.content_type:
raise falcon.HTTPUnsupportedMediaType(
'This API only supports requests encoded as JSON.'
)
class JSONTranslator(object):
def process_request(self, req, resp):
# req.stream corresponds to the WSGI wsgi.input environ variable,
# and allows you to read bytes from the request body
if req.content_length in (None, 0):
return # nothing to do
body = req.stream.read()
if not body:
raise falcon.HTTPBadRequest(
'Empty request body',
'A valid JSON document is required.'
)
try:
req.context['data'] = ujson.loads(body)
except (ValueError, UnicodeDecodeError):
raise falcon.HTTPBadRequest(
'Malformed JSON',
'Could not decode the request body. The '
'JSON was incorrect or not encoded as '
'UTF-8.'
)
def process_response(self, req, resp, resource):
if 'result' not in req.context:
return
resp.body = json.dumps(req.context['result'], default = self.converter)
def converter(self, obj):
if isinstance(obj, datetime.time) or isinstance(obj, datetime.date) or isinstance(obj, datetime.datetime):
return obj.isoformat()
if isinstance(obj, set):
return list(obj)
if isinstance(obj, peewee.Model):
return model_to_dict(obj)
if isinstance(obj, models.MySQLModel):
# TODO: I don't understand this, because it doesn't work
return model_to_dict(obj)
logging.warning("Converter doesn't know how convert data (%s [%s])" % (obj, type(obj)))
return None
| mit | -6,470,274,050,978,394,000 | 29.372727 | 114 | 0.580539 | false |
CharlesGarrocho/MCC | computacao_movel/DTN/server.py | 1 | 2035 |
import os
import json
import time
import socket
from threading import Thread
IPS = []
def trata_cliente(conexao, endereco):
requisicao = conexao.recv(1024)
print requisicao
if requisicao == 'LIST':
arqs = os.listdir('/etc/black/garagem/arquivos/')
conexao.send(json.dumps(arqs))
elif requisicao == 'GET':
arqs = os.listdir('/etc/black/garagem/arquivos/')
arquivo = conexao.recv(1024)
if arquivo in arqs:
fp = open('/etc/black/garagem/arquivos/{0}'.format(arquivo), 'r')
strng = fp.read(1024)
while strng:
conexao.send(strng)
strng = fp.read(1024)
elif requisicao == 'PUT':
conexao.send('OK')
arqs = os.listdir('/etc/black/garagem/arquivos/')
arquivo = conexao.recv(1024)
print arquivo
print arqs
if arquivo not in arqs:
conexao.send('TRUE')
arq = open('/etc/black/garagem/arquivos/{0}'.format(arquivo), 'w')
while 1:
dados = conexao.recv(1024)
if not dados:
break
arq.write(dados)
arq.close()
else:
conexao.send('FALSE')
conexao.close()
def loop_servidor():
soquete = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
soquete.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
soquete.bind(('127.0.0.1', 5555))
soquete.listen(10)
global IPS
# Wait here for new connections.
while True:
# For each new connection a new thread is created to handle its requests.
conexao = soquete.accept()
novaConexao = []
novaConexao.append(conexao[0])
novaConexao.append(conexao[1])
if conexao[1] not in IPS:
IPS.append(conexao[1])
Thread(target=trata_cliente, args=(novaConexao)).start()
if __name__ == '__main__':
print 'Servidor de Arquivos Iniciou na Porta 5555...'
Thread(target=loop_servidor).start()
| apache-2.0 | 7,516,643,370,373,497,000 | 29.848485 | 86 | 0.58231 | false |
KE-works/pykechain | pykechain/models/property.py | 1 | 22270 |
from typing import Any, List, Dict, Optional, Text, Union, Tuple, Iterable, TypeVar
import requests
from jsonschema import validate
from pykechain.defaults import API_EXTRA_PARAMS
from pykechain.enums import Category
from pykechain.exceptions import APIError, IllegalArgumentError
from pykechain.models import Base, BaseInScope
from pykechain.models.input_checks import check_text, check_type
from pykechain.models.representations.component import RepresentationsComponent
from pykechain.models.validators import PropertyValidator
from pykechain.models.validators.validator_schemas import options_json_schema
from pykechain.utils import clean_empty_values, empty
T = TypeVar("T")
class Property(BaseInScope):
"""A virtual object representing a KE-chain property.
.. versionadded: 3.0
This is a `Property` to communicate with a KE-chain 3 backend.
:cvar bulk_update: flag to postpone update of properties until manually requested
:type bulk_update: bool
:ivar type: The property type of the property. One of the types described in :class:`pykechain.enums.PropertyType`
:type type: str
:ivar category: The category of the property, either `Category.MODEL` of `Category.INSTANCE`
:type category: str
:ivar description: description of the property
:type description: str or None
:ivar unit: unit of measure of the property
:type unit: str or None
:ivar model: the id of the model (not the model object)
:type model: str
:ivar output: a boolean if the value is configured as an output (in an activity)
:type output: bool
:ivar part: The (parent) part in which this property is available
:type part: :class:`Part`
:ivar value: the property value, can be set as well as property
:type value: Any
:ivar validators: the list of validators that are available in the property
:type validators: List[PropertyValidator]
:ivar is_valid: if the property conforms to the validators
:type is_valid: bool
:ivar is_invalid: if the property does not conform to the validator
:type is_invalid: bool
"""
_USE_BULK_UPDATE = False
_update_package = dict()
def __init__(self, json, **kwargs):
"""Construct a Property from a json object."""
super().__init__(json, **kwargs)
self.output: bool = json.get('output')
self.model_id: Optional[Text] = json.get('model_id')
self.part_id = json.get('part_id')
self.ref = json.get('ref')
self.type = json.get('property_type')
self.category = json.get('category')
self.description = json.get('description', None)
self.unit = json.get('unit', None)
self.order = json.get('order')
# Create protected variables
self._value: Any = json.get('value')
self._options: Dict = json.get('value_options', {})
self._part: Optional['Part'] = None
self._model: Optional['Property'] = None
self._validators: List[PropertyValidator] = []
self._validation_results: List = []
self._validation_reasons: List = []
self._representations_container = RepresentationsComponent(
self,
self._options.get('representations', {}),
self._save_representations,
)
validate(self._options, options_json_schema)
if self._options.get('validators'):
self._parse_validators()
def refresh(self, json: Optional[Dict] = None, url: Optional[Text] = None, extra_params: Optional = None) -> None:
"""Refresh the object in place."""
super().refresh(json=json,
url=self._client._build_url('property', property_id=self.id),
extra_params=API_EXTRA_PARAMS['property'])
def has_value(self) -> bool:
"""Predicate to indicate if the property has a value set.
This predicate determines if the property has a value set. It will not make a call to KE-chain API (in case
of reference properties). So it is a tiny fraction 'cheaper' in terms of processing time than checking the
`Property.value` itself.
It will return True if the property_type is a Boolean and set to a value of False.
:returns: True if the property has a value set, otherwise (also when value is None) returns False
:rtype: Bool
"""
if isinstance(self._value, (float, int, bool)):
return True # to prevent "bool(0.00) = False" or "False = False"
else:
return bool(self._value)
@property
def use_bulk_update(self):
"""Set or get the toggle to asynchronously update property values."""
# set the class attribute to make this value a singleton
return self.__class__._USE_BULK_UPDATE
@use_bulk_update.setter
def use_bulk_update(self, value):
self.__class__.set_bulk_update(value)
@classmethod
def set_bulk_update(cls, value):
"""Set global class attribute to toggle the use of bulk-updates of properties."""
assert isinstance(value, bool), "`bulk_update` must be set to a boolean, not {}".format(type(value))
cls._USE_BULK_UPDATE = value
@property
def value(self) -> Any:
"""Retrieve the data value of a property.
Setting this value will immediately update the property in KE-chain.
:returns: the value
"""
return self._value
@value.setter
def value(self, value: Any) -> None:
value = self.serialize_value(value)
if self.use_bulk_update:
self._pend_update(dict(value=value))
self._value = value
else:
self._put_value(value)
@classmethod
def update_values(cls, client: 'Client', use_bulk_update: bool = False) -> None:
"""
Perform the bulk update of property values using the stored values in the `Property` class.
:param client: Client object
:type client: Client
:param use_bulk_update: set the class attribute, defaults to False.
:type use_bulk_update: bool
:return: None
"""
if cls._USE_BULK_UPDATE:
properties = [dict(id=key, **values) for key, values in cls._update_package.items()]
client.update_properties(properties=properties)
cls._update_package = dict()
cls.set_bulk_update(use_bulk_update)
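# A minimal sketch of the bulk-update flow implemented above (the client and
# part objects are illustrative assumptions):
#
#     Property.set_bulk_update(True)
#     for prop in part.properties:
#         prop.value = 42                     # values are only pended locally
#     Property.update_values(client=client)   # single bulk request, then reset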
def _pend_update(self, data):
"""Store the value to be send at a later point in time using `update_values`."""
existing_data = self.__class__._update_package.get(self.id, None)
if existing_data:
existing_data.update(data)
else:
self.__class__._update_package[self.id] = data
def _put_value(self, value):
"""Send the value to KE-chain."""
url = self._client._build_url('property', property_id=self.id)
response = self._client._request('PUT', url, params=API_EXTRA_PARAMS['property'], json={'value': value})
if response.status_code != requests.codes.ok: # pragma: no cover
raise APIError("Could not update Property {}".format(self), response=response)
self.refresh(json=response.json()['results'][0])
def serialize_value(self, value: [T]) -> T:
"""
Serialize the value to be set on the property.
:param value: non-serialized value
:type value: Any
:return: serialized value
"""
return value.id if isinstance(value, Base) else value
@property
def part(self) -> 'Part':
"""
Retrieve the part that holds this Property.
:returns: The :class:`Part` associated to this property
:raises APIError: if the `Part` is not found
"""
if self._part is None:
self._part = self._client.part(pk=self.part_id, category=self.category)
return self._part
def model(self) -> 'AnyProperty':
"""
Model object of the property if the property is an instance otherwise itself.
Will cache the model object in order to not generate too many API calls. Otherwise will make an API call
to the backend to retrieve its model object.
:return: `Property` model object if the current `Property` is an instance.
:rtype: :class:`pykechain.models.AnyProperty`
"""
if self.category == Category.MODEL:
return self
elif self._model is None:
self._model = self._client.property(pk=self.model_id, category=Category.MODEL)
return self._model
@property
def validators(self):
"""Provide list of Validator objects.
:returns: list of :class:`PropertyValidator` objects
:rtype: list(PropertyValidator)
"""
return self._validators
@validators.setter
def validators(self, validators: Iterable[PropertyValidator]) -> None:
if self.category != Category.MODEL:
raise IllegalArgumentError("To update the list of validators, it can only work on "
"`Property` of category 'MODEL'")
if not isinstance(validators, (tuple, list)) or not all(isinstance(v, PropertyValidator) for v in validators):
raise IllegalArgumentError('Should be a list or tuple with PropertyValidator objects, '
'got {}'.format(type(validators)))
for validator in validators:
validator.validate_json()
# set the internal validators list
self._validators = list(set(validators))
# dump to _json options
self._dump_validators()
# update the options to KE-chain backend
self.edit(options=self._options)
def _parse_validators(self):
"""Parse the validator in the options to validators."""
self._validators = []
validators_json = self._options.get('validators')
for validator_json in validators_json:
self._validators.append(PropertyValidator.parse(json=validator_json))
def _dump_validators(self):
"""Dump the validators as json inside the _options dictionary with the key `validators`."""
validators_json = []
for validator in self._validators:
if isinstance(validator, PropertyValidator):
validators_json.append(validator.as_json())
else:
raise APIError("validator is not a PropertyValidator: '{}'".format(validator))
if self._options.get('validators', list()) == validators_json:
# no change
pass
else:
new_options = self._options.copy() # make a copy
new_options.update({'validators': validators_json})
validate(new_options, options_json_schema)
self._options = new_options
@property
def is_valid(self) -> Optional[bool]:
"""Determine if the value in the property is valid.
If the value of the property is validated as 'valid', then it returns True, otherwise False.
When no validators are configured, returns a None. It checks against all configured validators
and returns a single boolean outcome.
:returns: True when the `value` is valid
:rtype: bool or None
"""
if not self._validators:
return None
else:
self.validate(reason=False)
if all([vr is None for vr in self._validation_results]):
return None
else:
return all(self._validation_results)
@property
def is_invalid(self) -> Optional[bool]:
"""Determine if the value in the property is invalid.
If the value of the property is validated as 'invalid', then it returns True, otherwise False.
When no validators are configured, returns a None. It checks against all configured validators
and returns a single boolean outcome.
:returns: True when the `value` is invalid
:rtype: bool
"""
return not self.is_valid if self.is_valid is not None else None
def validate(self, reason: bool = True) -> List[Union[bool, Tuple]]:
"""Return the validation results and include an (optional) reason.
If reason keyword is true, the validation is returned for each validation
the [(<result: bool>, <reason:str>), ...]. If reason is False, only a single list of validation results
for each configured validator is returned.
:param reason: (optional) switch to indicate if the reason of the validation should be provided
:type reason: bool
:return: list of validation results [bool, bool, ...] or
a list of validation results, reasons [(bool, str), ...]
:rtype: list(bool) or list((bool, str))
:raises Exception: for incorrect validators or incompatible values
"""
self._validation_results = [validator.is_valid(self._value) for validator in self._validators]
self._validation_reasons = [validator.get_reason() for validator in self._validators]
if reason:
return list(zip(self._validation_results, self._validation_reasons))
else:
return self._validation_results
@property
def representations(self):
"""Get and set the property representations."""
return self._representations_container.get_representations()
@representations.setter
def representations(self, value):
if self.category != Category.MODEL:
raise IllegalArgumentError("To update the list of representations, it can only work on a "
"`Property` of category '{}'".format(Category.MODEL))
self._representations_container.set_representations(value)
def _save_representations(self, representation_options):
self._options.update({'representations': representation_options})
self.edit(options=self._options)
@classmethod
def create(cls, json: dict, **kwargs) -> 'AnyProperty':
"""Create a property based on the json data.
This method will attach the right class to a property, enabling the use of type-specific methods.
It does not create a property object in KE-chain, but a pseudo :class:`Property` object.
:param json: the json from which the :class:`Property` object to create
:type json: dict
:return: a :class:`Property` object
"""
property_type = json.get('property_type')
from pykechain.models import property_type_to_class_map
# Get specific Property subclass, defaulting to Property itself
property_class = property_type_to_class_map.get(property_type, Property)
# Call constructor and return new object
return property_class(json, **kwargs)
def edit(
self,
name: Optional[Text] = empty,
description: Optional[Text] = empty,
unit: Optional[Text] = empty,
options: Optional[Dict] = empty,
**kwargs
) -> None:
"""Edit the details of a property (model).
Setting an input to None will clear out the value (exception being name).
:param name: (optional) new name of the property to edit. Cannot be cleared.
:type name: basestring or None or Empty
:param description: (optional) new description of the property. Can be cleared.
:type description: basestring or None or Empty
:param unit: (optional) new unit of the property. Can be cleared.
:type unit: basestring or None or Empty
:param options: (options) new options of the property. Can be cleared.
:type options: dict or None or Empty
:param kwargs: (optional) additional kwargs to be edited
:return: None
:raises APIError: When unable to edit the property
:raises IllegalArgumentError: when the type of the input is provided incorrect.
Examples
--------
>>> front_fork = project.part('Front Fork')
>>> color_property = front_fork.property(name='Color')
>>> color_property.edit(name='Shade',description='Could also be called tint, depending on mixture',unit='RGB')
>>> wheel_property_reference = self.project.model('Bike').property('Reference wheel')
>>> wheel_model = self.project.model('Wheel')
>>> diameter_property = wheel_model.property('Diameter')
>>> spokes_property = wheel_model.property('Spokes')
>>> prefilters = {'property_value': diameter_property.id + ":{}:lte".format(15)}
>>> propmodels_excl = [spokes_property.id]
>>> options = dict()
>>> options['prefilters'] = prefilters
>>> options['propmodels_excl'] = propmodels_excl
>>> wheel_property_reference.edit(options=options)
Not mentioning an input parameter in the function will leave it unchanged. Setting a parameter as None will
clear its value (where that is possible). The example below will clear the description, but leave everything
else unchanged.
>>> wheel_property.edit(description=None)
"""
update_dict = {
'name': check_text(name, 'name') or self.name,
'description': check_text(description, 'description') or str(),
'unit': check_text(unit, 'unit') or str(),
'value_options': check_type(options, dict, 'options') or dict()
}
if kwargs: # pragma: no cover
update_dict.update(kwargs)
update_dict = clean_empty_values(update_dict=update_dict)
if self.use_bulk_update:
self._pend_update(data=update_dict)
else:
update_dict["id"] = self.id
response = self._client._request(
'PUT',
self._client._build_url('property', property_id=self.id),
params=API_EXTRA_PARAMS['property'],
json=update_dict
)
if response.status_code != requests.codes.ok: # pragma: no cover
raise APIError("Could not update Property {}".format(self), response=response)
self.refresh(json=response.json()['results'][0])
def delete(self) -> None:
"""Delete this property.
:return: None
:raises APIError: if delete was not successful
"""
response = self._client._request('DELETE', self._client._build_url('property', property_id=self.id))
if response.status_code != requests.codes.no_content: # pragma: no cover
raise APIError("Could not delete Property {}".format(self), response=response)
def copy(self, target_part: 'Part', name: Optional[Text] = None) -> 'Property':
"""Copy a property model or instance.
:param target_part: `Part` object under which the desired `Property` is copied
:type target_part: :class:`Part`
:param name: how the copied `Property` should be called
:type name: basestring
:return: copied :class:`Property` model.
:raises IllegalArgumentError: if property and target_part have different `Category`
Example
-------
>>> property_to_copy = client.property(name='Diameter')
>>> bike = client.model('Bike')
>>> property_to_copy.copy(target_part=bike, name='Bike diameter?')
"""
from pykechain.models import Part
check_type(target_part, Part, 'target_part')
name = check_text(name, 'name') or self.name
if self.category == Category.MODEL and target_part.category == Category.MODEL:
# Cannot move a `Property` model under a `Part` instance or vice versa
copied_property_model = target_part.add_property(name=name,
property_type=self.type,
description=self.description,
unit=self.unit,
default_value=self.value,
options=self._options
)
return copied_property_model
elif self.category == Category.INSTANCE and target_part.category == Category.INSTANCE:
target_model = target_part.model()
self_model = self.model()
target_model.add_property(name=name,
property_type=self_model.type,
description=self_model.description,
unit=self_model.unit,
default_value=self_model.value,
options=self_model._options
)
target_part.refresh()
copied_property_instance = target_part.property(name=name)
copied_property_instance.value = self.value
return copied_property_instance
else:
raise IllegalArgumentError('property "{}" and target part "{}" must have the same category'.
format(self.name, target_part.name))
def move(self, target_part: 'Part', name: Optional[Text] = None) -> 'Property':
"""Move a property model or instance.
:param target_part: `Part` object under which the desired `Property` is moved
:type target_part: :class:`Part`
:param name: how the moved `Property` should be called
:type name: basestring
:return: copied :class:`Property` model.
:raises IllegalArgumentError: if property and target_part have different `Category`
Example
-------
>>> property_to_move = client.property(name='Diameter')
>>> bike = client.model('Bike')
>>> property_to_move.move(target_part=bike, name='Bike diameter?')
"""
moved_property = self.copy(target_part=target_part, name=name)
if self.category == Category.MODEL:
self.delete()
else:
self.model().delete()
return moved_property
| apache-2.0 | -9,096,772,522,678,147,000 | 41.098299 | 118 | 0.613022 | false |
jkyeung/XlsxWriter | xlsxwriter/test/comparison/test_chart_format08.py | 1 | 1699 |
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2016, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'chart_format08.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = []
self.ignore_elements = {}
def test_create_file(self):
"""Test the creation of an XlsxWriter file with chart formatting."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'line'})
chart.axis_ids = [46164608, 46176128]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column('A1', data[0])
worksheet.write_column('B1', data[1])
worksheet.write_column('C1', data[2])
chart.add_series({
'categories': '=Sheet1!$A$1:$A$5',
'values': '=Sheet1!$B$1:$B$5',
'trendline': {'type': 'linear'},
})
chart.add_series({
'categories': '=Sheet1!$A$1:$A$5',
'values': '=Sheet1!$C$1:$C$5',
})
worksheet.insert_chart('E9', chart)
workbook.close()
self.assertExcelEqual()
| bsd-2-clause | -4,284,780,156,729,032,000 | 24.742424 | 79 | 0.527369 | false |
ChantyTaguan/zds-site | zds/forum/tests.py | 1 | 34223 |
# coding: utf-8
from django.conf import settings
from django.test import TestCase
from django.core.urlresolvers import reverse
from zds.utils import slugify
from zds.forum.factories import CategoryFactory, ForumFactory, \
TopicFactory, PostFactory
from zds.member.factories import ProfileFactory, StaffProfileFactory
from zds.utils.models import CommentLike, CommentDislike, Alert
from django.core import mail
from .models import Post, Topic, TopicFollowed, TopicRead
class ForumMemberTests(TestCase):
def setUp(self):
settings.EMAIL_BACKEND = \
'django.core.mail.backends.locmem.EmailBackend'
self.category1 = CategoryFactory(position=1)
self.category2 = CategoryFactory(position=2)
self.category3 = CategoryFactory(position=3)
self.forum11 = ForumFactory(
category=self.category1,
position_in_category=1)
self.forum12 = ForumFactory(
category=self.category1,
position_in_category=2)
self.forum13 = ForumFactory(
category=self.category1,
position_in_category=3)
self.forum21 = ForumFactory(
category=self.category2,
position_in_category=1)
self.forum22 = ForumFactory(
category=self.category2,
position_in_category=2)
self.user = ProfileFactory().user
self.user2 = ProfileFactory().user
log = self.client.login(
username=self.user.username,
password='hostel77')
self.assertEqual(log, True)
def test_display(self):
"""Test forum display (full: root, category, forum) Topic display test
is in creation topic test."""
# Forum root
response = self.client.get(reverse('zds.forum.views.index'))
self.assertContains(response, 'Liste des forums')
# Category
response = self.client.get(
reverse(
'zds.forum.views.cat_details',
args=[
self.category1.slug]))
self.assertContains(response, self.category1.title)
# Forum
response = self.client.get(
reverse(
'zds.forum.views.details',
args=[
self.category1.slug,
self.forum11.slug]))
self.assertContains(response, self.category1.title)
self.assertContains(response, self.forum11.title)
def test_create_topic(self):
"""To test all aspects of topic's creation by member."""
result = self.client.post(
reverse('zds.forum.views.new') + '?forum={0}'
.format(self.forum12.pk),
{'title': u'Un autre sujet',
'subtitle': u'Encore ces lombards en plein ete',
'text': u'C\'est tout simplement l\'histoire de la ville de Paris que je voudrais vous conter '
},
follow=False)
self.assertEqual(result.status_code, 302)
# check topic's number
self.assertEqual(Topic.objects.all().count(), 1)
topic = Topic.objects.get(pk=1)
# check post's number
self.assertEqual(Post.objects.all().count(), 1)
post = Post.objects.get(pk=1)
# check topic and post
self.assertEqual(post.topic, topic)
# check position
self.assertEqual(post.position, 1)
self.assertEqual(post.author, self.user)
self.assertEqual(post.editor, None)
self.assertNotEqual(post.ip_address, None)
self.assertNotEqual(post.text_html, None)
self.assertEqual(post.like, 0)
self.assertEqual(post.dislike, 0)
self.assertEqual(post.is_visible, True)
# check last message
self.assertEqual(topic.last_message, post)
# Check view
response = self.client.get(topic.get_absolute_url())
self.assertContains(response, self.category1.title)
self.assertContains(response, self.forum11.title)
self.assertContains(response, topic.title)
self.assertContains(response, topic.subtitle)
def test_answer(self):
"""To test all aspects of answer."""
user1 = ProfileFactory().user
user2 = ProfileFactory().user
topic1 = TopicFactory(forum=self.forum11, author=self.user)
post1 = PostFactory(topic=topic1, author=self.user, position=1)
post2 = PostFactory(topic=topic1, author=self.user, position=2)
post3 = PostFactory(topic=topic1, author=user1, position=3)
TopicRead(topic=topic1, user=user1, post=post3).save()
TopicRead(topic=topic1, user=user2, post=post3).save()
TopicRead(topic=topic1, user=self.user, post=post3).save()
TopicFollowed(topic=topic1, user=user1, email=True).save()
TopicFollowed(topic=topic1, user=user2, email=True).save()
TopicFollowed(topic=topic1, user=self.user, email=True).save()
result = self.client.post(
reverse('zds.forum.views.answer') + '?sujet={0}'.format(topic1.pk),
{
'last_post': topic1.last_message.pk,
'text': u'C\'est tout simplement l\'histoire de la ville de Paris que je voudrais vous conter '
},
follow=False)
self.assertEqual(result.status_code, 302)
self.assertEquals(len(mail.outbox), 2)
# check topic's number
self.assertEqual(Topic.objects.all().count(), 1)
# check post's number
self.assertEqual(Post.objects.all().count(), 4)
# check topic and post
self.assertEqual(post1.topic, topic1)
self.assertEqual(post2.topic, topic1)
self.assertEqual(post3.topic, topic1)
# check values
self.assertEqual(Post.objects.get(pk=4).topic, topic1)
self.assertEqual(Post.objects.get(pk=4).position, 4)
self.assertEqual(Post.objects.get(pk=4).editor, None)
self.assertEqual(
Post.objects.get(
pk=4).text,
u'C\'est tout simplement l\'histoire de la ville de Paris que je voudrais vous conter ')
def test_edit_main_post(self):
"""To test all aspects of the edition of main post by member."""
topic1 = TopicFactory(forum=self.forum11, author=self.user)
post1 = PostFactory(topic=topic1, author=self.user, position=1)
topic2 = TopicFactory(forum=self.forum12, author=self.user)
post2 = PostFactory(topic=topic2, author=self.user, position=1)
topic3 = TopicFactory(forum=self.forum21, author=self.user)
post3 = PostFactory(topic=topic3, author=self.user, position=1)
result = self.client.post(
reverse('zds.forum.views.edit_post') + '?message={0}'
.format(post1.pk),
{'title': u'Un autre sujet',
'subtitle': u'Encore ces lombards en plein été',
'text': u'C\'est tout simplement l\'histoire de la ville de Paris que je voudrais vous conter '
},
follow=False)
self.assertEqual(result.status_code, 302)
# check topic's number
self.assertEqual(Topic.objects.all().count(), 3)
# check post's number
self.assertEqual(Post.objects.all().count(), 3)
# check topic and post
self.assertEqual(post1.topic, topic1)
self.assertEqual(post2.topic, topic2)
self.assertEqual(post3.topic, topic3)
# check values
self.assertEqual(
Topic.objects.get(
pk=topic1.pk).title,
u'Un autre sujet')
self.assertEqual(
Topic.objects.get(
pk=topic1.pk).subtitle,
u'Encore ces lombards en plein été')
self.assertEqual(
Post.objects.get(
pk=post1.pk).text,
u'C\'est tout simplement l\'histoire de la ville de Paris que je voudrais vous conter ')
# check edit data
self.assertEqual(Post.objects.get(pk=post1.pk).editor, self.user)
def test_edit_post(self):
"""To test all aspects of the edition of simple post by member."""
topic1 = TopicFactory(forum=self.forum11, author=self.user)
post1 = PostFactory(topic=topic1, author=self.user, position=1)
post2 = PostFactory(topic=topic1, author=self.user, position=2)
post3 = PostFactory(topic=topic1, author=self.user, position=3)
result = self.client.post(
reverse('zds.forum.views.edit_post') + '?message={0}'
.format(post2.pk),
{
'text': u'C\'est tout simplement l\'histoire de la ville de Paris que je voudrais vous conter '
},
follow=False)
self.assertEqual(result.status_code, 302)
# check topic's number
self.assertEqual(Topic.objects.all().count(), 1)
# check post's number
self.assertEqual(Post.objects.all().count(), 3)
# check topic and post
self.assertEqual(post1.topic, topic1)
self.assertEqual(post2.topic, topic1)
self.assertEqual(post3.topic, topic1)
# check values
self.assertEqual(
Post.objects.get(
pk=post2.pk).text,
u'C\'est tout simplement l\'histoire de la ville de Paris que je voudrais vous conter ')
# check edit data
self.assertEqual(Post.objects.get(pk=post2.pk).editor, self.user)
def test_quote_post(self):
"""To test when a member quote anyone post."""
user1 = ProfileFactory().user
topic1 = TopicFactory(forum=self.forum11, author=self.user)
PostFactory(topic=topic1, author=self.user, position=1)
post2 = PostFactory(topic=topic1, author=user1, position=2)
PostFactory(topic=topic1, author=user1, position=3)
result = self.client.get(
reverse('zds.forum.views.answer') +
'?sujet={0}&cite={0}'.format(
topic1.pk,
post2.pk),
follow=True)
self.assertEqual(result.status_code, 200)
def test_signal_post(self):
"""To test when a member quote anyone post."""
user1 = ProfileFactory().user
topic1 = TopicFactory(forum=self.forum11, author=self.user)
PostFactory(topic=topic1, author=self.user, position=1)
post2 = PostFactory(topic=topic1, author=user1, position=2)
PostFactory(topic=topic1, author=user1, position=3)
result = self.client.post(
reverse('zds.forum.views.edit_post') +
'?message={0}'.format(post2.pk),
{
'signal_text': u'Troll',
'signal_message': 'confirmer'
},
follow=False)
self.assertEqual(result.status_code, 302)
self.assertEqual(Alert.objects.all().count(), 1)
self.assertEqual(Alert.objects.filter(author=self.user).count(), 1)
self.assertEqual(Alert.objects.get(author=self.user).text, u'Troll')
def test_like_post(self):
"""Test when a member like any post."""
user1 = ProfileFactory().user
topic1 = TopicFactory(forum=self.forum11, author=self.user)
post1 = PostFactory(topic=topic1, author=self.user, position=1)
post2 = PostFactory(topic=topic1, author=user1, position=2)
post3 = PostFactory(topic=topic1, author=self.user, position=3)
result = self.client.get(
reverse('zds.forum.views.like_post') +
'?message={0}'.format(
post2.pk),
follow=False)
self.assertEqual(result.status_code, 302)
self.assertEqual(CommentLike.objects.all().count(), 1)
self.assertEqual(Post.objects.get(pk=post1.pk).like, 0)
self.assertEqual(Post.objects.get(pk=post2.pk).like, 1)
self.assertEqual(Post.objects.get(pk=post3.pk).like, 0)
self.assertEqual(Post.objects.get(pk=post1.pk).dislike, 0)
self.assertEqual(Post.objects.get(pk=post2.pk).dislike, 0)
self.assertEqual(Post.objects.get(pk=post3.pk).dislike, 0)
self.assertEqual(
CommentLike.objects.filter(
comments__pk=post1.pk).all().count(),
0)
self.assertEqual(
CommentLike.objects.filter(
comments__pk=post2.pk).all().count(),
1)
self.assertEqual(
CommentLike.objects.filter(
comments__pk=post3.pk).all().count(),
0)
result = self.client.get(
reverse('zds.forum.views.like_post') +
'?message={0}'.format(
post1.pk),
follow=False)
self.assertEqual(result.status_code, 302)
self.assertEqual(CommentLike.objects.all().count(), 1)
self.assertEqual(Post.objects.get(pk=post1.pk).like, 0)
self.assertEqual(Post.objects.get(pk=post2.pk).like, 1)
self.assertEqual(Post.objects.get(pk=post3.pk).like, 0)
self.assertEqual(Post.objects.get(pk=post1.pk).dislike, 0)
self.assertEqual(Post.objects.get(pk=post2.pk).dislike, 0)
self.assertEqual(Post.objects.get(pk=post3.pk).dislike, 0)
self.assertEqual(
CommentLike.objects.filter(
comments__pk=post1.pk).all().count(),
0)
self.assertEqual(
CommentLike.objects.filter(
comments__pk=post2.pk).all().count(),
1)
self.assertEqual(
CommentLike.objects.filter(
comments__pk=post3.pk).all().count(),
0)
def test_dislike_post(self):
"""Test when a member dislike any post."""
user1 = ProfileFactory().user
topic1 = TopicFactory(forum=self.forum11, author=self.user)
post1 = PostFactory(topic=topic1, author=self.user, position=1)
post2 = PostFactory(topic=topic1, author=user1, position=2)
post3 = PostFactory(topic=topic1, author=self.user, position=3)
result = self.client.get(
reverse('zds.forum.views.dislike_post') +
'?message={0}'.format(
post2.pk),
follow=False)
self.assertEqual(result.status_code, 302)
self.assertEqual(CommentDislike.objects.all().count(), 1)
self.assertEqual(Post.objects.get(pk=post1.pk).like, 0)
self.assertEqual(Post.objects.get(pk=post2.pk).like, 0)
self.assertEqual(Post.objects.get(pk=post3.pk).like, 0)
self.assertEqual(Post.objects.get(pk=post1.pk).dislike, 0)
self.assertEqual(Post.objects.get(pk=post2.pk).dislike, 1)
self.assertEqual(Post.objects.get(pk=post3.pk).dislike, 0)
self.assertEqual(
CommentDislike.objects.filter(
comments__pk=post1.pk).all().count(),
0)
self.assertEqual(
CommentDislike.objects.filter(
comments__pk=post2.pk).all().count(),
1)
self.assertEqual(
CommentDislike.objects.filter(
comments__pk=post3.pk).all().count(),
0)
result = self.client.get(
reverse('zds.forum.views.like_post') +
'?message={0}'.format(
post1.pk),
follow=False)
self.assertEqual(result.status_code, 302)
self.assertEqual(CommentDislike.objects.all().count(), 1)
self.assertEqual(Post.objects.get(pk=post1.pk).like, 0)
self.assertEqual(Post.objects.get(pk=post2.pk).like, 0)
self.assertEqual(Post.objects.get(pk=post3.pk).like, 0)
self.assertEqual(Post.objects.get(pk=post1.pk).dislike, 0)
self.assertEqual(Post.objects.get(pk=post2.pk).dislike, 1)
self.assertEqual(Post.objects.get(pk=post3.pk).dislike, 0)
self.assertEqual(
CommentDislike.objects.filter(
comments__pk=post1.pk).all().count(),
0)
self.assertEqual(
CommentDislike.objects.filter(
comments__pk=post2.pk).all().count(),
1)
self.assertEqual(
CommentDislike.objects.filter(
comments__pk=post3.pk).all().count(),
0)
def test_useful_post(self):
"""To test when a member mark a post is usefull."""
user1 = ProfileFactory().user
topic1 = TopicFactory(forum=self.forum11, author=self.user)
post1 = PostFactory(topic=topic1, author=self.user, position=1)
post2 = PostFactory(topic=topic1, author=user1, position=2)
post3 = PostFactory(topic=topic1, author=user1, position=3)
result = self.client.get(
reverse('zds.forum.views.useful_post') +
'?message={0}'.format(
post2.pk),
follow=False)
self.assertEqual(result.status_code, 302)
self.assertEqual(Post.objects.get(pk=post1.pk).is_useful, False)
self.assertEqual(Post.objects.get(pk=post2.pk).is_useful, True)
self.assertEqual(Post.objects.get(pk=post3.pk).is_useful, False)
        # try to mark the first post as useful
result = self.client.get(
reverse('zds.forum.views.useful_post') +
'?message={0}'.format(
post1.pk),
follow=False)
self.assertEqual(result.status_code, 403)
self.assertEqual(Post.objects.get(pk=post1.pk).is_useful, False)
self.assertEqual(Post.objects.get(pk=post2.pk).is_useful, True)
self.assertEqual(Post.objects.get(pk=post3.pk).is_useful, False)
# useful if you aren't author
TopicFactory(forum=self.forum11, author=user1)
post4 = PostFactory(topic=topic1, author=user1, position=1)
post5 = PostFactory(topic=topic1, author=self.user, position=2)
result = self.client.get(
reverse('zds.forum.views.useful_post') +
'?message={0}'.format(
post5.pk),
follow=False)
self.assertEqual(result.status_code, 403)
self.assertEqual(Post.objects.get(pk=post4.pk).is_useful, False)
self.assertEqual(Post.objects.get(pk=post5.pk).is_useful, False)
# useful if you are staff
StaffProfileFactory().user
self.assertEqual(self.client.login(
username=self.user.username,
password='hostel77'),
True)
result = self.client.get(
reverse('zds.forum.views.useful_post') +
'?message={0}'.format(
post4.pk),
follow=False)
self.assertNotEqual(result.status_code, 403)
self.assertEqual(Post.objects.get(pk=post4.pk).is_useful, True)
self.assertEqual(Post.objects.get(pk=post5.pk).is_useful, False)
def test_move_topic(self):
"""Test topic move."""
user1 = ProfileFactory().user
topic1 = TopicFactory(forum=self.forum11, author=self.user)
PostFactory(topic=topic1, author=self.user, position=1)
PostFactory(topic=topic1, author=user1, position=2)
PostFactory(topic=topic1, author=self.user, position=3)
        # a non-staff member can't move a topic
result = self.client.post(
reverse('zds.forum.views.move_topic') +
'?sujet={0}'.format(
topic1.pk),
{
'forum': self.forum12},
follow=False)
self.assertEqual(result.status_code, 403)
# test with staff
staff1 = StaffProfileFactory().user
self.assertEqual(
self.client.login(
username=staff1.username,
password='hostel77'),
True)
result = self.client.post(
reverse('zds.forum.views.move_topic') +
'?sujet={0}'.format(
topic1.pk),
{
'forum': self.forum12.pk},
follow=False)
self.assertEqual(result.status_code, 302)
# check value
self.assertEqual(
Topic.objects.get(
pk=topic1.pk).forum.pk,
self.forum12.pk)
def test_answer_empty(self):
"""Test behaviour on empty answer."""
# Topic and 1st post by another user, to avoid antispam limitation
topic1 = TopicFactory(forum=self.forum11, author=self.user2)
PostFactory(topic=topic1, author=self.user2, position=1)
result = self.client.post(
reverse('zds.forum.views.answer') + '?sujet={0}'.format(topic1.pk),
{
'last_post': topic1.last_message.pk,
'text': u' '
},
follow=False)
# Empty text --> preview = HTTP 200 + post not saved (only 1 post in
# topic)
self.assertEqual(result.status_code, 200)
self.assertEqual(Post.objects.filter(topic=topic1.pk).count(), 1)
def test_mandatory_fields_on_new(self):
"""Test handeling of mandatory fields on new topic creation."""
init_topic_count = Topic.objects.all().count()
# Empty fields
response = self.client.post(
reverse('zds.forum.views.new') +
'?forum={0}'.format(
self.forum12.pk),
{},
follow=False)
self.assertEqual(response.status_code, 200)
self.assertEqual(Topic.objects.all().count(), init_topic_count)
# Blank data
response = self.client.post(
reverse('zds.forum.views.new') +
'?forum={0}'.format(
self.forum12.pk),
{
'title': u' ',
'text': u' ',
},
follow=False)
self.assertEqual(response.status_code, 200)
self.assertEqual(Topic.objects.all().count(), init_topic_count)
def test_url_topic(self):
"""Test simple get request to the topic."""
user1 = ProfileFactory().user
topic1 = TopicFactory(forum=self.forum11, author=self.user)
PostFactory(topic=topic1, author=self.user, position=1)
PostFactory(topic=topic1, author=user1, position=2)
PostFactory(topic=topic1, author=self.user, position=3)
# simple member can read public topic
result = self.client.get(
reverse(
'zds.forum.views.topic',
args=[
topic1.pk,
slugify(topic1.title)]),
follow=True)
self.assertEqual(result.status_code, 200)
class ForumGuestTests(TestCase):
def setUp(self):
settings.EMAIL_BACKEND = \
'django.core.mail.backends.locmem.EmailBackend'
self.category1 = CategoryFactory(position=1)
self.category2 = CategoryFactory(position=2)
self.category3 = CategoryFactory(position=3)
self.forum11 = ForumFactory(
category=self.category1,
position_in_category=1)
self.forum12 = ForumFactory(
category=self.category1,
position_in_category=2)
self.forum13 = ForumFactory(
category=self.category1,
position_in_category=3)
self.forum21 = ForumFactory(
category=self.category2,
position_in_category=1)
self.forum22 = ForumFactory(
category=self.category2,
position_in_category=2)
self.user = ProfileFactory().user
def test_display(self):
"""Test forum display (full: root, category, forum) Topic display test
is in creation topic test."""
# Forum root
response = self.client.get(reverse('zds.forum.views.index'))
self.assertContains(response, 'Liste des forums')
# Category
response = self.client.get(
reverse(
'zds.forum.views.cat_details',
args=[
self.category1.slug]))
self.assertContains(response, self.category1.title)
# Forum
response = self.client.get(
reverse(
'zds.forum.views.details',
args=[
self.category1.slug,
self.forum11.slug]))
self.assertContains(response, self.category1.title)
self.assertContains(response, self.forum11.title)
def test_create_topic(self):
"""To test all aspects of topic's creation by guest."""
result = self.client.post(
reverse('zds.forum.views.new') + '?forum={0}'
.format(self.forum12.pk),
{'title': u'Un autre sujet',
'subtitle': u'Encore ces lombards en plein ete',
'text': u'C\'est tout simplement l\'histoire de '
u'la ville de Paris que je voudrais vous conter '
},
follow=False)
self.assertEqual(result.status_code, 302)
# check topic's number
self.assertEqual(Topic.objects.all().count(), 0)
# check post's number
self.assertEqual(Post.objects.all().count(), 0)
def test_answer(self):
"""To test all aspects of answer."""
user1 = ProfileFactory().user
topic1 = TopicFactory(forum=self.forum11, author=self.user)
PostFactory(topic=topic1, author=self.user, position=1)
PostFactory(topic=topic1, author=self.user, position=2)
PostFactory(topic=topic1, author=user1, position=3)
result = self.client.post(
reverse('zds.forum.views.answer') + '?sujet={0}'.format(topic1.pk),
{
'last_post': topic1.last_message.pk,
'text': u'C\'est tout simplement l\'histoire de la ville de Paris que je voudrais vous conter '
},
follow=False)
self.assertEqual(result.status_code, 302)
# check topic's number
self.assertEqual(Topic.objects.all().count(), 1)
# check post's number
self.assertEqual(Post.objects.all().count(), 3)
def test_edit_main_post(self):
"""To test all aspects of the edition of main post by guest."""
topic1 = TopicFactory(forum=self.forum11, author=self.user)
post1 = PostFactory(topic=topic1, author=self.user, position=1)
topic2 = TopicFactory(forum=self.forum12, author=self.user)
PostFactory(topic=topic2, author=self.user, position=1)
topic3 = TopicFactory(forum=self.forum21, author=self.user)
PostFactory(topic=topic3, author=self.user, position=1)
result = self.client.post(
reverse('zds.forum.views.edit_post') + '?message={0}'
.format(post1.pk),
{'title': u'Un autre sujet',
'subtitle': u'Encore ces lombards en plein été',
'text': u'C\'est tout simplement l\'histoire de la ville de Paris que je voudrais vous conter '
},
follow=False)
self.assertEqual(result.status_code, 302)
self.assertNotEqual(
Topic.objects.get(
pk=topic1.pk).title,
u'Un autre sujet')
self.assertNotEqual(
Topic.objects.get(
pk=topic1.pk).subtitle,
u'Encore ces lombards en plein été')
self.assertNotEqual(
Post.objects.get(
pk=post1.pk).text,
u'C\'est tout simplement l\'histoire de la ville de '
u'Paris que je voudrais vous conter ')
def test_edit_post(self):
"""To test all aspects of the edition of simple post by guest."""
topic1 = TopicFactory(forum=self.forum11, author=self.user)
PostFactory(topic=topic1, author=self.user, position=1)
post2 = PostFactory(topic=topic1, author=self.user, position=2)
PostFactory(topic=topic1, author=self.user, position=3)
result = self.client.post(
reverse('zds.forum.views.edit_post') + '?message={0}'
.format(post2.pk),
{
'text': u'C\'est tout simplement l\'histoire de '
u'la ville de Paris que je voudrais vous conter '
},
follow=False)
self.assertEqual(result.status_code, 302)
self.assertNotEqual(
Post.objects.get(
pk=post2.pk).text,
u'C\'est tout simplement l\'histoire de la ville de '
u'Paris que je voudrais vous conter ')
def test_quote_post(self):
"""To test when a member quote anyone post."""
user1 = ProfileFactory().user
topic1 = TopicFactory(forum=self.forum11, author=self.user)
PostFactory(topic=topic1, author=self.user, position=1)
post2 = PostFactory(topic=topic1, author=user1, position=2)
PostFactory(topic=topic1, author=user1, position=3)
result = self.client.get(
reverse('zds.forum.views.answer') +
'?sujet={0}&cite={0}'.format(
topic1.pk,
post2.pk),
follow=False)
self.assertEqual(result.status_code, 302)
def test_signal_post(self):
"""To test when a member quote anyone post."""
user1 = ProfileFactory().user
topic1 = TopicFactory(forum=self.forum11, author=self.user)
PostFactory(topic=topic1, author=self.user, position=1)
post2 = PostFactory(topic=topic1, author=user1, position=2)
PostFactory(topic=topic1, author=user1, position=3)
result = self.client.post(
reverse('zds.forum.views.edit_post') +
'?message={0}'.format(post2.pk),
{
'signal_text': u'Troll',
'signal_message': 'confirmer'
},
follow=False)
self.assertEqual(result.status_code, 302)
self.assertEqual(Alert.objects.all().count(), 0)
self.assertEqual(Alert.objects.filter(author=self.user).count(), 0)
def test_like_post(self):
"""Test when a member like any post."""
user1 = ProfileFactory().user
topic1 = TopicFactory(forum=self.forum11, author=self.user)
post1 = PostFactory(topic=topic1, author=self.user, position=1)
post2 = PostFactory(topic=topic1, author=user1, position=2)
post3 = PostFactory(topic=topic1, author=self.user, position=3)
result = self.client.get(
reverse('zds.forum.views.like_post') +
'?message={0}'.format(
post2.pk),
follow=False)
self.assertEqual(result.status_code, 302)
self.assertEqual(CommentLike.objects.all().count(), 0)
self.assertEqual(Post.objects.get(pk=post1.pk).like, 0)
self.assertEqual(Post.objects.get(pk=post2.pk).like, 0)
self.assertEqual(Post.objects.get(pk=post3.pk).like, 0)
self.assertEqual(Post.objects.get(pk=post1.pk).dislike, 0)
self.assertEqual(Post.objects.get(pk=post2.pk).dislike, 0)
self.assertEqual(Post.objects.get(pk=post3.pk).dislike, 0)
def test_dislike_post(self):
"""Test when a member dislike any post."""
user1 = ProfileFactory().user
topic1 = TopicFactory(forum=self.forum11, author=self.user)
post1 = PostFactory(topic=topic1, author=self.user, position=1)
post2 = PostFactory(topic=topic1, author=user1, position=2)
post3 = PostFactory(topic=topic1, author=self.user, position=3)
result = self.client.get(
reverse('zds.forum.views.dislike_post') +
'?message={0}'.format(
post2.pk),
follow=False)
self.assertEqual(result.status_code, 302)
self.assertEqual(CommentDislike.objects.all().count(), 0)
self.assertEqual(Post.objects.get(pk=post1.pk).like, 0)
self.assertEqual(Post.objects.get(pk=post2.pk).like, 0)
self.assertEqual(Post.objects.get(pk=post3.pk).like, 0)
self.assertEqual(Post.objects.get(pk=post1.pk).dislike, 0)
self.assertEqual(Post.objects.get(pk=post2.pk).dislike, 0)
self.assertEqual(Post.objects.get(pk=post3.pk).dislike, 0)
def test_useful_post(self):
"""To test when a guest mark a post is usefull."""
user1 = ProfileFactory().user
topic1 = TopicFactory(forum=self.forum11, author=self.user)
post1 = PostFactory(topic=topic1, author=self.user, position=1)
post2 = PostFactory(topic=topic1, author=user1, position=2)
post3 = PostFactory(topic=topic1, author=user1, position=3)
result = self.client.get(
reverse('zds.forum.views.useful_post') +
'?message={0}'.format(
post2.pk),
follow=False)
self.assertEqual(result.status_code, 302)
self.assertEqual(Post.objects.get(pk=post1.pk).is_useful, False)
self.assertEqual(Post.objects.get(pk=post2.pk).is_useful, False)
self.assertEqual(Post.objects.get(pk=post3.pk).is_useful, False)
def test_move_topic(self):
"""Test topic move."""
user1 = ProfileFactory().user
topic1 = TopicFactory(forum=self.forum11, author=self.user)
PostFactory(topic=topic1, author=self.user, position=1)
PostFactory(topic=topic1, author=user1, position=2)
PostFactory(topic=topic1, author=self.user, position=3)
        # a guest (non-staff) can't move a topic
result = self.client.post(
reverse('zds.forum.views.move_topic') +
'?sujet={0}'.format(
topic1.pk),
{
'forum': self.forum12},
follow=False)
self.assertEqual(result.status_code, 302)
self.assertNotEqual(
Topic.objects.get(
pk=topic1.pk).forum,
self.forum12)
def test_url_topic(self):
"""Test simple get request to the topic."""
user1 = ProfileFactory().user
topic1 = TopicFactory(forum=self.forum11, author=self.user)
PostFactory(topic=topic1, author=self.user, position=1)
PostFactory(topic=topic1, author=user1, position=2)
PostFactory(topic=topic1, author=self.user, position=3)
# guest can read public topic
result = self.client.get(
reverse(
'zds.forum.views.topic',
args=[
topic1.pk,
slugify(topic1.title)]),
follow=True)
self.assertEqual(result.status_code, 200)
| gpl-3.0 | 6,291,033,389,545,015,000 | 38.102857 | 111 | 0.594272 | false |
allrod5/extra-trees | benchmarks/classification/decision_surface.py | 1 | 4826 | print(__doc__)
# Code source: Gaël Varoquaux
# Andreas Müller
# Modified for MCZA015-13 class project by Rodrigo Martins de Oliveira
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.colors import ListedColormap
from sklearn.datasets import make_circles
from sklearn.datasets import make_classification
from sklearn.datasets import make_moons
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import ExtraTreesClassifier as SKExtraTreesClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from extra_trees.ensemble.forest import ExtraTreesClassifier
h = .02 # step size in the mesh
names = [
"Nearest Neighbors",
"Linear SVM",
"RBF SVM",
"Gaussian Process",
"Neural Net",
"Naive Bayes",
"QDA",
"AdaBoost",
"Decision Tree",
"Random Forest",
"ExtraTrees (SciKit)",
"ExtraTrees",
]
classifiers = [
KNeighborsClassifier(3),
SVC(kernel="linear", C=0.025),
SVC(gamma=2, C=1),
GaussianProcessClassifier(1.0 * RBF(1.0), warm_start=True),
MLPClassifier(alpha=1),
GaussianNB(),
QuadraticDiscriminantAnalysis(),
AdaBoostClassifier(),
DecisionTreeClassifier(max_depth=5),
RandomForestClassifier(max_depth=5, n_estimators=10, max_features=2),
SKExtraTreesClassifier(n_estimators=10, max_features=2),
ExtraTreesClassifier(n_estimators=10, max_features=2),
]
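# note: `names` and `classifiers` are parallel lists; zip(names, classifiers) in the
# plotting loop below pairs them by position, so both must stay in the same order
# and have the same length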
X, y = make_classification(
n_features=2, n_redundant=0, n_informative=2,
random_state=1, n_clusters_per_class=1)
rng = np.random.RandomState(2)
X += 2 * rng.uniform(size=X.shape)
linearly_separable = (X, y)
datasets = [
make_moons(noise=0.3, random_state=0),
make_circles(noise=0.2, factor=0.5, random_state=1),
linearly_separable
]
figure = plt.figure(figsize=(33, 11))
i = 1
# iterate over datasets
for ds_cnt, ds in enumerate(datasets):
# preprocess dataset, split into training and test part
X, y = ds
X = StandardScaler().fit_transform(X)
X_train, X_test, y_train, y_test = \
train_test_split(X, y, test_size=.4, random_state=42)
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(
np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# just plot the dataset first
cm = plt.cm.RdBu
cm_bright = ListedColormap(['#FF0000', '#0000FF'])
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
if ds_cnt == 0:
ax.set_title("Input data")
# Plot the training points
ax.scatter(
X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright,
edgecolors='k')
# and testing points
ax.scatter(
X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6,
edgecolors='k')
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
i += 1
# iterate over classifiers
for name, clf in zip(names, classifiers):
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
clf.fit(X_train, y_train)
score = clf.score(X_test, y_test)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
if hasattr(clf, "decision_function"):
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
else:
Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
# Put the result into a color plot
Z = Z.reshape(xx.shape)
ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)
# Plot also the training points
ax.scatter(
X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright,
edgecolors='k')
# and testing points
ax.scatter(
X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright,
edgecolors='k', alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
if ds_cnt == 0:
ax.set_title(name)
ax.text(
xx.max() - .3, yy.min() + .3, ('%.2f' % score).lstrip('0'),
size=15, horizontalalignment='right')
i += 1
plt.tight_layout()
plt.show()
| mit | 2,900,732,538,703,332,000 | 31.375839 | 78 | 0.63806 | false |
KonradBreitsprecher/espresso | samples/load_checkpoint.py | 1 | 1891 | from __future__ import print_function
import espressomd
from espressomd import checkpointing
checkpoint = checkpointing.Checkpointing(checkpoint_id="mycheckpoint")
checkpoint.load()
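# loading the checkpoint restores every object that was registered when it was
# written, so names such as myvar, skin and p3m used below are expected to be
# restored from the checkpoint rather than defined in this script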
# necessary for using e.g. system.actors, since explicit checkpointing of actors is not implemented yet
system = espressomd.System(box_l=[10.7437, 10.7437, 10.7437])
system.set_random_state_PRNG()
#system.seed = system.cell_system.get_state()['n_nodes'] * [1234]
# test user variable
print("\n### user variable test ###")
print("myvar = {}".format(myvar))
print("skin = {}".format(skin))
# test "system"
print("\n### system test ###")
print("system.time = {}".format(system.time))
print("system.box_l = {}".format(system.box_l))
# system.cell_system not implemented yet, see sample script store_properties.py
system.cell_system.skin = skin
# test "system.non_bonded_inter"
print("\n### system.non_bonded_inter test ###")
print("system.non_bonded_inter[0, 0].lennard_jones.get_params() = {}".format(system.non_bonded_inter[0, 0].lennard_jones.get_params()))
# test "system.part"
print("\n### system.part test ###")
print("system.part[:].pos = {}".format(system.part[:].pos))
# test "system.thermostat"
print("\n### system.thermostat test ###")
print("system.thermostat.get_state() = {}".format(system.thermostat.get_state()))
# test "p3m"
print("\n### p3m test ###")
print("p3m.get_params() = {}".format(p3m.get_params()))
system.actors.add(p3m)
# test registered objects
# all objects that are registered when writing a checkpoint are automatically registered after loading this checkpoint
print("\n### checkpoint register test ###")
print("checkpoint.get_registered_objects() = {}".format(checkpoint.get_registered_objects()))
# integrate system and finally save checkpoint
print("\n### Integrate until user presses ctrl+c ###")
print("Integrating...")
while True:
system.integrator.run(1000)
| gpl-3.0 | -1,866,564,892,776,036,400 | 35.365385 | 135 | 0.712322 | false |
F483/gravur | gravur/common/abstractscrollview.py | 1 | 1148 | # coding: utf-8
# Copyright (c) 2015 Fabian Barkhau <[email protected]>
# License: MIT (see LICENSE file)
from kivy.uix.gridlayout import GridLayout
from kivy.uix.scrollview import ScrollView
class AbstractScrollView(ScrollView):
def __init__(self, *args, **kwargs):
# Setup scroll view
kwargs.update({
"pos_hint": {'center_x': 0.5, 'center_y': 0.5},
"do_scroll_x": False
})
super(AbstractScrollView, self).__init__(*args, **kwargs)
# create layout
layout = GridLayout(cols=kwargs.get("cols", 1),
padding=kwargs.get("padding", 0),
spacing=kwargs.get("spacing", 0),
size_hint_y=None)
layout.bind(minimum_height=layout.setter('height'))
# add widgets to layout
entries = kwargs.get('entries', [])
for entry in entries:
widget = self.entry_to_widget(entry)
layout.add_widget(widget)
# add layout
self.add_widget(layout)
def entry_to_widget(self, entry):
raise Exception("Abstract method!")
| mit | 1,983,399,102,228,169,200 | 29.210526 | 65 | 0.568815 | false |
reetawwsum/Machine-Translation | utils/batchGenerator.py | 1 | 2290 | from __future__ import print_function
from __future__ import absolute_import
import random
import numpy as np
import config
from utils.dataset import Dataset
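# A batch generator for bucketed seq2seq training data: next() picks a bucket at
# random (weighted by bucket size), pads and reverses encoder inputs, prepends GO_ID
# to decoder inputs, and returns time-major lists (one numpy array of shape
# (batch_size,) per time step) plus per-step target weights that zero out PAD
# targets, together with the chosen bucket_id.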
class BatchGenerator():
'''Generate Batches'''
def __init__(self):
self.batch_size = config.FLAGS.batch_size
self.buckets = buckets = config.BUCKETS
self.PAD_ID = config.PAD_ID
self.GO_ID = config.GO_ID
self.load_dataset()
data_bucket_sizes = [len(self.data[b]) for b in xrange(len(buckets))]
data_total_size = float(sum(data_bucket_sizes))
self.data_buckets_scale = [sum(data_bucket_sizes[:i + 1]) / data_total_size for i in xrange(len(data_bucket_sizes))]
def load_dataset(self):
dataset = Dataset()
self.data = dataset.data
def next(self):
random_number_01 = np.random.random_sample()
bucket_id = min([i for i in xrange(len(self.data_buckets_scale)) if self.data_buckets_scale[i] > random_number_01])
encoder_size, decoder_size = self.buckets[bucket_id]
encoder_inputs, decoder_inputs = [], []
for _ in xrange(self.batch_size):
encoder_input, decoder_input = random.choice(self.data[bucket_id])
encoder_pad = [self.PAD_ID] * (encoder_size - len(encoder_input))
encoder_inputs.append(list(reversed(encoder_input + encoder_pad)))
decoder_pad = [self.PAD_ID] * (decoder_size - len(decoder_input) - 1)
decoder_inputs.append([self.GO_ID] + decoder_input + decoder_pad)
batch_encoder_inputs, batch_decoder_inputs, batch_target_weights = [], [], []
for length_idx in xrange(encoder_size):
batch_encoder_inputs.append(np.array([encoder_inputs[batch_idx][length_idx] for batch_idx in xrange(self.batch_size)], dtype=np.int32))
for length_idx in xrange(decoder_size):
batch_decoder_inputs.append(np.array([decoder_inputs[batch_idx][length_idx] for batch_idx in xrange(self.batch_size)], dtype=np.int32))
batch_target_weight = np.ones(self.batch_size, dtype=np.float32)
for batch_idx in xrange(self.batch_size):
if length_idx < decoder_size - 1:
target = decoder_inputs[batch_idx][length_idx + 1]
if length_idx == decoder_size - 1 or target == self.PAD_ID:
batch_target_weight[batch_idx] = 0.0
batch_target_weights.append(batch_target_weight)
return batch_encoder_inputs, batch_decoder_inputs, batch_target_weights, bucket_id
| mit | -3,227,155,912,839,335,000 | 34.78125 | 138 | 0.706114 | false |
mattgrogan/ledmatrix | sensor_ui/rotary_encoder.py | 1 | 3290 | from gpiozero import Button
class RotaryEncoder:
"""
Decode mechanical rotary encoder pulses.
    The following example will print the direction of each rotary encoder change:
        from gpiozero import RotaryEncoder
        def change(value):
            if value == 1:
                print("clockwise")
            else:  # value == -1
                print("counterclockwise")
        rotary = RotaryEncoder(2, 3)
        rotary.when_rotated = change
    Based on http://abyz.co.uk/rpi/pigpio/examples.html#Python_rotary_encoder_py
"""
gpioA = None
gpioB = None
levA = 0
levB = 0
lastGpio = None
when_rotated = lambda *args : None
def __init__(self, pinA, pinB, pull_up=False):
"""
        Used to detect rotary encoder changes (set the when_rotated attribute).
        The callback takes one parameter, which is +1 for clockwise and -1 for
        counterclockwise.
        :param pinA int :
        :param pinB int :
        :param pull_up bool :
            Whether the common contact is NOT connected to ground.
"""
self.gpioA = Button(pinA, pull_up)
self.gpioB = Button(pinB, pull_up)
self.levA = 0
self.levB = 0
self.lastGpio = None
self.gpioA.when_pressed = lambda *args : self.pulse(self.gpioA, 1)
self.gpioA.when_released = lambda *args : self.pulse(self.gpioA, 0)
self.gpioB.when_pressed = lambda *args : self.pulse(self.gpioB, 1)
self.gpioB.when_released = lambda *args : self.pulse(self.gpioB, 0)
def pulse(self, gpio, level):
"""
Decode the rotary encoder pulse.
+---------+ +---------+ 0
| | | |
A | | | |
| | | |
+---------+ +---------+ +----- 1
+---------+ +---------+ 0
| | | |
B | | | |
| | | |
----+ +---------+ +---------+ 1
"""
if gpio == self.gpioA:
self.levA = level
else:
self.levB = level
if gpio != self.lastGpio:
self.lastGpio = gpio
if gpio == self.gpioA and level == 1:
if self.levB == 1:
self.when_rotated(1)
elif gpio == self.gpioB and level == 1:
if self.levA == 1:
self.when_rotated(-1)
else:
if self.levB == 1:
self.when_rotated(-1)
elif self.levA == 1:
self.when_rotated(1)
else:
if gpio == self.gpioA and level == 1:
if self.levB == 1:
self.when_rotated(1)
elif gpio == self.gpioB and level == 1:
if self.levA == 1:
self.when_rotated(-1)
else:
if self.levB == 1:
self.when_rotated(-1)
elif self.levA == 1:
self.when_rotated(1)
| mit | -656,385,531,200,333,000 | 30.254902 | 85 | 0.419149 | false |
dougbenjamin/panda-harvester | pandaharvester/harvestercore/db_proxy.py | 1 | 241342 | """
database connection
"""
import os
import re
import sys
import copy
import random
import inspect
import time
import datetime
import threading
from future.utils import iteritems
from .command_spec import CommandSpec
from .job_spec import JobSpec
from .work_spec import WorkSpec
from .file_spec import FileSpec
from .event_spec import EventSpec
from .cache_spec import CacheSpec
from .seq_number_spec import SeqNumberSpec
from .panda_queue_spec import PandaQueueSpec
from .job_worker_relation_spec import JobWorkerRelationSpec
from .process_lock_spec import ProcessLockSpec
from .diag_spec import DiagSpec
from .service_metrics_spec import ServiceMetricSpec
from .queue_config_dump_spec import QueueConfigDumpSpec
from . import core_utils
from pandaharvester.harvesterconfig import harvester_config
# logger
_logger = core_utils.setup_logger('db_proxy')
# table names
commandTableName = 'command_table'
jobTableName = 'job_table'
workTableName = 'work_table'
fileTableName = 'file_table'
cacheTableName = 'cache_table'
eventTableName = 'event_table'
seqNumberTableName = 'seq_table'
pandaQueueTableName = 'pq_table'
jobWorkerTableName = 'jw_table'
processLockTableName = 'lock_table'
diagTableName = 'diag_table'
queueConfigDumpTableName = 'qcdump_table'
serviceMetricsTableName = 'sm_table'
# connection lock
conLock = threading.Lock()
# connection class
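# DBProxy wraps one connection/cursor pair: depending on harvester_config.db.engine
# it talks to MariaDB (via MySQLdb or mysql.connector) or to SQLite, and with SQLite
# an application-side lock (conLock) is used to serialize access, since that engine
# has no mechanism for exclusive access comparable to MariaDB's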
class DBProxy(object):
# constructor
def __init__(self, thr_name=None, read_only=False):
self.thrName = thr_name
self.verbLog = None
self.useInspect = False
if harvester_config.db.verbose:
self.verbLog = core_utils.make_logger(_logger, method_name='execute')
if self.thrName is None:
currentThr = threading.current_thread()
if currentThr is not None:
self.thrName = currentThr.ident
if hasattr(harvester_config.db, 'useInspect') and harvester_config.db.useInspect is True:
self.useInspect = True
if harvester_config.db.engine == 'mariadb':
if hasattr(harvester_config.db, 'host'):
host = harvester_config.db.host
else:
host = '127.0.0.1'
if hasattr(harvester_config.db, 'port'):
port = harvester_config.db.port
else:
port = 3306
if hasattr(harvester_config.db, 'useMySQLdb') and harvester_config.db.useMySQLdb is True:
import MySQLdb
import MySQLdb.cursors
class MyCursor (MySQLdb.cursors.Cursor):
def fetchone(self):
tmpRet = MySQLdb.cursors.Cursor.fetchone(self)
if tmpRet is None:
return None
tmpRet = core_utils.DictTupleHybrid(tmpRet)
tmpRet.set_attributes([d[0] for d in self.description])
return tmpRet
def fetchall(self):
tmpRets = MySQLdb.cursors.Cursor.fetchall(self)
if len(tmpRets) == 0:
return tmpRets
newTmpRets = []
attributes = [d[0] for d in self.description]
for tmpRet in tmpRets:
tmpRet = core_utils.DictTupleHybrid(tmpRet)
tmpRet.set_attributes(attributes)
newTmpRets.append(tmpRet)
return newTmpRets
self.con = MySQLdb.connect(user=harvester_config.db.user, passwd=harvester_config.db.password,
db=harvester_config.db.schema, host=host, port=port,
cursorclass=MyCursor)
self.cur = self.con.cursor()
else:
import mysql.connector
self.con = mysql.connector.connect(user=harvester_config.db.user, passwd=harvester_config.db.password,
db=harvester_config.db.schema, host=host, port=port)
self.cur = self.con.cursor(named_tuple=True, buffered=True)
else:
import sqlite3
if read_only:
fd = os.open(harvester_config.db.database_filename, os.O_RDONLY)
database_filename = '/dev/fd/{0}'.format(fd)
else:
database_filename = harvester_config.db.database_filename
self.con = sqlite3.connect(database_filename,
detect_types=sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES,
check_same_thread=False)
core_utils.set_file_permission(harvester_config.db.database_filename)
# change the row factory to use Row
self.con.row_factory = sqlite3.Row
self.cur = self.con.cursor()
self.cur.execute('PRAGMA journal_mode')
resJ = self.cur.fetchone()
if resJ[0] != 'wal':
self.cur.execute('PRAGMA journal_mode = WAL')
# read to avoid database lock
self.cur.fetchone()
self.lockDB = False
# using application side lock if DB doesn't have a mechanism for exclusive access
if harvester_config.db.engine == 'mariadb':
self.usingAppLock = False
else:
self.usingAppLock = True
    # exception handler for different DB types
def _handle_exception(self, exc, retry_time=30):
tmpLog = core_utils.make_logger(_logger, 'thr={0}'.format(self.thrName), method_name='_handle_exception')
if harvester_config.db.engine == 'mariadb':
tmpLog.warning('exception of mysql {0} occurred'.format(exc.__class__.__name__))
# Case to try renew connection
isOperationalError = False
if hasattr(harvester_config.db, 'useMySQLdb') and harvester_config.db.useMySQLdb is True:
import MySQLdb
if isinstance(exc, MySQLdb.OperationalError):
isOperationalError = True
else:
import mysql.connector
if isinstance(exc, mysql.connector.errors.OperationalError):
isOperationalError = True
if isOperationalError:
try_timestamp = time.time()
while time.time() - try_timestamp < retry_time:
try:
self.__init__()
tmpLog.info('renewed connection')
break
except Exception as e:
tmpLog.error('failed to renew connection; {0}'.format(e))
time.sleep(1)
# convert param dict to list
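    # (for example, on MariaDB "... WHERE PandaID=:PandaID" with {':PandaID': 123}
    #  becomes "... WHERE PandaID=%s" with the parameter list [123]; on SQLite the
    #  named placeholders are kept and only FOR UPDATE / INSERT IGNORE are rewritten)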
def convert_params(self, sql, varmap):
# lock database if application side lock is used
if self.usingAppLock and \
(re.search('^INSERT', sql, re.I) is not None
or re.search('^UPDATE', sql, re.I) is not None
or re.search(' FOR UPDATE', sql, re.I) is not None
or re.search('^DELETE', sql, re.I) is not None
):
self.lockDB = True
# remove FOR UPDATE for sqlite
if harvester_config.db.engine == 'sqlite':
sql = re.sub(' FOR UPDATE', ' ', sql, re.I)
sql = re.sub('INSERT IGNORE', 'INSERT OR IGNORE', sql, re.I)
else:
sql = re.sub('INSERT OR IGNORE', 'INSERT IGNORE', sql, re.I)
        # no conversion unless dict
if not isinstance(varmap, dict):
# using the printf style syntax for mariaDB
if harvester_config.db.engine == 'mariadb':
sql = re.sub(':[^ $,)]+', '%s', sql)
return sql, varmap
paramList = []
# extract placeholders
items = re.findall(':[^ $,)]+', sql)
for item in items:
if item not in varmap:
raise KeyError('{0} is missing in SQL parameters'.format(item))
if item not in paramList:
paramList.append(varmap[item])
# using the printf style syntax for mariaDB
if harvester_config.db.engine == 'mariadb':
sql = re.sub(':[^ $,)]+', '%s', sql)
return sql, paramList
# wrapper for execute
def execute(self, sql, varmap=None):
sw = core_utils.get_stopwatch()
if varmap is None:
varmap = dict()
# get lock if application side lock is used
if self.usingAppLock and not self.lockDB:
if harvester_config.db.verbose:
self.verbLog.debug('thr={0} locking'.format(self.thrName))
conLock.acquire()
if harvester_config.db.verbose:
self.verbLog.debug('thr={0} locked'.format(self.thrName))
# execute
try:
# verbose
if harvester_config.db.verbose:
if not self.useInspect:
self.verbLog.debug('thr={2} sql={0} var={1}'.format(sql, str(varmap), self.thrName))
else:
self.verbLog.debug('thr={3} sql={0} var={1} exec={2}'.format(sql, str(varmap),
inspect.stack()[1][3],
self.thrName))
# convert param dict
newSQL, params = self.convert_params(sql, varmap)
# execute
try:
retVal = self.cur.execute(newSQL, params)
except Exception as e:
self._handle_exception(e)
if harvester_config.db.verbose:
self.verbLog.debug('thr={0} exception during execute'.format(self.thrName))
raise
finally:
# release lock
if self.usingAppLock and not self.lockDB:
if harvester_config.db.verbose:
self.verbLog.debug('thr={0} release'.format(self.thrName))
conLock.release()
# return
if harvester_config.db.verbose:
self.verbLog.debug('thr={0} {1} sql=[{2}]'.format(self.thrName, sw.get_elapsed_time(),
newSQL.replace('\n', ' ').strip()))
return retVal
# wrapper for executemany
def executemany(self, sql, varmap_list):
# get lock
if self.usingAppLock and not self.lockDB:
if harvester_config.db.verbose:
self.verbLog.debug('thr={0} locking'.format(self.thrName))
conLock.acquire()
if harvester_config.db.verbose:
self.verbLog.debug('thr={0} locked'.format(self.thrName))
try:
# verbose
if harvester_config.db.verbose:
if not self.useInspect:
self.verbLog.debug('thr={2} sql={0} var={1}'.format(sql, str(varmap_list), self.thrName))
else:
self.verbLog.debug('thr={3} sql={0} var={1} exec={2}'.format(sql, str(varmap_list),
inspect.stack()[1][3],
self.thrName))
# convert param dict
paramList = []
newSQL = sql
for varMap in varmap_list:
if varMap is None:
varMap = dict()
newSQL, params = self.convert_params(sql, varMap)
paramList.append(params)
# execute
try:
if harvester_config.db.engine == 'sqlite':
retVal = []
iList = 0
nList = 5000
while iList < len(paramList):
retVal += self.cur.executemany(newSQL, paramList[iList:iList+nList])
iList += nList
else:
retVal = self.cur.executemany(newSQL, paramList)
except Exception as e:
self._handle_exception(e)
if harvester_config.db.verbose:
self.verbLog.debug('thr={0} exception during executemany'.format(self.thrName))
raise
finally:
# release lock
if self.usingAppLock and not self.lockDB:
if harvester_config.db.verbose:
self.verbLog.debug('thr={0} release'.format(self.thrName))
conLock.release()
# return
return retVal
# commit
def commit(self):
try:
self.con.commit()
except Exception as e:
self._handle_exception(e)
if harvester_config.db.verbose:
self.verbLog.debug('thr={0} exception during commit'.format(self.thrName))
raise
if self.usingAppLock and self.lockDB:
if harvester_config.db.verbose:
self.verbLog.debug('thr={0} release with commit'.format(self.thrName))
conLock.release()
self.lockDB = False
# rollback
def rollback(self):
try:
self.con.rollback()
except Exception as e:
self._handle_exception(e)
if harvester_config.db.verbose:
self.verbLog.debug('thr={0} exception during rollback'.format(self.thrName))
finally:
if self.usingAppLock and self.lockDB:
if harvester_config.db.verbose:
self.verbLog.debug('thr={0} release with rollback'.format(self.thrName))
conLock.release()
self.lockDB = False
# type conversion
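    # (e.g. on MariaDB 'text' -> 'varchar(256)', 'blob' -> 'longtext' and
    #  'autoincrement' -> 'auto_increment'; on SQLite 'varchar(N)' -> 'text' and
    #  'auto_increment' -> 'autoincrement')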
def type_conversion(self, attr_type):
# remove decorator
attr_type = attr_type.split('/')[0]
attr_type = attr_type.strip()
if attr_type == 'timestamp':
# add NULL attribute to disable automatic update
attr_type += ' null'
# type conversion
if harvester_config.db.engine == 'mariadb':
if attr_type.startswith('text'):
attr_type = attr_type.replace('text', 'varchar(256)')
elif attr_type.startswith('blob'):
attr_type = attr_type.replace('blob', 'longtext')
elif attr_type.startswith('integer'):
attr_type = attr_type.replace('integer', 'bigint')
attr_type = attr_type.replace('autoincrement', 'auto_increment')
elif harvester_config.db.engine == 'sqlite':
if attr_type.startswith('varchar'):
attr_type = re.sub('varchar\(\d+\)', 'text', attr_type)
attr_type = attr_type.replace('auto_increment', 'autoincrement')
return attr_type
# check if index is needed
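    # (attributes may carry decorators after a '/', e.g. a hypothetical
    #  'status:text / index unique' entry would request a unique index on status)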
def need_index(self, attr):
isIndex = False
isUnique = False
# look for separator
if '/' in attr:
decorators = attr.split('/')[-1].split()
if 'index' in decorators:
isIndex = True
if 'unique' in decorators:
isIndex = True
isUnique = True
return isIndex, isUnique
# make table
def make_table(self, cls, table_name):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, method_name='make_table')
tmpLog.debug('table={0}'.format(table_name))
# check if table already exists
varMap = dict()
varMap[':name'] = table_name
if harvester_config.db.engine == 'mariadb':
varMap[':schema'] = harvester_config.db.schema
sqlC = 'SELECT * FROM information_schema.tables WHERE table_schema=:schema AND table_name=:name '
else:
varMap[':type'] = 'table'
sqlC = 'SELECT name FROM sqlite_master WHERE type=:type AND tbl_name=:name '
self.execute(sqlC, varMap)
resC = self.cur.fetchone()
indexes = []
uniques = set()
# not exists
if resC is None:
# sql to make table
sqlM = 'CREATE TABLE {0}('.format(table_name)
# collect columns
for attr in cls.attributesWithTypes:
# split to name and type
attrName, attrType = attr.split(':')
attrType = self.type_conversion(attrType)
# check if index is needed
isIndex, isUnique = self.need_index(attr)
if isIndex:
indexes.append(attrName)
if isUnique:
uniques.add(attrName)
sqlM += '{0} {1},'.format(attrName, attrType)
sqlM = sqlM[:-1]
sqlM += ')'
# make table
self.execute(sqlM)
# commit
self.commit()
tmpLog.debug('made {0}'.format(table_name))
else:
# check table
missingAttrs = self.check_table(cls, table_name, True)
if len(missingAttrs) > 0:
for attr in cls.attributesWithTypes:
# split to name and type
attrName, attrType = attr.split(':')
attrType = self.type_conversion(attrType)
                        # only missing attributes
if attrName not in missingAttrs:
continue
# check if index is needed
isIndex, isUnique = self.need_index(attr)
if isIndex:
indexes.append(attrName)
if isUnique:
uniques.add(attrName)
# add column
sqlA = 'ALTER TABLE {0} ADD COLUMN '.format(table_name)
sqlA += '{0} {1}'.format(attrName, attrType)
try:
self.execute(sqlA)
# commit
self.commit()
tmpLog.debug('added {0} to {1}'.format(attr, table_name))
except Exception:
core_utils.dump_error_message(tmpLog)
# make indexes
for index in indexes:
indexName = 'idx_{0}_{1}'.format(index, table_name)
if index in uniques:
sqlI = "CREATE UNIQUE INDEX "
else:
sqlI = "CREATE INDEX "
sqlI += "{0} ON {1}({2}) ".format(indexName, table_name, index)
try:
self.execute(sqlI)
# commit
self.commit()
tmpLog.debug('added {0}'.format(indexName))
except Exception:
core_utils.dump_error_message(tmpLog)
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
return self.check_table(cls, table_name)
# make tables
def make_tables(self, queue_config_mapper):
outStrs = []
outStrs += self.make_table(CommandSpec, commandTableName)
outStrs += self.make_table(JobSpec, jobTableName)
outStrs += self.make_table(WorkSpec, workTableName)
outStrs += self.make_table(FileSpec, fileTableName)
outStrs += self.make_table(EventSpec, eventTableName)
outStrs += self.make_table(CacheSpec, cacheTableName)
outStrs += self.make_table(SeqNumberSpec, seqNumberTableName)
outStrs += self.make_table(PandaQueueSpec, pandaQueueTableName)
outStrs += self.make_table(JobWorkerRelationSpec, jobWorkerTableName)
outStrs += self.make_table(ProcessLockSpec, processLockTableName)
outStrs += self.make_table(DiagSpec, diagTableName)
outStrs += self.make_table(QueueConfigDumpSpec, queueConfigDumpTableName)
outStrs += self.make_table(ServiceMetricSpec, serviceMetricsTableName)
# dump error messages
if len(outStrs) > 0:
errMsg = "ERROR : Definitions of some database tables are incorrect. "
errMsg += "Please add missing columns, or drop those tables "
errMsg += "so that harvester automatically re-creates those tables."
errMsg += "\n"
print (errMsg)
for outStr in outStrs:
print (outStr)
sys.exit(1)
# add sequential numbers
self.add_seq_number('SEQ_workerID', 1)
self.add_seq_number('SEQ_configID', 1)
# fill PandaQueue table
queue_config_mapper.load_data()
# delete process locks
self.clean_process_locks()
# check table
def check_table(self, cls, table_name, get_missing=False):
# get columns in DB
varMap = dict()
if harvester_config.db.engine == 'mariadb':
varMap[':name'] = table_name
sqlC = 'SELECT column_name,column_type FROM information_schema.columns WHERE table_name=:name '
else:
sqlC = 'PRAGMA table_info({0}) '.format(table_name)
self.execute(sqlC, varMap)
resC = self.cur.fetchall()
colMap = dict()
for tmpItem in resC:
if harvester_config.db.engine == 'mariadb':
if hasattr(tmpItem, '_asdict'):
tmpItem = tmpItem._asdict()
columnName, columnType = tmpItem['column_name'], tmpItem['column_type']
else:
columnName, columnType = tmpItem[1], tmpItem[2]
colMap[columnName] = columnType
self.commit()
# check with class definition
outStrs = []
for attr in cls.attributesWithTypes:
attrName, attrType = attr.split(':')
if attrName not in colMap:
if get_missing:
outStrs.append(attrName)
else:
attrType = self.type_conversion(attrType)
outStrs.append('{0} {1} is missing in {2}'.format(attrName, attrType, table_name))
return outStrs
# insert jobs
def insert_jobs(self, jobspec_list):
# get logger
tmpLog = core_utils.make_logger(_logger, method_name='insert_jobs')
tmpLog.debug('{0} jobs'.format(len(jobspec_list)))
try:
# sql to insert a job
sqlJ = "INSERT INTO {0} ({1}) ".format(jobTableName, JobSpec.column_names())
sqlJ += JobSpec.bind_values_expression()
# sql to insert a file
sqlF = "INSERT INTO {0} ({1}) ".format(fileTableName, FileSpec.column_names())
sqlF += FileSpec.bind_values_expression()
# sql to delete job
sqlDJ = "DELETE FROM {0} ".format(jobTableName)
sqlDJ += "WHERE PandaID=:PandaID "
# sql to delete files
sqlDF = "DELETE FROM {0} ".format(fileTableName)
sqlDF += "WHERE PandaID=:PandaID "
# sql to delete events
sqlDE = "DELETE FROM {0} ".format(eventTableName)
sqlDE += "WHERE PandaID=:PandaID "
# sql to delete relations
sqlDR = "DELETE FROM {0} ".format(jobWorkerTableName)
sqlDR += "WHERE PandaID=:PandaID "
# loop over all jobs
varMapsJ = []
varMapsF = []
for jobSpec in jobspec_list:
# delete job just in case
varMap = dict()
varMap[':PandaID'] = jobSpec.PandaID
self.execute(sqlDJ, varMap)
iDel = self.cur.rowcount
if iDel > 0:
# delete files
self.execute(sqlDF, varMap)
# delete events
self.execute(sqlDE, varMap)
# delete relations
self.execute(sqlDR, varMap)
# commit
self.commit()
# insert job and files
varMap = jobSpec.values_list()
varMapsJ.append(varMap)
for fileSpec in jobSpec.inFiles:
varMap = fileSpec.values_list()
varMapsF.append(varMap)
# insert
self.executemany(sqlJ, varMapsJ)
self.executemany(sqlF, varMapsF)
# commit
self.commit()
# return
return True
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(tmpLog)
# return
return False
# get job
def get_job(self, panda_id):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, 'PandaID={0}'.format(panda_id), method_name='get_job')
tmpLog.debug('start')
# sql to get job
sql = "SELECT {0} FROM {1} ".format(JobSpec.column_names(), jobTableName)
sql += "WHERE PandaID=:pandaID "
# get job
varMap = dict()
varMap[':pandaID'] = panda_id
self.execute(sql, varMap)
resJ = self.cur.fetchone()
if resJ is None:
jobSpec = None
else:
# make job
jobSpec = JobSpec()
jobSpec.pack(resJ)
# get files
sqlF = "SELECT {0} FROM {1} ".format(FileSpec.column_names(), fileTableName)
sqlF += "WHERE PandaID=:PandaID "
varMap = dict()
varMap[':PandaID'] = panda_id
self.execute(sqlF, varMap)
resFileList = self.cur.fetchall()
for resFile in resFileList:
fileSpec = FileSpec()
fileSpec.pack(resFile)
jobSpec.add_file(fileSpec)
# commit
self.commit()
tmpLog.debug('done')
# return
return jobSpec
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return None
# get all jobs (fetch entire jobTable)
def get_jobs(self):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, method_name='get_jobs')
tmpLog.debug('start')
# sql to get job
sql = "SELECT {0} FROM {1} ".format(JobSpec.column_names(), jobTableName)
sql += "WHERE PandaID IS NOT NULL"
# get jobs
varMap = None
self.execute(sql, varMap)
resJobs = self.cur.fetchall()
if resJobs is None:
return None
            jobSpecList = []
# make jobs list
for resJ in resJobs:
jobSpec = JobSpec()
jobSpec.pack(resJ)
jobSpecList.append(jobSpec)
tmpLog.debug('done')
# return
return jobSpecList
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return None
# update job
def update_job(self, jobspec, criteria=None, update_in_file=False):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, 'PandaID={0} subStatus={1}'.format(jobspec.PandaID,
jobspec.subStatus),
method_name='update_job')
tmpLog.debug('start')
if criteria is None:
criteria = {}
# sql to update job
sql = "UPDATE {0} SET {1} ".format(jobTableName, jobspec.bind_update_changes_expression())
sql += "WHERE PandaID=:PandaID "
# update job
varMap = jobspec.values_map(only_changed=True)
for tmpKey, tmpVal in iteritems(criteria):
mapKey = ':{0}_cr'.format(tmpKey)
sql += "AND {0}={1} ".format(tmpKey, mapKey)
varMap[mapKey] = tmpVal
varMap[':PandaID'] = jobspec.PandaID
self.execute(sql, varMap)
nRow = self.cur.rowcount
if nRow > 0:
# update events
for eventSpec in jobspec.events:
varMap = eventSpec.values_map(only_changed=True)
if varMap != {}:
sqlE = "UPDATE {0} SET {1} ".format(eventTableName, eventSpec.bind_update_changes_expression())
sqlE += "WHERE eventRangeID=:eventRangeID "
varMap[':eventRangeID'] = eventSpec.eventRangeID
self.execute(sqlE, varMap)
# update input file
if update_in_file:
for fileSpec in jobspec.inFiles:
varMap = fileSpec.values_map(only_changed=True)
if varMap != {}:
sqlF = "UPDATE {0} SET {1} ".format(fileTableName,
fileSpec.bind_update_changes_expression())
sqlF += "WHERE fileID=:fileID "
varMap[':fileID'] = fileSpec.fileID
self.execute(sqlF, varMap)
else:
# set file status to done if jobs are done
if jobspec.is_final_status():
varMap = dict()
varMap[':PandaID'] = jobspec.PandaID
varMap[':type'] = 'input'
varMap[':status'] = 'done'
sqlF = "UPDATE {0} SET status=:status ".format(fileTableName)
sqlF += "WHERE PandaID=:PandaID AND fileType=:type "
self.execute(sqlF, varMap)
# set to_delete flag
if jobspec.subStatus == 'done':
sqlD = "UPDATE {0} SET todelete=:to_delete ".format(fileTableName)
sqlD += "WHERE PandaID=:PandaID "
varMap = dict()
varMap[':PandaID'] = jobspec.PandaID
varMap[':to_delete'] = 1
self.execute(sqlD, varMap)
# commit
self.commit()
tmpLog.debug('done with {0}'.format(nRow))
# return
return nRow
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return None
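    # note: update_job returns the number of updated rows (0 when the PandaID/criteria
    # no longer match) or None on error; presumably callers treat 0 as a lost lock or
    # stale criteria (assumption about call sites, not stated in this module)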
# insert output files into database
    def insert_files(self, jobspec_list):
# get logger
tmpLog = core_utils.make_logger(_logger, method_name='insert_files')
tmpLog.debug('{0} jobs'.format(len(jobspec_list)))
try:
# sql to insert a file
sqlF = "INSERT INTO {0} ({1}) ".format(fileTableName, FileSpec.column_names())
sqlF += FileSpec.bind_values_expression()
# loop over all jobs
varMapsF = []
for jobSpec in jobspec_list:
for fileSpec in jobSpec.outFiles:
varMap = fileSpec.values_list()
varMapsF.append(varMap)
# insert
self.executemany(sqlF, varMapsF)
# commit
self.commit()
# return
return True
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(tmpLog)
# return
return False
# update worker
def update_worker(self, workspec, criteria=None):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, 'workerID={0}'.format(workspec.workerID),
method_name='update_worker')
tmpLog.debug('start')
if criteria is None:
criteria = {}
# sql to update job
sql = "UPDATE {0} SET {1} ".format(workTableName, workspec.bind_update_changes_expression())
sql += "WHERE workerID=:workerID "
# update worker
varMap = workspec.values_map(only_changed=True)
if len(varMap) > 0:
for tmpKey, tmpVal in iteritems(criteria):
mapKey = ':{0}_cr'.format(tmpKey)
sql += "AND {0}={1} ".format(tmpKey, mapKey)
varMap[mapKey] = tmpVal
varMap[':workerID'] = workspec.workerID
self.execute(sql, varMap)
nRow = self.cur.rowcount
# commit
self.commit()
tmpLog.debug('done with {0}'.format(nRow))
else:
nRow = None
tmpLog.debug('skip since no updated attributes')
# return
return nRow
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return None
# fill panda queue table
def fill_panda_queue_table(self, panda_queue_list, queue_config_mapper):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, method_name='fill_panda_queue_table')
tmpLog.debug('start')
# get existing queues
sqlE = "SELECT queueName FROM {0} ".format(pandaQueueTableName)
varMap = dict()
self.execute(sqlE, varMap)
resE = self.cur.fetchall()
for queueName, in resE:
# delete if not listed in cfg
if queueName not in panda_queue_list:
sqlD = "DELETE FROM {0} ".format(pandaQueueTableName)
sqlD += "WHERE queueName=:queueName "
varMap = dict()
varMap[':queueName'] = queueName
self.execute(sqlD, varMap)
# commit
self.commit()
# loop over queues
for queueName in panda_queue_list:
queueConfig = queue_config_mapper.get_queue(queueName)
if queueConfig is not None:
# check if already exist
sqlC = "SELECT * FROM {0} ".format(pandaQueueTableName)
sqlC += "WHERE queueName=:queueName "
varMap = dict()
varMap[':queueName'] = queueName
self.execute(sqlC, varMap)
resC = self.cur.fetchone()
if resC is not None:
# update limits just in case
varMap = dict()
sqlU = "UPDATE {0} SET ".format(pandaQueueTableName)
for qAttr in ['nQueueLimitJob', 'nQueueLimitWorker', 'maxWorkers',
'nQueueLimitJobRatio', 'nQueueLimitJobMax', 'nQueueLimitJobMin',
'nQueueLimitWorkerRatio', 'nQueueLimitWorkerMax', 'nQueueLimitWorkerMin']:
if hasattr(queueConfig, qAttr):
sqlU += '{0}=:{0},'.format(qAttr)
varMap[':{0}'.format(qAttr)] = getattr(queueConfig, qAttr)
if len(varMap) == 0:
continue
sqlU = sqlU[:-1]
sqlU += " WHERE queueName=:queueName "
varMap[':queueName'] = queueName
self.execute(sqlU, varMap)
else:
# insert queue
varMap = dict()
varMap[':queueName'] = queueName
attrName_list = []
tmpKey_list = []
for attrName in PandaQueueSpec.column_names().split(','):
if hasattr(queueConfig, attrName):
tmpKey = ':{0}'.format(attrName)
attrName_list.append(attrName)
tmpKey_list.append(tmpKey)
varMap[tmpKey] = getattr(queueConfig, attrName)
sqlP = "INSERT IGNORE INTO {0} ({1}) ".format(pandaQueueTableName, ','.join(attrName_list))
sqlS = "VALUES ({0}) ".format(','.join(tmpKey_list))
self.execute(sqlP + sqlS, varMap)
# commit
self.commit()
tmpLog.debug('done')
# return
return True
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return False
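    # note: this keeps the queue table in sync with the configuration -- queues missing
    # from panda_queue_list are deleted, limit columns of existing rows are refreshed,
    # and new queues are inserted with INSERT IGNORE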
# get number of jobs to fetch
def get_num_jobs_to_fetch(self, n_queues, interval):
# get logger
tmpLog = core_utils.make_logger(_logger, method_name='get_num_jobs_to_fetch')
try:
tmpLog.debug('start')
retMap = {}
# sql to get queues
sqlQ = "SELECT queueName,nQueueLimitJob,nQueueLimitJobRatio,nQueueLimitJobMax,nQueueLimitJobMin "
sqlQ += "FROM {0} ".format(pandaQueueTableName)
sqlQ += "WHERE jobFetchTime IS NULL OR jobFetchTime<:timeLimit "
sqlQ += "ORDER BY jobFetchTime "
# sql to count nQueue
sqlN = "SELECT COUNT(*) cnt,status FROM {0} ".format(jobTableName)
sqlN += "WHERE computingSite=:computingSite AND status IN (:status1,:status2) "
sqlN += "GROUP BY status "
# sql to update timestamp
sqlU = "UPDATE {0} SET jobFetchTime=:jobFetchTime ".format(pandaQueueTableName)
sqlU += "WHERE queueName=:queueName "
sqlU += "AND (jobFetchTime IS NULL OR jobFetchTime<:timeLimit) "
# get queues
timeNow = datetime.datetime.utcnow()
varMap = dict()
varMap[':timeLimit'] = timeNow - datetime.timedelta(seconds=interval)
self.execute(sqlQ, varMap)
resQ = self.cur.fetchall()
iQueues = 0
for queueName, nQueueLimitJob, nQueueLimitJobRatio, \
nQueueLimitJobMax, nQueueLimitJobMin in resQ:
# update timestamp to lock the queue
varMap = dict()
varMap[':queueName'] = queueName
varMap[':jobFetchTime'] = timeNow
varMap[':timeLimit'] = timeNow - datetime.timedelta(seconds=interval)
self.execute(sqlU, varMap)
nRow = self.cur.rowcount
# commit
self.commit()
# skip if not locked
if nRow == 0:
continue
# count nQueue
varMap = dict()
varMap[':computingSite'] = queueName
varMap[':status1'] = 'starting'
varMap[':status2'] = 'running'
self.execute(sqlN, varMap)
resN = self.cur.fetchall()
nsMap = dict()
for tmpN, tmpStatus in resN:
nsMap[tmpStatus] = tmpN
# get num of queued jobs
try:
nQueue = nsMap['starting']
except Exception:
nQueue = 0
# dynamic nQueueLimitJob
if nQueueLimitJobRatio is not None and nQueueLimitJobRatio > 0:
try:
nRunning = nsMap['running']
except Exception:
nRunning = 0
nQueueLimitJob = int(nRunning * nQueueLimitJobRatio / 100)
if nQueueLimitJobMin is None:
nQueueLimitJobMin = 1
nQueueLimitJob = max(nQueueLimitJob, nQueueLimitJobMin)
if nQueueLimitJobMax is not None:
nQueueLimitJob = min(nQueueLimitJob, nQueueLimitJobMax)
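                # e.g. nRunning=50 and nQueueLimitJobRatio=20 give int(50*20/100)=10,
                # floored at the default minimum of 1 and, with nQueueLimitJobMax=8,
                # capped to a final nQueueLimitJob of 8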
# more jobs need to be queued
if nQueueLimitJob is not None and nQueue < nQueueLimitJob:
retMap[queueName] = nQueueLimitJob - nQueue
# enough queues
iQueues += 1
if iQueues >= n_queues:
break
tmpLog.debug('got {0}'.format(str(retMap)))
return retMap
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(tmpLog)
# return
return {}
# get jobs to propagate checkpoints
def get_jobs_to_propagate(self, max_jobs, lock_interval, update_interval, locked_by):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, 'thr={0}'.format(locked_by),
method_name='get_jobs_to_propagate')
tmpLog.debug('start')
# sql to get jobs
sql = "SELECT PandaID FROM {0} ".format(jobTableName)
sql += "WHERE propagatorTime IS NOT NULL "
sql += "AND ((propagatorTime<:lockTimeLimit AND propagatorLock IS NOT NULL) "
sql += "OR (propagatorTime<:updateTimeLimit AND propagatorLock IS NULL)) "
sql += "ORDER BY propagatorTime LIMIT {0} ".format(max_jobs)
# sql to get jobs
sqlJ = "SELECT {0} FROM {1} ".format(JobSpec.column_names(), jobTableName)
sqlJ += "WHERE PandaID=:PandaID "
# sql to lock job
sqlL = "UPDATE {0} SET propagatorTime=:timeNow,propagatorLock=:lockedBy ".format(jobTableName)
sqlL += "WHERE PandaID=:PandaID "
sqlL += "AND ((propagatorTime<:lockTimeLimit AND propagatorLock IS NOT NULL) "
sqlL += "OR (propagatorTime<:updateTimeLimit AND propagatorLock IS NULL)) "
# sql to get events
sqlE = "SELECT {0} FROM {1} ".format(EventSpec.column_names(), eventTableName)
sqlE += "WHERE PandaID=:PandaID AND subStatus IN (:statusFinished,:statusFailed) "
# sql to get file
sqlF = "SELECT DISTINCT {0} FROM {1} f, {2} e, {1} f2 ".format(FileSpec.column_names('f2'),
fileTableName,
eventTableName)
sqlF += "WHERE e.PandaID=:PandaID AND e.fileID=f.fileID "
sqlF += "AND e.subStatus IN (:statusFinished,:statusFailed) "
sqlF += "AND f2.fileID=f.zipFileID "
# sql to get fileID of zip
sqlZ = "SELECT e.fileID,f.zipFileID FROM {0} f, {1} e ".format(fileTableName, eventTableName)
sqlZ += "WHERE e.PandaID=:PandaID AND e.fileID=f.fileID "
sqlZ += "AND e.subStatus IN (:statusFinished,:statusFailed) "
# get jobs
timeNow = datetime.datetime.utcnow()
lockTimeLimit = timeNow - datetime.timedelta(seconds=lock_interval)
updateTimeLimit = timeNow - datetime.timedelta(seconds=update_interval)
varMap = dict()
varMap[':lockTimeLimit'] = lockTimeLimit
varMap[':updateTimeLimit'] = updateTimeLimit
self.execute(sql, varMap)
resList = self.cur.fetchall()
pandaIDs = []
for pandaID, in resList:
pandaIDs.append(pandaID)
# partially randomise to increase success rate for lock
nJobs = int(max_jobs * 0.2)
subPandaIDs = list(pandaIDs[nJobs:])
random.shuffle(subPandaIDs)
pandaIDs = pandaIDs[:nJobs] + subPandaIDs
pandaIDs = pandaIDs[:max_jobs]
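            # net effect: the oldest ~20% keep their propagatorTime order while the rest
            # are shuffled, so concurrent propagators rarely race for the same IDs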
jobSpecList = []
iEvents = 0
for pandaID in pandaIDs:
# avoid a bulk update for many jobs with too many events
if iEvents > 10000:
break
# lock job
varMap = dict()
varMap[':PandaID'] = pandaID
varMap[':timeNow'] = timeNow
varMap[':lockedBy'] = locked_by
varMap[':lockTimeLimit'] = lockTimeLimit
varMap[':updateTimeLimit'] = updateTimeLimit
self.execute(sqlL, varMap)
nRow = self.cur.rowcount
# commit
self.commit()
if nRow > 0:
# read job
varMap = dict()
varMap[':PandaID'] = pandaID
self.execute(sqlJ, varMap)
res = self.cur.fetchone()
# make job
jobSpec = JobSpec()
jobSpec.pack(res)
jobSpec.propagatorLock = locked_by
zipFiles = {}
zipIdMap = dict()
# get zipIDs
varMap = dict()
varMap[':PandaID'] = jobSpec.PandaID
varMap[':statusFinished'] = 'finished'
varMap[':statusFailed'] = 'failed'
self.execute(sqlZ, varMap)
resZ = self.cur.fetchall()
for tmpFileID, tmpZipFileID in resZ:
zipIdMap[tmpFileID] = tmpZipFileID
# get zip files
varMap = dict()
varMap[':PandaID'] = jobSpec.PandaID
varMap[':statusFinished'] = 'finished'
varMap[':statusFailed'] = 'failed'
self.execute(sqlF, varMap)
resFs = self.cur.fetchall()
for resF in resFs:
fileSpec = FileSpec()
fileSpec.pack(resF)
zipFiles[fileSpec.fileID] = fileSpec
# read events
varMap = dict()
varMap[':PandaID'] = jobSpec.PandaID
varMap[':statusFinished'] = 'finished'
varMap[':statusFailed'] = 'failed'
self.execute(sqlE, varMap)
resEs = self.cur.fetchall()
for resE in resEs:
eventSpec = EventSpec()
eventSpec.pack(resE)
zipFileSpec = None
# get associated zip file if any
if eventSpec.fileID is not None:
if eventSpec.fileID not in zipIdMap:
continue
zipFileID = zipIdMap[eventSpec.fileID]
if zipFileID is not None:
zipFileSpec = zipFiles[zipFileID]
jobSpec.add_event(eventSpec, zipFileSpec)
iEvents += 1
jobSpecList.append(jobSpec)
tmpLog.debug('got {0} jobs'.format(len(jobSpecList)))
return jobSpecList
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return []
# get jobs in sub status
def get_jobs_in_sub_status(self, sub_status, max_jobs, time_column=None, lock_column=None,
interval_without_lock=None, interval_with_lock=None,
locked_by=None, new_sub_status=None):
try:
# get logger
if locked_by is None:
msgPfx = None
else:
msgPfx = 'id={0}'.format(locked_by)
tmpLog = core_utils.make_logger(_logger, msgPfx, method_name='get_jobs_in_sub_status')
tmpLog.debug('start subStatus={0} timeColumn={1}'.format(sub_status, time_column))
timeNow = datetime.datetime.utcnow()
# sql to count jobs being processed
sqlC = "SELECT COUNT(*) cnt FROM {0} ".format(jobTableName)
sqlC += "WHERE ({0} IS NOT NULL AND subStatus=:subStatus ".format(lock_column)
if time_column is not None and interval_with_lock is not None:
sqlC += "AND ({0} IS NOT NULL AND {0}>:lockTimeLimit) ".format(time_column)
sqlC += ") OR subStatus=:newSubStatus "
# count jobs
if max_jobs > 0 and new_sub_status is not None:
varMap = dict()
varMap[':subStatus'] = sub_status
varMap[':newSubStatus'] = new_sub_status
if time_column is not None and interval_with_lock is not None:
varMap[':lockTimeLimit'] = timeNow - datetime.timedelta(seconds=interval_with_lock)
self.execute(sqlC, varMap)
nProcessing, = self.cur.fetchone()
if nProcessing >= max_jobs:
# commit
self.commit()
tmpLog.debug('enough jobs {0} are being processed in {1} state'.format(nProcessing,
new_sub_status))
return []
max_jobs -= nProcessing
# sql to get job IDs
sql = "SELECT PandaID FROM {0} ".format(jobTableName)
sql += "WHERE subStatus=:subStatus "
if time_column is not None:
sql += "AND ({0} IS NULL ".format(time_column)
if interval_with_lock is not None:
sql += "OR ({0}<:lockTimeLimit AND {1} IS NOT NULL) ".format(time_column, lock_column)
if interval_without_lock is not None:
sql += "OR ({0}<:updateTimeLimit AND {1} IS NULL) ".format(time_column, lock_column)
sql += ') '
sql += "ORDER BY {0} ".format(time_column)
# sql to lock job
sqlL = "UPDATE {0} SET {1}=:timeNow,{2}=:lockedBy ".format(jobTableName, time_column, lock_column)
sqlL += "WHERE PandaID=:PandaID AND subStatus=:subStatus "
if time_column is not None:
sqlL += "AND ({0} IS NULL ".format(time_column)
if interval_with_lock is not None:
sqlL += "OR ({0}<:lockTimeLimit AND {1} IS NOT NULL) ".format(time_column, lock_column)
if interval_without_lock is not None:
sqlL += "OR ({0}<:updateTimeLimit AND {1} IS NULL) ".format(time_column, lock_column)
sqlL += ') '
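            # the UPDATE above doubles as an optimistic lock: a job is (re)locked when its
            # existing lock is older than interval_with_lock, or when it is unlocked and
            # was last touched more than interval_without_lock seconds ago; the rowcount
            # of the UPDATE tells whether the lock was obtained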
# sql to get jobs
sqlGJ = "SELECT {0} FROM {1} ".format(JobSpec.column_names(), jobTableName)
sqlGJ += "WHERE PandaID=:PandaID "
# sql to get file
sqlGF = "SELECT {0} FROM {1} ".format(FileSpec.column_names(), fileTableName)
sqlGF += "WHERE PandaID=:PandaID AND fileType=:type "
# get jobs
varMap = dict()
varMap[':subStatus'] = sub_status
if interval_with_lock is not None:
varMap[':lockTimeLimit'] = timeNow - datetime.timedelta(seconds=interval_with_lock)
if interval_without_lock is not None:
varMap[':updateTimeLimit'] = timeNow - datetime.timedelta(seconds=interval_without_lock)
self.execute(sql, varMap)
resList = self.cur.fetchall()
pandaIDs = []
for pandaID, in resList:
pandaIDs.append(pandaID)
# partially randomise to increase success rate for lock
nJobs = int(max_jobs * 0.2)
subPandaIDs = list(pandaIDs[nJobs:])
random.shuffle(subPandaIDs)
pandaIDs = pandaIDs[:nJobs] + subPandaIDs
pandaIDs = pandaIDs[:max_jobs]
jobSpecList = []
for pandaID in pandaIDs:
# lock job
if locked_by is not None:
varMap = dict()
varMap[':PandaID'] = pandaID
varMap[':timeNow'] = timeNow
varMap[':lockedBy'] = locked_by
varMap[':subStatus'] = sub_status
if interval_with_lock is not None:
varMap[':lockTimeLimit'] = timeNow - datetime.timedelta(seconds=interval_with_lock)
if interval_without_lock is not None:
varMap[':updateTimeLimit'] = timeNow - datetime.timedelta(seconds=interval_without_lock)
self.execute(sqlL, varMap)
nRow = self.cur.rowcount
# commit
self.commit()
else:
nRow = 1
if nRow > 0:
# get job
varMap = dict()
varMap[':PandaID'] = pandaID
self.execute(sqlGJ, varMap)
resGJ = self.cur.fetchone()
# make job
jobSpec = JobSpec()
jobSpec.pack(resGJ)
if locked_by is not None:
jobSpec.lockedBy = locked_by
setattr(jobSpec, time_column, timeNow)
# get files
varMap = dict()
varMap[':PandaID'] = jobSpec.PandaID
varMap[':type'] = 'input'
self.execute(sqlGF, varMap)
resGF = self.cur.fetchall()
for resFile in resGF:
fileSpec = FileSpec()
fileSpec.pack(resFile)
jobSpec.add_in_file(fileSpec)
# append
jobSpecList.append(jobSpec)
tmpLog.debug('got {0} jobs'.format(len(jobSpecList)))
return jobSpecList
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return []
# register a worker
def register_worker(self, workspec, jobspec_list, locked_by):
tmpLog = core_utils.make_logger(_logger, 'batchID={0}'.format(workspec.batchID),
method_name='register_worker')
try:
tmpLog.debug('start')
# sql to check if exists
sqlE = "SELECT 1 c FROM {0} WHERE workerID=:workerID ".format(workTableName)
# sql to insert job and worker relationship
sqlR = "INSERT INTO {0} ({1}) ".format(jobWorkerTableName, JobWorkerRelationSpec.column_names())
sqlR += JobWorkerRelationSpec.bind_values_expression()
# sql to get number of workers
sqlNW = "SELECT DISTINCT t.workerID FROM {0} t, {1} w ".format(jobWorkerTableName, workTableName)
sqlNW += "WHERE t.PandaID=:pandaID AND w.workerID=t.workerID "
sqlNW += "AND w.status IN (:st_submitted,:st_running,:st_idle) "
# sql to decrement nNewWorkers
sqlDN = "UPDATE {0} ".format(pandaQueueTableName)
sqlDN += "SET nNewWorkers=nNewWorkers-1 "
sqlDN += "WHERE queueName=:queueName AND nNewWorkers IS NOT NULL AND nNewWorkers>0 "
# insert worker if new
isNew = False
if workspec.isNew:
varMap = dict()
varMap[':workerID'] = workspec.workerID
self.execute(sqlE, varMap)
resE = self.cur.fetchone()
if resE is None:
isNew = True
if isNew:
# insert a worker
sqlI = "INSERT INTO {0} ({1}) ".format(workTableName, WorkSpec.column_names())
sqlI += WorkSpec.bind_values_expression()
varMap = workspec.values_list()
self.execute(sqlI, varMap)
# decrement nNewWorkers
varMap = dict()
varMap[':queueName'] = workspec.computingSite
self.execute(sqlDN, varMap)
else:
                # do not update workerID
workspec.force_not_update('workerID')
# update a worker
sqlU = "UPDATE {0} SET {1} ".format(workTableName, workspec.bind_update_changes_expression())
sqlU += "WHERE workerID=:workerID "
varMap = workspec.values_map(only_changed=True)
varMap[':workerID'] = workspec.workerID
self.execute(sqlU, varMap)
# collect values to update jobs or insert job/worker mapping
varMapsR = []
if jobspec_list is not None:
for jobSpec in jobspec_list:
# get number of workers for the job
varMap = dict()
varMap[':pandaID'] = jobSpec.PandaID
varMap[':st_submitted'] = WorkSpec.ST_submitted
varMap[':st_running'] = WorkSpec.ST_running
varMap[':st_idle'] = WorkSpec.ST_idle
self.execute(sqlNW, varMap)
resNW = self.cur.fetchall()
workerIDs = set()
workerIDs.add(workspec.workerID)
for tmpWorkerID, in resNW:
workerIDs.add(tmpWorkerID)
# update attributes
if jobSpec.subStatus in ['submitted', 'running']:
jobSpec.nWorkers = len(workerIDs)
try:
jobSpec.nWorkersInTotal += 1
except Exception:
jobSpec.nWorkersInTotal = jobSpec.nWorkers
elif workspec.hasJob == 1:
if workspec.status == WorkSpec.ST_missed:
                            # do not update if other workers are active
if len(workerIDs) > 1:
continue
core_utils.update_job_attributes_with_workers(workspec.mapType, [jobSpec],
[workspec], {}, {})
jobSpec.trigger_propagation()
else:
jobSpec.subStatus = 'submitted'
jobSpec.nWorkers = len(workerIDs)
try:
jobSpec.nWorkersInTotal += 1
except Exception:
jobSpec.nWorkersInTotal = jobSpec.nWorkers
else:
if workspec.status == WorkSpec.ST_missed:
                            # do not update if other workers are active
if len(workerIDs) > 1:
continue
core_utils.update_job_attributes_with_workers(workspec.mapType, [jobSpec],
[workspec], {}, {})
jobSpec.trigger_propagation()
else:
jobSpec.subStatus = 'queued'
# sql to update job
if len(jobSpec.values_map(only_changed=True)) > 0:
sqlJ = "UPDATE {0} SET {1} ".format(jobTableName, jobSpec.bind_update_changes_expression())
sqlJ += "WHERE PandaID=:cr_PandaID AND lockedBy=:cr_lockedBy "
# update job
varMap = jobSpec.values_map(only_changed=True)
varMap[':cr_PandaID'] = jobSpec.PandaID
varMap[':cr_lockedBy'] = locked_by
self.execute(sqlJ, varMap)
if jobSpec.subStatus in ['submitted', 'running']:
# values for job/worker mapping
jwRelation = JobWorkerRelationSpec()
jwRelation.PandaID = jobSpec.PandaID
jwRelation.workerID = workspec.workerID
varMap = jwRelation.values_list()
varMapsR.append(varMap)
# insert job/worker mapping
if len(varMapsR) > 0:
self.executemany(sqlR, varMapsR)
# commit
self.commit()
# return
return True
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(tmpLog)
# return
return False
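    # note: register_worker inserts or updates the worker row, adjusts job subStatus and
    # the nWorkers/nWorkersInTotal counters, and records job-worker relations for
    # submitted/running jobs before a single commit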
# insert workers
def insert_workers(self, workspec_list, locked_by):
tmpLog = core_utils.make_logger(_logger, 'locked_by={0}'.format(locked_by),
method_name='insert_workers')
try:
tmpLog.debug('start')
timeNow = datetime.datetime.utcnow()
# sql to insert a worker
sqlI = "INSERT INTO {0} ({1}) ".format(workTableName, WorkSpec.column_names())
sqlI += WorkSpec.bind_values_expression()
for workSpec in workspec_list:
tmpWorkSpec = copy.copy(workSpec)
# insert worker if new
if not tmpWorkSpec.isNew:
continue
tmpWorkSpec.modificationTime = timeNow
tmpWorkSpec.status = WorkSpec.ST_pending
varMap = tmpWorkSpec.values_list()
self.execute(sqlI, varMap)
# commit
self.commit()
# return
return True
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(tmpLog)
# return
return False
# get queues to submit workers
def get_queues_to_submit(self, n_queues, lookup_interval, lock_interval, locked_by, queue_lock_interval):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, method_name='get_queues_to_submit')
tmpLog.debug('start')
retMap = dict()
siteName = None
resourceMap = dict()
# sql to get a site
sqlS = "SELECT siteName FROM {0} ".format(pandaQueueTableName)
sqlS += "WHERE submitTime IS NULL "
sqlS += "OR (submitTime<:lockTimeLimit AND lockedBy IS NOT NULL) "
sqlS += "OR (submitTime<:lookupTimeLimit AND lockedBy IS NULL) "
sqlS += "ORDER BY submitTime "
# sql to get queues
sqlQ = "SELECT queueName,resourceType,nNewWorkers FROM {0} ".format(pandaQueueTableName)
sqlQ += "WHERE siteName=:siteName "
# sql to get orphaned workers
sqlO = "SELECT workerID FROM {0} ".format(workTableName)
sqlO += "WHERE computingSite=:computingSite "
sqlO += "AND status=:status AND modificationTime<:timeLimit "
            # sql to delete orphaned workers; bulk delete is avoided since deleting zero records can trigger deadlocks
sqlD = "DELETE FROM {0} ".format(workTableName)
sqlD += "WHERE workerID=:workerID "
# sql to count nQueue
sqlN = "SELECT status,COUNT(*) cnt FROM {0} ".format(workTableName)
sqlN += "WHERE computingSite=:computingSite "
# sql to count re-fillers
sqlR = "SELECT COUNT(*) cnt FROM {0} ".format(workTableName)
sqlR += "WHERE computingSite=:computingSite AND status=:status "
sqlR += "AND nJobsToReFill IS NOT NULL AND nJobsToReFill>0 "
# sql to update timestamp and lock site
sqlU = "UPDATE {0} SET submitTime=:submitTime,lockedBy=:lockedBy ".format(pandaQueueTableName)
sqlU += "WHERE siteName=:siteName "
sqlU += "AND (submitTime IS NULL OR submitTime<:timeLimit) "
# get sites
timeNow = datetime.datetime.utcnow()
varMap = dict()
varMap[':lockTimeLimit'] = timeNow - datetime.timedelta(seconds=queue_lock_interval)
varMap[':lookupTimeLimit'] = timeNow - datetime.timedelta(seconds=lookup_interval)
self.execute(sqlS, varMap)
resS = self.cur.fetchall()
for siteName, in resS:
# update timestamp to lock the site
varMap = dict()
varMap[':siteName'] = siteName
varMap[':submitTime'] = timeNow
varMap[':lockedBy'] = locked_by
varMap[':timeLimit'] = timeNow - datetime.timedelta(seconds=lookup_interval)
self.execute(sqlU, varMap)
nRow = self.cur.rowcount
# commit
self.commit()
# skip if not locked
if nRow == 0:
continue
# get queues
varMap = dict()
varMap[':siteName'] = siteName
self.execute(sqlQ, varMap)
resQ = self.cur.fetchall()
for queueName, resourceType, nNewWorkers in resQ:
# delete orphaned workers
varMap = dict()
varMap[':computingSite'] = queueName
varMap[':status'] = WorkSpec.ST_pending
varMap[':timeLimit'] = timeNow - datetime.timedelta(seconds=lock_interval)
sqlO_tmp = sqlO
if resourceType != 'ANY':
varMap[':resourceType'] = resourceType
sqlO_tmp += "AND resourceType=:resourceType "
self.execute(sqlO_tmp, varMap)
resO = self.cur.fetchall()
for tmpWorkerID, in resO:
varMap = dict()
varMap[':workerID'] = tmpWorkerID
self.execute(sqlD, varMap)
# commit
self.commit()
# count nQueue
varMap = dict()
varMap[':computingSite'] = queueName
varMap[':resourceType'] = resourceType
sqlN_tmp = sqlN
if resourceType != 'ANY':
varMap[':resourceType'] = resourceType
sqlN_tmp += "AND resourceType=:resourceType "
sqlN_tmp += "GROUP BY status "
self.execute(sqlN_tmp, varMap)
nQueue = 0
nReady = 0
nRunning = 0
for workerStatus, tmpNum in self.cur.fetchall():
if workerStatus in [WorkSpec.ST_submitted, WorkSpec.ST_pending, WorkSpec.ST_idle]:
nQueue += tmpNum
elif workerStatus in [WorkSpec.ST_ready]:
nReady += tmpNum
elif workerStatus in [WorkSpec.ST_running]:
nRunning += tmpNum
# count nFillers
varMap = dict()
varMap[':computingSite'] = queueName
varMap[':status'] = WorkSpec.ST_running
sqlR_tmp = sqlR
if resourceType != 'ANY':
varMap[':resourceType'] = resourceType
sqlR_tmp += "AND resourceType=:resourceType "
self.execute(sqlR_tmp, varMap)
nReFill, = self.cur.fetchone()
nReady += nReFill
# add
retMap.setdefault(queueName, {})
retMap[queueName][resourceType] = {'nReady': nReady,
'nRunning': nRunning,
'nQueue': nQueue,
'nNewWorkers': nNewWorkers}
resourceMap[resourceType] = queueName
                # enough queues; this always breaks after the first locked site since len(retMap) >= 0 always holds
if len(retMap) >= 0:
break
tmpLog.debug('got retMap {0}'.format(str(retMap)))
tmpLog.debug('got siteName {0}'.format(str(siteName)))
tmpLog.debug('got resourceMap {0}'.format(str(resourceMap)))
return retMap, siteName, resourceMap
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return {}, None, {}
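    # note: the return value is a tuple of
    #   ({queueName: {resourceType: {'nReady', 'nRunning', 'nQueue', 'nNewWorkers'}}},
    #    the siteName that was locked, {resourceType: queueName})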
# get job chunks to make workers
def get_job_chunks_for_workers(self, queue_name, n_workers, n_ready, n_jobs_per_worker, n_workers_per_job,
use_job_late_binding, check_interval, lock_interval, locked_by,
allow_job_mixture=False, max_workers_per_job_in_total=None,
max_workers_per_job_per_cycle=None):
toCommit = False
try:
# get logger
tmpLog = core_utils.make_logger(_logger, 'queue={0}'.format(queue_name),
method_name='get_job_chunks_for_workers')
tmpLog.debug('start')
# define maxJobs
if n_jobs_per_worker is not None:
maxJobs = (n_workers + n_ready) * n_jobs_per_worker
else:
maxJobs = -(-(n_workers + n_ready) // n_workers_per_job)
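                # -(-a // b) is integer ceiling division, e.g. n_workers=5, n_ready=0,
                # n_workers_per_job=2 gives -(-5 // 2) = 3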
# core part of sql
# submitted and running are for multi-workers
sqlCore = "WHERE (subStatus IN (:subStat1,:subStat2) OR (subStatus IN (:subStat3,:subStat4) "
sqlCore += "AND nWorkers IS NOT NULL AND nWorkersLimit IS NOT NULL AND nWorkers<nWorkersLimit "
sqlCore += "AND moreWorkers IS NULL AND (maxWorkersInTotal IS NULL OR nWorkersInTotal IS NULL "
sqlCore += "OR nWorkersInTotal<maxWorkersInTotal))) "
sqlCore += "AND (submitterTime IS NULL "
sqlCore += "OR (submitterTime<:lockTimeLimit AND lockedBy IS NOT NULL) "
sqlCore += "OR (submitterTime<:checkTimeLimit AND lockedBy IS NULL)) "
sqlCore += "AND computingSite=:queueName "
# sql to get job IDs
sqlP = "SELECT PandaID FROM {0} ".format(jobTableName)
sqlP += sqlCore
sqlP += "ORDER BY currentPriority DESC,taskID,PandaID "
# sql to get job
sqlJ = "SELECT {0} FROM {1} ".format(JobSpec.column_names(), jobTableName)
sqlJ += "WHERE PandaID=:PandaID "
# sql to lock job
sqlL = "UPDATE {0} SET submitterTime=:timeNow,lockedBy=:lockedBy ".format(jobTableName)
sqlL += sqlCore
sqlL += "AND PandaID=:PandaID "
timeNow = datetime.datetime.utcnow()
lockTimeLimit = timeNow - datetime.timedelta(seconds=lock_interval)
checkTimeLimit = timeNow - datetime.timedelta(seconds=check_interval)
# sql to get file
sqlGF = "SELECT {0} FROM {1} ".format(FileSpec.column_names(), fileTableName)
sqlGF += "WHERE PandaID=:PandaID AND fileType=:type "
jobChunkList = []
# count jobs for nJobsPerWorker>1
nAvailableJobs = None
if n_jobs_per_worker is not None and n_jobs_per_worker > 1:
toCommit = True
# sql to count jobs
sqlC = "SELECT COUNT(*) cnt FROM {0} ".format(jobTableName)
sqlC += sqlCore
# count jobs
varMap = dict()
varMap[':subStat1'] = 'prepared'
varMap[':subStat2'] = 'queued'
varMap[':subStat3'] = 'submitted'
varMap[':subStat4'] = 'running'
varMap[':queueName'] = queue_name
varMap[':lockTimeLimit'] = lockTimeLimit
varMap[':checkTimeLimit'] = checkTimeLimit
self.execute(sqlC, varMap)
nAvailableJobs, = self.cur.fetchone()
maxJobs = int(min(maxJobs, nAvailableJobs) / n_jobs_per_worker) * n_jobs_per_worker
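                # rounded down to a multiple of n_jobs_per_worker, e.g. maxJobs=10,
                # nAvailableJobs=7, n_jobs_per_worker=3 gives int(7/3)*3 = 6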
tmpStr = 'n_workers={0} n_ready={1} '.format(n_workers, n_ready)
tmpStr += 'n_jobs_per_worker={0} n_workers_per_job={1} '.format(n_jobs_per_worker, n_workers_per_job)
tmpStr += 'n_ava_jobs={0}'.format(nAvailableJobs)
tmpLog.debug(tmpStr)
if maxJobs == 0:
tmpStr = 'skip due to maxJobs=0'
tmpLog.debug(tmpStr)
else:
# get job IDs
varMap = dict()
varMap[':subStat1'] = 'prepared'
varMap[':subStat2'] = 'queued'
varMap[':subStat3'] = 'submitted'
varMap[':subStat4'] = 'running'
varMap[':queueName'] = queue_name
varMap[':lockTimeLimit'] = lockTimeLimit
varMap[':checkTimeLimit'] = checkTimeLimit
self.execute(sqlP, varMap)
resP = self.cur.fetchall()
tmpStr = 'fetched {0} jobs'.format(len(resP))
tmpLog.debug(tmpStr)
jobChunk = []
iJobs = 0
for pandaID, in resP:
toCommit = True
toEscape = False
# lock job
varMap = dict()
varMap[':subStat1'] = 'prepared'
varMap[':subStat2'] = 'queued'
varMap[':subStat3'] = 'submitted'
varMap[':subStat4'] = 'running'
varMap[':queueName'] = queue_name
varMap[':lockTimeLimit'] = lockTimeLimit
varMap[':checkTimeLimit'] = checkTimeLimit
varMap[':PandaID'] = pandaID
varMap[':timeNow'] = timeNow
varMap[':lockedBy'] = locked_by
self.execute(sqlL, varMap)
nRow = self.cur.rowcount
if nRow > 0:
iJobs += 1
# get job
varMap = dict()
varMap[':PandaID'] = pandaID
self.execute(sqlJ, varMap)
resJ = self.cur.fetchone()
# make job
jobSpec = JobSpec()
jobSpec.pack(resJ)
jobSpec.lockedBy = locked_by
# get files
varMap = dict()
varMap[':PandaID'] = pandaID
varMap[':type'] = 'input'
self.execute(sqlGF, varMap)
resGF = self.cur.fetchall()
for resFile in resGF:
fileSpec = FileSpec()
fileSpec.pack(resFile)
jobSpec.add_in_file(fileSpec)
# new chunk
if len(jobChunk) > 0 and jobChunk[0].taskID != jobSpec.taskID and not allow_job_mixture:
tmpLog.debug('new chunk with {0} jobs due to taskID change'.format(len(jobChunk)))
jobChunkList.append(jobChunk)
jobChunk = []
# only prepared for new worker
if len(jobChunkList) >= n_ready and jobSpec.subStatus == 'queued':
toCommit = False
else:
jobChunk.append(jobSpec)
# enough jobs in chunk
if n_jobs_per_worker is not None and len(jobChunk) >= n_jobs_per_worker:
tmpLog.debug('new chunk with {0} jobs due to n_jobs_per_worker'.format(len(jobChunk)))
jobChunkList.append(jobChunk)
jobChunk = []
# one job per multiple workers
elif n_workers_per_job is not None:
if jobSpec.nWorkersLimit is None:
jobSpec.nWorkersLimit = n_workers_per_job
if max_workers_per_job_in_total is not None:
jobSpec.maxWorkersInTotal = max_workers_per_job_in_total
nMultiWorkers = min(jobSpec.nWorkersLimit - jobSpec.nWorkers,
n_workers - len(jobChunkList))
if jobSpec.maxWorkersInTotal is not None and jobSpec.nWorkersInTotal is not None:
nMultiWorkers = min(nMultiWorkers,
jobSpec.maxWorkersInTotal - jobSpec.nWorkersInTotal)
if max_workers_per_job_per_cycle is not None:
nMultiWorkers = min(nMultiWorkers, max_workers_per_job_per_cycle)
if nMultiWorkers < 0:
nMultiWorkers = 0
tmpLog.debug(
'new {0} chunks with {1} jobs due to n_workers_per_job'.format(nMultiWorkers,
len(jobChunk)))
for i in range(nMultiWorkers):
jobChunkList.append(jobChunk)
jobChunk = []
# enough job chunks
if len(jobChunkList) >= n_workers:
toEscape = True
if toCommit:
self.commit()
else:
self.rollback()
if toEscape or iJobs >= maxJobs:
break
tmpLog.debug('got {0} job chunks'.format(len(jobChunkList)))
return jobChunkList
except Exception:
# roll back
if toCommit:
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return []
# get workers to monitor
def get_workers_to_update(self, max_workers, check_interval, lock_interval, locked_by):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, method_name='get_workers_to_update')
tmpLog.debug('start')
# sql to get workers
sqlW = "SELECT workerID,configID,mapType FROM {0} ".format(workTableName)
sqlW += "WHERE status IN (:st_submitted,:st_running,:st_idle) "
sqlW += "AND ((modificationTime<:lockTimeLimit AND lockedBy IS NOT NULL) "
sqlW += "OR (modificationTime<:checkTimeLimit AND lockedBy IS NULL)) "
sqlW += "ORDER BY modificationTime LIMIT {0} ".format(max_workers)
# sql to lock worker without time check
sqlL = "UPDATE {0} SET modificationTime=:timeNow,lockedBy=:lockedBy ".format(workTableName)
sqlL += "WHERE workerID=:workerID "
# sql to update modificationTime
sqlLM = "UPDATE {0} SET modificationTime=:timeNow ".format(workTableName)
sqlLM += "WHERE workerID=:workerID "
# sql to lock worker with time check
sqlLT = "UPDATE {0} SET modificationTime=:timeNow,lockedBy=:lockedBy ".format(workTableName)
sqlLT += "WHERE workerID=:workerID "
sqlLT += "AND status IN (:st_submitted,:st_running,:st_idle) "
sqlLT += "AND ((modificationTime<:lockTimeLimit AND lockedBy IS NOT NULL) "
sqlLT += "OR (modificationTime<:checkTimeLimit AND lockedBy IS NULL)) "
# sql to get associated workerIDs
sqlA = "SELECT t.workerID FROM {0} t, {0} s, {1} w ".format(jobWorkerTableName, workTableName)
sqlA += "WHERE s.PandaID=t.PandaID AND s.workerID=:workerID "
sqlA += "AND w.workerID=t.workerID AND w.status IN (:st_submitted,:st_running,:st_idle) "
# sql to get associated workers
sqlG = "SELECT {0} FROM {1} ".format(WorkSpec.column_names(), workTableName)
sqlG += "WHERE workerID=:workerID "
# sql to get associated PandaIDs
sqlP = "SELECT PandaID FROM {0} ".format(jobWorkerTableName)
sqlP += "WHERE workerID=:workerID "
# get workerIDs
timeNow = datetime.datetime.utcnow()
lockTimeLimit = timeNow - datetime.timedelta(seconds=lock_interval)
checkTimeLimit = timeNow - datetime.timedelta(seconds=check_interval)
varMap = dict()
varMap[':st_submitted'] = WorkSpec.ST_submitted
varMap[':st_running'] = WorkSpec.ST_running
varMap[':st_idle'] = WorkSpec.ST_idle
varMap[':lockTimeLimit'] = lockTimeLimit
varMap[':checkTimeLimit'] = checkTimeLimit
self.execute(sqlW, varMap)
resW = self.cur.fetchall()
tmpWorkers = set()
for workerID, configID, mapType in resW:
# ignore configID
if not core_utils.dynamic_plugin_change():
configID = None
tmpWorkers.add((workerID, configID, mapType))
checkedIDs = set()
retVal = {}
for workerID, configID, mapType in tmpWorkers:
# skip
if workerID in checkedIDs:
continue
# get associated workerIDs
varMap = dict()
varMap[':workerID'] = workerID
varMap[':st_submitted'] = WorkSpec.ST_submitted
varMap[':st_running'] = WorkSpec.ST_running
varMap[':st_idle'] = WorkSpec.ST_idle
self.execute(sqlA, varMap)
resA = self.cur.fetchall()
workerIDtoScan = set()
for tmpWorkID, in resA:
workerIDtoScan.add(tmpWorkID)
# add original ID just in case since no relation when job is not yet bound
workerIDtoScan.add(workerID)
                # use only one worker per set (the one with the smallest workerID) to avoid updating the same worker set concurrently
if mapType == WorkSpec.MT_MultiWorkers:
if workerID != min(workerIDtoScan):
# update modification time
varMap = dict()
varMap[':workerID'] = workerID
varMap[':timeNow'] = timeNow
self.execute(sqlLM, varMap)
# commit
self.commit()
continue
# lock worker
varMap = dict()
varMap[':workerID'] = workerID
varMap[':lockedBy'] = locked_by
varMap[':timeNow'] = timeNow
varMap[':st_submitted'] = WorkSpec.ST_submitted
varMap[':st_running'] = WorkSpec.ST_running
varMap[':st_idle'] = WorkSpec.ST_idle
varMap[':lockTimeLimit'] = lockTimeLimit
varMap[':checkTimeLimit'] = checkTimeLimit
self.execute(sqlLT, varMap)
nRow = self.cur.rowcount
# commit
self.commit()
# skip if not locked
if nRow == 0:
continue
# get workers
queueName = None
workersList = []
for tmpWorkID in workerIDtoScan:
checkedIDs.add(tmpWorkID)
# get worker
varMap = dict()
varMap[':workerID'] = tmpWorkID
self.execute(sqlG, varMap)
resG = self.cur.fetchone()
workSpec = WorkSpec()
workSpec.pack(resG)
if queueName is None:
queueName = workSpec.computingSite
workersList.append(workSpec)
# get associated PandaIDs
varMap = dict()
varMap[':workerID'] = tmpWorkID
self.execute(sqlP, varMap)
resP = self.cur.fetchall()
workSpec.pandaid_list = []
for tmpPandaID, in resP:
workSpec.pandaid_list.append(tmpPandaID)
if len(workSpec.pandaid_list) > 0:
workSpec.nJobs = len(workSpec.pandaid_list)
# lock worker
if tmpWorkID != workerID:
varMap = dict()
varMap[':workerID'] = tmpWorkID
varMap[':lockedBy'] = locked_by
varMap[':timeNow'] = timeNow
self.execute(sqlL, varMap)
workSpec.lockedBy = locked_by
workSpec.force_not_update('lockedBy')
# commit
self.commit()
# add
if queueName is not None:
retVal.setdefault(queueName, dict())
retVal[queueName].setdefault(configID, [])
retVal[queueName][configID].append(workersList)
tmpLog.debug('got {0}'.format(str(retVal)))
return retVal
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return {}
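    # note: the return value has the shape {queueName: {configID: [[WorkSpec, ...], ...]}},
    # i.e. lists of associated worker sets grouped by queue and configuration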
# get workers to propagate
def get_workers_to_propagate(self, max_workers, check_interval):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, method_name='get_workers_to_propagate')
tmpLog.debug('start')
# sql to get worker IDs
sqlW = "SELECT workerID FROM {0} ".format(workTableName)
sqlW += "WHERE lastUpdate IS NOT NULL AND lastUpdate<:checkTimeLimit "
sqlW += "ORDER BY lastUpdate "
# sql to lock worker
sqlL = "UPDATE {0} SET lastUpdate=:timeNow ".format(workTableName)
sqlL += "WHERE lastUpdate IS NOT NULL AND lastUpdate<:checkTimeLimit "
sqlL += "AND workerID=:workerID "
# sql to get associated PandaIDs
sqlA = "SELECT PandaID FROM {0} ".format(jobWorkerTableName)
sqlA += "WHERE workerID=:workerID "
# sql to get workers
sqlG = "SELECT {0} FROM {1} ".format(WorkSpec.column_names(), workTableName)
sqlG += "WHERE workerID=:workerID "
timeNow = datetime.datetime.utcnow()
timeLimit = timeNow - datetime.timedelta(seconds=check_interval)
# get workerIDs
varMap = dict()
varMap[':checkTimeLimit'] = timeLimit
self.execute(sqlW, varMap)
resW = self.cur.fetchall()
tmpWorkers = []
for workerID, in resW:
tmpWorkers.append(workerID)
# partially randomize to increase hit rate
nWorkers = int(max_workers * 0.2)
subTmpWorkers = list(tmpWorkers[nWorkers:])
random.shuffle(subTmpWorkers)
tmpWorkers = tmpWorkers[:nWorkers] + subTmpWorkers
tmpWorkers = tmpWorkers[:max_workers]
retVal = []
for workerID in tmpWorkers:
# lock worker
varMap = dict()
varMap[':workerID'] = workerID
varMap[':timeNow'] = timeNow
varMap[':checkTimeLimit'] = timeLimit
self.execute(sqlL, varMap)
nRow = self.cur.rowcount
if nRow > 0:
# get worker
varMap = dict()
varMap[':workerID'] = workerID
self.execute(sqlG, varMap)
resG = self.cur.fetchone()
workSpec = WorkSpec()
workSpec.pack(resG)
retVal.append(workSpec)
# get associated PandaIDs
varMap = dict()
varMap[':workerID'] = workerID
self.execute(sqlA, varMap)
resA = self.cur.fetchall()
workSpec.pandaid_list = []
for pandaID, in resA:
workSpec.pandaid_list.append(pandaID)
# commit
self.commit()
tmpLog.debug('got {0} workers'.format(len(retVal)))
return retVal
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return {}
# get workers to feed events
def get_workers_to_feed_events(self, max_workers, lock_interval, locked_by):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, method_name='get_workers_to_feed_events')
tmpLog.debug('start')
# sql to get workers
sqlW = "SELECT workerID, status FROM {0} ".format(workTableName)
sqlW += "WHERE eventsRequest=:eventsRequest AND status IN (:status1,:status2) "
sqlW += "AND (eventFeedTime IS NULL OR eventFeedTime<:lockTimeLimit) "
sqlW += "ORDER BY eventFeedTime LIMIT {0} ".format(max_workers)
# sql to lock worker
sqlL = "UPDATE {0} SET eventFeedTime=:timeNow,eventFeedLock=:lockedBy ".format(workTableName)
sqlL += "WHERE eventsRequest=:eventsRequest AND status=:status "
sqlL += "AND (eventFeedTime IS NULL OR eventFeedTime<:lockTimeLimit) "
sqlL += "AND workerID=:workerID "
# sql to get associated workers
sqlG = "SELECT {0} FROM {1} ".format(WorkSpec.column_names(), workTableName)
sqlG += "WHERE workerID=:workerID "
# get workerIDs
timeNow = datetime.datetime.utcnow()
lockTimeLimit = timeNow - datetime.timedelta(seconds=lock_interval)
varMap = dict()
varMap[':status1'] = WorkSpec.ST_running
varMap[':status2'] = WorkSpec.ST_submitted
varMap[':eventsRequest'] = WorkSpec.EV_requestEvents
varMap[':lockTimeLimit'] = lockTimeLimit
self.execute(sqlW, varMap)
resW = self.cur.fetchall()
tmpWorkers = dict()
for tmpWorkerID, tmpWorkStatus in resW:
tmpWorkers[tmpWorkerID] = tmpWorkStatus
retVal = {}
for workerID, workStatus in iteritems(tmpWorkers):
# lock worker
varMap = dict()
varMap[':workerID'] = workerID
varMap[':timeNow'] = timeNow
varMap[':status'] = workStatus
varMap[':eventsRequest'] = WorkSpec.EV_requestEvents
varMap[':lockTimeLimit'] = lockTimeLimit
varMap[':lockedBy'] = locked_by
self.execute(sqlL, varMap)
nRow = self.cur.rowcount
# commit
self.commit()
# skip if not locked
if nRow == 0:
continue
# get worker
varMap = dict()
varMap[':workerID'] = workerID
self.execute(sqlG, varMap)
resG = self.cur.fetchone()
workSpec = WorkSpec()
workSpec.pack(resG)
if workSpec.computingSite not in retVal:
retVal[workSpec.computingSite] = []
retVal[workSpec.computingSite].append(workSpec)
tmpLog.debug('got {0} workers'.format(len(retVal)))
return retVal
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return {}
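    # note: the return value maps computingSite to the list of locked WorkSpec objects
    # that requested events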
# update jobs and workers
def update_jobs_workers(self, jobspec_list, workspec_list, locked_by, panda_ids_list=None):
try:
timeNow = datetime.datetime.utcnow()
# sql to check job
sqlCJ = "SELECT status FROM {0} WHERE PandaID=:PandaID FOR UPDATE ".format(jobTableName)
# sql to check file
sqlFC = "SELECT {0} FROM {1} ".format(FileSpec.column_names(), fileTableName)
sqlFC += "WHERE PandaID=:PandaID AND lfn=:lfn "
# sql to get all LFNs
sqlFL = "SELECT lfn FROM {0} ".format(fileTableName)
sqlFL += "WHERE PandaID=:PandaID AND fileType<>:type "
# sql to check file with eventRangeID
sqlFE = "SELECT 1 c FROM {0} ".format(fileTableName)
sqlFE += "WHERE PandaID=:PandaID AND lfn=:lfn AND eventRangeID=:eventRangeID ".format(fileTableName)
# sql to insert file
sqlFI = "INSERT INTO {0} ({1}) ".format(fileTableName, FileSpec.column_names())
sqlFI += FileSpec.bind_values_expression()
# sql to get pending files
sqlFP = "SELECT fileID,fsize,lfn FROM {0} ".format(fileTableName)
sqlFP += "WHERE PandaID=:PandaID AND status=:status AND fileType<>:type "
# sql to get provenanceID,workerID for pending files
sqlPW = "SELECT SUM(fsize),provenanceID,workerID FROM {0} ".format(fileTableName)
sqlPW += "WHERE PandaID=:PandaID AND status=:status AND fileType<>:type "
sqlPW += "GROUP BY provenanceID,workerID "
# sql to update pending files
sqlFU = "UPDATE {0} ".format(fileTableName)
sqlFU += "SET status=:status,zipFileID=:zipFileID "
sqlFU += "WHERE fileID=:fileID "
# sql to check event
sqlEC = "SELECT eventRangeID,eventStatus FROM {0} ".format(eventTableName)
sqlEC += "WHERE PandaID=:PandaID AND eventRangeID IS NOT NULL "
# sql to check associated file
sqlEF = "SELECT eventRangeID,status FROM {0} ".format(fileTableName)
sqlEF += "WHERE PandaID=:PandaID AND eventRangeID IS NOT NULL "
# sql to insert event
sqlEI = "INSERT INTO {0} ({1}) ".format(eventTableName, EventSpec.column_names())
sqlEI += EventSpec.bind_values_expression()
# sql to update event
sqlEU = "UPDATE {0} ".format(eventTableName)
sqlEU += "SET eventStatus=:eventStatus,subStatus=:subStatus "
sqlEU += "WHERE PandaID=:PandaID AND eventRangeID=:eventRangeID "
# sql to check if relationship is already available
sqlCR = "SELECT 1 c FROM {0} WHERE PandaID=:PandaID AND workerID=:workerID ".format(jobWorkerTableName)
# sql to insert job and worker relationship
sqlIR = "INSERT INTO {0} ({1}) ".format(jobWorkerTableName, JobWorkerRelationSpec.column_names())
sqlIR += JobWorkerRelationSpec.bind_values_expression()
# count number of workers
sqlNW = "SELECT DISTINCT t.workerID FROM {0} t, {1} w ".format(jobWorkerTableName, workTableName)
sqlNW += "WHERE t.PandaID=:PandaID AND w.workerID=t.workerID "
sqlNW += "AND w.status IN (:st_submitted,:st_running,:st_idle) "
# update job
if jobspec_list is not None:
if len(workspec_list) > 0 and workspec_list[0].mapType == WorkSpec.MT_MultiWorkers:
isMultiWorkers = True
else:
isMultiWorkers = False
for jobSpec in jobspec_list:
tmpLog = core_utils.make_logger(_logger, 'PandaID={0} by {1}'.format(jobSpec.PandaID, locked_by),
method_name='update_jobs_workers')
# check job
varMap = dict()
varMap[':PandaID'] = jobSpec.PandaID
self.execute(sqlCJ, varMap)
resCJ = self.cur.fetchone()
tmpJobStatus, = resCJ
# don't update cancelled jobs
                    if tmpJobStatus in ['cancelled']:
pass
else:
# get nWorkers
tmpLog.debug('start')
activeWorkers = set()
if isMultiWorkers:
varMap = dict()
varMap[':PandaID'] = jobSpec.PandaID
varMap[':st_submitted'] = WorkSpec.ST_submitted
varMap[':st_running'] = WorkSpec.ST_running
varMap[':st_idle'] = WorkSpec.ST_idle
self.execute(sqlNW, varMap)
resNW = self.cur.fetchall()
for tmpWorkerID, in resNW:
activeWorkers.add(tmpWorkerID)
jobSpec.nWorkers = len(activeWorkers)
# get all LFNs
allLFNs = set()
varMap = dict()
varMap[':PandaID'] = jobSpec.PandaID
varMap[':type'] = 'input'
self.execute(sqlFL, varMap)
resFL = self.cur.fetchall()
for tmpLFN, in resFL:
allLFNs.add(tmpLFN)
# insert files
nFiles = 0
fileIdMap = {}
zipFileRes = dict()
for fileSpec in jobSpec.outFiles:
# insert file
if fileSpec.lfn not in allLFNs:
if jobSpec.zipPerMB is None or fileSpec.isZip in [0, 1]:
fileSpec.status = 'defined'
jobSpec.hasOutFile = JobSpec.HO_hasOutput
else:
fileSpec.status = 'pending'
varMap = fileSpec.values_list()
self.execute(sqlFI, varMap)
fileSpec.fileID = self.cur.lastrowid
nFiles += 1
# mapping between event range ID and file ID
if fileSpec.eventRangeID is not None:
fileIdMap[fileSpec.eventRangeID] = fileSpec.fileID
# associate to itself
if fileSpec.isZip == 1:
varMap = dict()
varMap[':status'] = fileSpec.status
varMap[':fileID'] = fileSpec.fileID
varMap[':zipFileID'] = fileSpec.fileID
self.execute(sqlFU, varMap)
elif fileSpec.isZip == 1 and fileSpec.eventRangeID is not None:
# add a fake file with eventRangeID which has the same lfn/zipFileID as zip file
varMap = dict()
varMap[':PandaID'] = fileSpec.PandaID
varMap[':lfn'] = fileSpec.lfn
varMap[':eventRangeID'] = fileSpec.eventRangeID
self.execute(sqlFE, varMap)
resFE = self.cur.fetchone()
if resFE is None:
if fileSpec.lfn not in zipFileRes:
# get file
varMap = dict()
varMap[':PandaID'] = fileSpec.PandaID
varMap[':lfn'] = fileSpec.lfn
self.execute(sqlFC, varMap)
resFC = self.cur.fetchone()
zipFileRes[fileSpec.lfn] = resFC
# associate to existing zip
resFC = zipFileRes[fileSpec.lfn]
zipFileSpec = FileSpec()
zipFileSpec.pack(resFC)
fileSpec.status = 'zipped'
fileSpec.zipFileID = zipFileSpec.zipFileID
varMap = fileSpec.values_list()
self.execute(sqlFI, varMap)
nFiles += 1
# mapping between event range ID and file ID
fileIdMap[fileSpec.eventRangeID] = self.cur.lastrowid
if nFiles > 0:
tmpLog.debug('inserted {0} files'.format(nFiles))
# check pending files
if jobSpec.zipPerMB is not None and \
not (jobSpec.zipPerMB == 0 and jobSpec.subStatus != 'to_transfer'):
# get workerID and provenanceID of pending files
zippedFileIDs = []
varMap = dict()
varMap[':PandaID'] = jobSpec.PandaID
varMap[':status'] = 'pending'
varMap[':type'] = 'input'
self.execute(sqlPW, varMap)
resPW = self.cur.fetchall()
for subTotalSize, tmpProvenanceID, tmpWorkerID in resPW:
if jobSpec.subStatus == 'to_transfer' \
or (jobSpec.zipPerMB > 0 and subTotalSize > jobSpec.zipPerMB * 1024 * 1024) \
or (tmpWorkerID is not None and tmpWorkerID not in activeWorkers):
sqlFPx = sqlFP
varMap = dict()
varMap[':PandaID'] = jobSpec.PandaID
varMap[':status'] = 'pending'
varMap[':type'] = 'input'
if tmpProvenanceID is None:
sqlFPx += 'AND provenanceID IS NULL '
else:
varMap[':provenanceID'] = tmpProvenanceID
sqlFPx += 'AND provenanceID=:provenanceID '
if tmpWorkerID is None:
sqlFPx += 'AND workerID IS NULL '
else:
varMap[':workerID'] = tmpWorkerID
sqlFPx += 'AND workerID=:workerID'
# get pending files
self.execute(sqlFPx, varMap)
resFP = self.cur.fetchall()
tmpLog.debug('got {0} pending files for workerID={1} provenanceID={2}'.format(
len(resFP),
tmpWorkerID,
tmpProvenanceID))
# make subsets
subTotalSize = 0
subFileIDs = []
for tmpFileID, tmpFsize, tmpLFN in resFP:
if jobSpec.zipPerMB > 0 and subTotalSize > 0 \
and (subTotalSize + tmpFsize > jobSpec.zipPerMB * 1024 * 1024):
zippedFileIDs.append(subFileIDs)
subFileIDs = []
subTotalSize = 0
subTotalSize += tmpFsize
subFileIDs.append((tmpFileID, tmpLFN))
if (jobSpec.subStatus == 'to_transfer'
or (jobSpec.zipPerMB > 0 and subTotalSize > jobSpec.zipPerMB * 1024 * 1024)
or (tmpWorkerID is not None and tmpWorkerID not in activeWorkers)) \
and len(subFileIDs) > 0:
zippedFileIDs.append(subFileIDs)
# make zip files
for subFileIDs in zippedFileIDs:
# insert zip file
fileSpec = FileSpec()
fileSpec.status = 'zipping'
fileSpec.lfn = 'panda.' + subFileIDs[0][-1] + '.zip'
fileSpec.scope = 'panda'
fileSpec.fileType = 'zip_output'
fileSpec.PandaID = jobSpec.PandaID
fileSpec.taskID = jobSpec.taskID
fileSpec.isZip = 1
varMap = fileSpec.values_list()
self.execute(sqlFI, varMap)
# update pending files
varMaps = []
for tmpFileID, tmpLFN in subFileIDs:
varMap = dict()
varMap[':status'] = 'zipped'
varMap[':fileID'] = tmpFileID
varMap[':zipFileID'] = self.cur.lastrowid
varMaps.append(varMap)
self.executemany(sqlFU, varMaps)
# set zip output flag
if len(zippedFileIDs) > 0:
jobSpec.hasOutFile = JobSpec.HO_hasZipOutput
# get event ranges and file stat
eventFileStat = dict()
eventRangesSet = set()
doneEventRangesSet = set()
if len(jobSpec.events) > 0:
# get event ranges
varMap = dict()
varMap[':PandaID'] = jobSpec.PandaID
self.execute(sqlEC, varMap)
resEC = self.cur.fetchall()
for tmpEventRangeID, tmpEventStatus in resEC:
if tmpEventStatus in ['running']:
eventRangesSet.add(tmpEventRangeID)
else:
doneEventRangesSet.add(tmpEventRangeID)
# check associated file
varMap = dict()
varMap[':PandaID'] = jobSpec.PandaID
self.execute(sqlEF, varMap)
resEF = self.cur.fetchall()
for tmpEventRangeID, tmpStat in resEF:
eventFileStat[tmpEventRangeID] = tmpStat
# insert or update events
varMapsEI = []
varMapsEU = []
for eventSpec in jobSpec.events:
# already done
if eventSpec.eventRangeID in doneEventRangesSet:
continue
# set subStatus
if eventSpec.eventStatus == 'finished':
# check associated file
if eventSpec.eventRangeID not in eventFileStat or \
eventFileStat[eventSpec.eventRangeID] == 'finished':
eventSpec.subStatus = 'finished'
elif eventFileStat[eventSpec.eventRangeID] == 'failed':
eventSpec.eventStatus = 'failed'
eventSpec.subStatus = 'failed'
else:
eventSpec.subStatus = 'transferring'
else:
eventSpec.subStatus = eventSpec.eventStatus
# set fileID
if eventSpec.eventRangeID in fileIdMap:
eventSpec.fileID = fileIdMap[eventSpec.eventRangeID]
# insert or update event
if eventSpec.eventRangeID not in eventRangesSet:
varMap = eventSpec.values_list()
varMapsEI.append(varMap)
else:
varMap = dict()
varMap[':PandaID'] = jobSpec.PandaID
varMap[':eventRangeID'] = eventSpec.eventRangeID
varMap[':eventStatus'] = eventSpec.eventStatus
varMap[':subStatus'] = eventSpec.subStatus
varMapsEU.append(varMap)
if len(varMapsEI) > 0:
self.executemany(sqlEI, varMapsEI)
                                tmpLog.debug('inserted {0} events'.format(len(varMapsEI)))
if len(varMapsEU) > 0:
self.executemany(sqlEU, varMapsEU)
                                tmpLog.debug('updated {0} events'.format(len(varMapsEU)))
# update job
varMap = jobSpec.values_map(only_changed=True)
if len(varMap) > 0:
tmpLog.debug('update job')
# sql to update job
sqlJ = "UPDATE {0} SET {1} ".format(jobTableName, jobSpec.bind_update_changes_expression())
sqlJ += "WHERE PandaID=:PandaID "
jobSpec.lockedBy = None
jobSpec.modificationTime = timeNow
varMap = jobSpec.values_map(only_changed=True)
varMap[':PandaID'] = jobSpec.PandaID
self.execute(sqlJ, varMap)
nRow = self.cur.rowcount
tmpLog.debug('done with {0}'.format(nRow))
tmpLog.debug('all done for job')
# commit
self.commit()
# update worker
retVal = True
for idxW, workSpec in enumerate(workspec_list):
tmpLog = core_utils.make_logger(_logger, 'workerID={0}'.format(workSpec.workerID),
method_name='update_jobs_workers')
tmpLog.debug('update worker')
workSpec.lockedBy = None
if workSpec.status == WorkSpec.ST_running and workSpec.startTime is None:
workSpec.startTime = timeNow
elif workSpec.is_final_status():
if workSpec.startTime is None:
workSpec.startTime = timeNow
if workSpec.endTime is None:
workSpec.endTime = timeNow
if not workSpec.nextLookup:
if workSpec.has_updated_attributes():
workSpec.modificationTime = timeNow
else:
workSpec.nextLookup = False
# sql to update worker
sqlW = "UPDATE {0} SET {1} ".format(workTableName, workSpec.bind_update_changes_expression())
sqlW += "WHERE workerID=:workerID AND lockedBy=:cr_lockedBy "
sqlW += "AND (status NOT IN (:st1,:st2,:st3,:st4)) "
varMap = workSpec.values_map(only_changed=True)
if len(varMap) > 0:
varMap[':workerID'] = workSpec.workerID
varMap[':cr_lockedBy'] = locked_by
varMap[':st1'] = WorkSpec.ST_cancelled
varMap[':st2'] = WorkSpec.ST_finished
varMap[':st3'] = WorkSpec.ST_failed
varMap[':st4'] = WorkSpec.ST_missed
self.execute(sqlW, varMap)
nRow = self.cur.rowcount
tmpLog.debug('done with {0}'.format(nRow))
if nRow == 0:
retVal = False
# insert relationship if necessary
if panda_ids_list is not None and len(panda_ids_list) > idxW:
varMapsIR = []
for pandaID in panda_ids_list[idxW]:
varMap = dict()
varMap[':PandaID'] = pandaID
varMap[':workerID'] = workSpec.workerID
self.execute(sqlCR, varMap)
resCR = self.cur.fetchone()
if resCR is None:
jwRelation = JobWorkerRelationSpec()
jwRelation.PandaID = pandaID
jwRelation.workerID = workSpec.workerID
varMap = jwRelation.values_list()
varMapsIR.append(varMap)
if len(varMapsIR) > 0:
self.executemany(sqlIR, varMapsIR)
tmpLog.debug('all done for worker')
# commit
self.commit()
# return
return retVal
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return False
# get jobs with workerID
def get_jobs_with_worker_id(self, worker_id, locked_by, with_file=False, only_running=False, slim=False):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, 'workerID={0}'.format(worker_id),
method_name='get_jobs_with_worker_id')
tmpLog.debug('start')
# sql to get PandaIDs
sqlP = "SELECT PandaID FROM {0} ".format(jobWorkerTableName)
sqlP += "WHERE workerID=:workerID "
# sql to get jobs
sqlJ = "SELECT {0} FROM {1} ".format(JobSpec.column_names(slim=slim), jobTableName)
sqlJ += "WHERE PandaID=:PandaID "
# sql to get job parameters
sqlJJ = "SELECT jobParams FROM {0} ".format(jobTableName)
sqlJJ += "WHERE PandaID=:PandaID "
# sql to lock job
sqlL = "UPDATE {0} SET modificationTime=:timeNow,lockedBy=:lockedBy ".format(jobTableName)
sqlL += "WHERE PandaID=:PandaID "
# sql to get files
sqlF = "SELECT {0} FROM {1} ".format(FileSpec.column_names(), fileTableName)
sqlF += "WHERE PandaID=:PandaID AND zipFileID IS NULL "
# get jobs
jobChunkList = []
timeNow = datetime.datetime.utcnow()
varMap = dict()
varMap[':workerID'] = worker_id
self.execute(sqlP, varMap)
resW = self.cur.fetchall()
for pandaID, in resW:
# get job
varMap = dict()
varMap[':PandaID'] = pandaID
self.execute(sqlJ, varMap)
resJ = self.cur.fetchone()
# make job
jobSpec = JobSpec()
jobSpec.pack(resJ, slim=slim)
if only_running and jobSpec.subStatus not in ['running', 'submitted', 'queued', 'idle']:
continue
jobSpec.lockedBy = locked_by
# for old jobs without extractions
if jobSpec.jobParamsExtForLog is None:
varMap = dict()
varMap[':PandaID'] = pandaID
self.execute(sqlJJ, varMap)
resJJ = self.cur.fetchone()
jobSpec.set_blob_attribute('jobParams', resJJ[0])
jobSpec.get_output_file_attributes()
jobSpec.get_logfile_info()
# lock job
if locked_by is not None:
varMap = dict()
varMap[':PandaID'] = pandaID
varMap[':lockedBy'] = locked_by
varMap[':timeNow'] = timeNow
self.execute(sqlL, varMap)
# get files
if with_file:
varMap = dict()
varMap[':PandaID'] = pandaID
self.execute(sqlF, varMap)
resFileList = self.cur.fetchall()
for resFile in resFileList:
fileSpec = FileSpec()
fileSpec.pack(resFile)
jobSpec.add_file(fileSpec)
# append
jobChunkList.append(jobSpec)
# commit
self.commit()
tmpLog.debug('got {0} job chunks'.format(len(jobChunkList)))
return jobChunkList
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return []
# get ready workers
def get_ready_workers(self, queue_name, n_ready):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, 'queue={0}'.format(queue_name),
method_name='get_ready_workers')
tmpLog.debug('start')
# sql to get workers
sqlG = "SELECT {0} FROM {1} ".format(WorkSpec.column_names(), workTableName)
sqlG += "WHERE computingSite=:queueName AND (status=:status_ready OR (status=:status_running "
sqlG += "AND nJobsToReFill IS NOT NULL AND nJobsToReFill>0)) "
sqlG += "ORDER BY modificationTime LIMIT {0} ".format(n_ready)
# sql to get associated PandaIDs
sqlP = "SELECT COUNT(*) cnt FROM {0} ".format(jobWorkerTableName)
sqlP += "WHERE workerID=:workerID "
# get workers
varMap = dict()
varMap[':status_ready'] = WorkSpec.ST_ready
varMap[':status_running'] = WorkSpec.ST_running
varMap[':queueName'] = queue_name
self.execute(sqlG, varMap)
resList = self.cur.fetchall()
retVal = []
for res in resList:
workSpec = WorkSpec()
workSpec.pack(res)
# get number of jobs
varMap = dict()
varMap[':workerID'] = workSpec.workerID
self.execute(sqlP, varMap)
resP = self.cur.fetchone()
if resP is not None and resP[0] > 0:
workSpec.nJobs = resP[0]
retVal.append(workSpec)
# commit
self.commit()
tmpLog.debug('got {0}'.format(str(retVal)))
return retVal
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return []
# get a worker
def get_worker_with_id(self, worker_id):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, 'workerID={0}'.format(worker_id),
method_name='get_worker_with_id')
tmpLog.debug('start')
# sql to get a worker
sqlG = "SELECT {0} FROM {1} ".format(WorkSpec.column_names(), workTableName)
sqlG += "WHERE workerID=:workerID "
# get a worker
varMap = dict()
varMap[':workerID'] = worker_id
self.execute(sqlG, varMap)
res = self.cur.fetchone()
workSpec = WorkSpec()
workSpec.pack(res)
# commit
self.commit()
tmpLog.debug('got')
return workSpec
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return None
# get jobs to trigger or check output transfer or zip output
def get_jobs_for_stage_out(self, max_jobs, interval_without_lock, interval_with_lock, locked_by,
sub_status, has_out_file_flag, bad_has_out_file_flag=None,
max_files_per_job=None):
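        """
        Get jobs whose output needs to be zipped, transferred, or checked for stage-out.

        Candidate jobs are selected by subStatus or hasOutFile and locked by setting
        stagerTime/stagerLock so that concurrent stager threads do not pick up the same
        job. The attempt number of each returned output file is incremented.
        """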
try:
# get logger
msgPfx = 'thr={0}'.format(locked_by)
tmpLog = core_utils.make_logger(_logger, msgPfx, method_name='get_jobs_for_stage_out')
tmpLog.debug('start')
# sql to get PandaIDs without FOR UPDATE which causes deadlock in MariaDB
sql = "SELECT PandaID FROM {0} ".format(jobTableName)
sql += "WHERE "
sql += "(subStatus=:subStatus OR hasOutFile=:hasOutFile) "
if bad_has_out_file_flag is not None:
sql += "AND (hasOutFile IS NULL OR hasOutFile<>:badHasOutFile) "
sql += "AND (stagerTime IS NULL "
sql += "OR (stagerTime<:lockTimeLimit AND stagerLock IS NOT NULL) "
sql += "OR (stagerTime<:updateTimeLimit AND stagerLock IS NULL) "
sql += ") "
sql += "ORDER BY stagerTime "
sql += "LIMIT {0} ".format(max_jobs)
# sql to lock job
sqlL = "UPDATE {0} SET stagerTime=:timeNow,stagerLock=:lockedBy ".format(jobTableName)
sqlL += "WHERE PandaID=:PandaID AND "
sqlL += "(subStatus=:subStatus OR hasOutFile=:hasOutFile) "
if bad_has_out_file_flag is not None:
sqlL += "AND (hasOutFile IS NULL OR hasOutFile<>:badHasOutFile) "
sqlL += "AND (stagerTime IS NULL "
sqlL += "OR (stagerTime<:lockTimeLimit AND stagerLock IS NOT NULL) "
sqlL += "OR (stagerTime<:updateTimeLimit AND stagerLock IS NULL) "
sqlL += ") "
# sql to get job
sqlJ = "SELECT {0} FROM {1} ".format(JobSpec.column_names(slim=True), jobTableName)
sqlJ += "WHERE PandaID=:PandaID "
# sql to get job parameters
sqlJJ = "SELECT jobParams FROM {0} ".format(jobTableName)
sqlJJ += "WHERE PandaID=:PandaID "
# sql to get files
sqlF = "SELECT {0} FROM {1} ".format(FileSpec.column_names(), fileTableName)
sqlF += "WHERE PandaID=:PandaID AND status=:status AND fileType<>:type "
if max_files_per_job is not None and max_files_per_job > 0:
sqlF += "LIMIT {0} ".format(max_files_per_job)
# sql to get associated files
sqlAF = "SELECT {0} FROM {1} ".format(FileSpec.column_names(), fileTableName)
sqlAF += "WHERE PandaID=:PandaID AND zipFileID=:zipFileID AND fileType<>:type "
# sql to increment attempt number
sqlFU = "UPDATE {0} SET attemptNr=attemptNr+1 WHERE fileID=:fileID ".format(fileTableName)
# get jobs
timeNow = datetime.datetime.utcnow()
lockTimeLimit = timeNow - datetime.timedelta(seconds=interval_with_lock)
updateTimeLimit = timeNow - datetime.timedelta(seconds=interval_without_lock)
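            # lockTimeLimit re-takes stale locks held longer than interval_with_lock;
            # updateTimeLimit picks up unlocked jobs untouched for interval_without_lock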
varMap = dict()
varMap[':subStatus'] = sub_status
varMap[':hasOutFile'] = has_out_file_flag
if bad_has_out_file_flag is not None:
varMap[':badHasOutFile'] = bad_has_out_file_flag
varMap[':lockTimeLimit'] = lockTimeLimit
varMap[':updateTimeLimit'] = updateTimeLimit
self.execute(sql, varMap)
resList = self.cur.fetchall()
jobSpecList = []
for pandaID, in resList:
# lock job
varMap = dict()
varMap[':PandaID'] = pandaID
varMap[':timeNow'] = timeNow
varMap[':lockedBy'] = locked_by
varMap[':lockTimeLimit'] = lockTimeLimit
varMap[':updateTimeLimit'] = updateTimeLimit
varMap[':subStatus'] = sub_status
varMap[':hasOutFile'] = has_out_file_flag
if bad_has_out_file_flag is not None:
varMap[':badHasOutFile'] = bad_has_out_file_flag
self.execute(sqlL, varMap)
nRow = self.cur.rowcount
# commit
self.commit()
if nRow > 0:
# get job
varMap = dict()
varMap[':PandaID'] = pandaID
self.execute(sqlJ, varMap)
resJ = self.cur.fetchone()
# make job
jobSpec = JobSpec()
jobSpec.pack(resJ, slim=True)
jobSpec.stagerLock = locked_by
jobSpec.stagerTime = timeNow
# for old jobs without extractions
if jobSpec.jobParamsExtForLog is None:
varMap = dict()
varMap[':PandaID'] = pandaID
self.execute(sqlJJ, varMap)
resJJ = self.cur.fetchone()
jobSpec.set_blob_attribute('jobParams', resJJ[0])
jobSpec.get_output_file_attributes()
jobSpec.get_logfile_info()
# get files
varMap = dict()
varMap[':PandaID'] = jobSpec.PandaID
varMap[':type'] = 'input'
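                    # map the output-file flag to the file status used to select files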
if has_out_file_flag == JobSpec.HO_hasOutput:
varMap[':status'] = 'defined'
elif has_out_file_flag == JobSpec.HO_hasZipOutput:
varMap[':status'] = 'zipping'
else:
varMap[':status'] = 'transferring'
self.execute(sqlF, varMap)
resFileList = self.cur.fetchall()
for resFile in resFileList:
fileSpec = FileSpec()
fileSpec.pack(resFile)
fileSpec.attemptNr += 1
jobSpec.add_out_file(fileSpec)
# increment attempt number
varMap = dict()
varMap[':fileID'] = fileSpec.fileID
self.execute(sqlFU, varMap)
jobSpecList.append(jobSpec)
# commit
if len(resFileList) > 0:
self.commit()
# get associated files
if has_out_file_flag == JobSpec.HO_hasZipOutput:
for fileSpec in jobSpec.outFiles:
varMap = dict()
varMap[':PandaID'] = fileSpec.PandaID
varMap[':zipFileID'] = fileSpec.fileID
varMap[':type'] = 'input'
self.execute(sqlAF, varMap)
resAFs = self.cur.fetchall()
for resAF in resAFs:
assFileSpec = FileSpec()
assFileSpec.pack(resAF)
fileSpec.add_associated_file(assFileSpec)
# get associated workers
tmpWorkers = self.get_workers_with_job_id(jobSpec.PandaID, use_commit=False)
jobSpec.add_workspec_list(tmpWorkers)
tmpLog.debug('got {0} jobs'.format(len(jobSpecList)))
return jobSpecList
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return []
# update job for stage-out
def update_job_for_stage_out(self, jobspec, update_event_status, locked_by):
try:
# get logger
tmpLog = core_utils.make_logger(_logger,
'PandaID={0} subStatus={1} thr={2}'.format(jobspec.PandaID,
jobspec.subStatus,
locked_by),
method_name='update_job_for_stage_out')
tmpLog.debug('start')
# sql to update event
sqlEU = "UPDATE {0} ".format(eventTableName)
sqlEU += "SET eventStatus=:eventStatus,subStatus=:subStatus "
sqlEU += "WHERE eventRangeID=:eventRangeID "
sqlEU += "AND eventStatus<>:statusFailed AND subStatus<>:statusDone "
# sql to update associated events
sqlAE = "UPDATE {0} ".format(eventTableName)
sqlAE += "SET eventStatus=:eventStatus,subStatus=:subStatus "
sqlAE += "WHERE eventRangeID IN "
sqlAE += "(SELECT eventRangeID FROM {0} ".format(fileTableName)
sqlAE += "WHERE PandaID=:PandaID AND zipFileID=:zipFileID) "
sqlAE += "AND eventStatus<>:statusFailed AND subStatus<>:statusDone "
# sql to lock job again
sqlLJ = "UPDATE {0} SET stagerTime=:timeNow ".format(jobTableName)
sqlLJ += "WHERE PandaID=:PandaID AND stagerLock=:lockedBy "
# sql to check lock
sqlLC = "SELECT stagerLock FROM {0} ".format(jobTableName)
sqlLC += "WHERE PandaID=:PandaID "
# lock
varMap = dict()
varMap[':PandaID'] = jobspec.PandaID
varMap[':lockedBy'] = locked_by
varMap[':timeNow'] = datetime.datetime.utcnow()
self.execute(sqlLJ, varMap)
nRow = self.cur.rowcount
# check just in case since nRow can be 0 if two lock actions are too close in time
if nRow == 0:
varMap = dict()
varMap[':PandaID'] = jobspec.PandaID
self.execute(sqlLC, varMap)
resLC = self.cur.fetchone()
if resLC is not None and resLC[0] == locked_by:
nRow = 1
# commit
self.commit()
if nRow == 0:
tmpLog.debug('skip since locked by another')
return None
# update files
tmpLog.debug('update {0} files'.format(len(jobspec.outFiles)))
for fileSpec in jobspec.outFiles:
# sql to update file
sqlF = "UPDATE {0} SET {1} ".format(fileTableName, fileSpec.bind_update_changes_expression())
sqlF += "WHERE PandaID=:PandaID AND fileID=:fileID "
varMap = fileSpec.values_map(only_changed=True)
updated = False
if len(varMap) > 0:
varMap[':PandaID'] = fileSpec.PandaID
varMap[':fileID'] = fileSpec.fileID
self.execute(sqlF, varMap)
updated = True
# update event status
if update_event_status:
if fileSpec.eventRangeID is not None:
varMap = dict()
varMap[':eventRangeID'] = fileSpec.eventRangeID
varMap[':eventStatus'] = fileSpec.status
varMap[':subStatus'] = fileSpec.status
varMap[':statusFailed'] = 'failed'
varMap[':statusDone'] = 'done'
self.execute(sqlEU, varMap)
updated = True
if fileSpec.isZip == 1:
# update files associated with zip file
varMap = dict()
varMap[':PandaID'] = fileSpec.PandaID
varMap[':zipFileID'] = fileSpec.fileID
varMap[':eventStatus'] = fileSpec.status
varMap[':subStatus'] = fileSpec.status
varMap[':statusFailed'] = 'failed'
varMap[':statusDone'] = 'done'
self.execute(sqlAE, varMap)
updated = True
nRow = self.cur.rowcount
tmpLog.debug('updated {0} events'.format(nRow))
if updated:
# lock job again
varMap = dict()
varMap[':PandaID'] = jobspec.PandaID
varMap[':lockedBy'] = locked_by
varMap[':timeNow'] = datetime.datetime.utcnow()
self.execute(sqlLJ, varMap)
# commit
self.commit()
nRow = self.cur.rowcount
if nRow == 0:
tmpLog.debug('skip since locked by another')
return None
# count files
sqlC = "SELECT COUNT(*) cnt,status FROM {0} ".format(fileTableName)
sqlC += "WHERE PandaID=:PandaID GROUP BY status "
varMap = dict()
varMap[':PandaID'] = jobspec.PandaID
self.execute(sqlC, varMap)
resC = self.cur.fetchall()
cntMap = {}
for cnt, fileStatus in resC:
cntMap[fileStatus] = cnt
# set job attributes
jobspec.stagerLock = None
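            # derive hasOutFile from the remaining file statuses; zipping takes precedence over defined, then transferring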
if 'zipping' in cntMap:
jobspec.hasOutFile = JobSpec.HO_hasZipOutput
elif 'defined' in cntMap:
jobspec.hasOutFile = JobSpec.HO_hasOutput
elif 'transferring' in cntMap:
jobspec.hasOutFile = JobSpec.HO_hasTransfer
else:
jobspec.hasOutFile = JobSpec.HO_noOutput
if jobspec.subStatus == 'to_transfer':
# change subStatus when no more files to trigger transfer
if jobspec.hasOutFile not in [JobSpec.HO_hasOutput, JobSpec.HO_hasZipOutput]:
jobspec.subStatus = 'transferring'
jobspec.stagerTime = None
elif jobspec.subStatus == 'transferring':
# all done
if jobspec.hasOutFile == JobSpec.HO_noOutput:
jobspec.trigger_propagation()
if 'failed' in cntMap:
jobspec.status = 'failed'
jobspec.subStatus = 'failed_to_stage_out'
else:
jobspec.subStatus = 'staged'
# get finished files
jobspec.reset_out_file()
sqlFF = "SELECT {0} FROM {1} ".format(FileSpec.column_names(), fileTableName)
sqlFF += "WHERE PandaID=:PandaID AND status=:status AND fileType IN (:type1,:type2) "
varMap = dict()
varMap[':PandaID'] = jobspec.PandaID
varMap[':status'] = 'finished'
varMap[':type1'] = 'output'
varMap[':type2'] = 'log'
self.execute(sqlFF, varMap)
resFileList = self.cur.fetchall()
for resFile in resFileList:
fileSpec = FileSpec()
fileSpec.pack(resFile)
jobspec.add_out_file(fileSpec)
# make file report
jobspec.outputFilesToReport = core_utils.get_output_file_report(jobspec)
# sql to update job
sqlJ = "UPDATE {0} SET {1} ".format(jobTableName, jobspec.bind_update_changes_expression())
sqlJ += "WHERE PandaID=:PandaID AND stagerLock=:lockedBy "
# update job
varMap = jobspec.values_map(only_changed=True)
varMap[':PandaID'] = jobspec.PandaID
varMap[':lockedBy'] = locked_by
self.execute(sqlJ, varMap)
# commit
self.commit()
tmpLog.debug('done')
# return
return jobspec.subStatus
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return None
# add a seq number
def add_seq_number(self, number_name, init_value):
try:
# check if already there
sqlC = "SELECT curVal FROM {0} WHERE numberName=:numberName ".format(seqNumberTableName)
varMap = dict()
varMap[':numberName'] = number_name
self.execute(sqlC, varMap)
res = self.cur.fetchone()
# insert if missing
if res is None:
# make spec
seqNumberSpec = SeqNumberSpec()
seqNumberSpec.numberName = number_name
seqNumberSpec.curVal = init_value
# insert
sqlI = "INSERT INTO {0} ({1}) ".format(seqNumberTableName, SeqNumberSpec.column_names())
sqlI += SeqNumberSpec.bind_values_expression()
varMap = seqNumberSpec.values_list()
self.execute(sqlI, varMap)
# commit
self.commit()
return True
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return False
# get next value for a seq number
def get_next_seq_number(self, number_name):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, 'name={0}'.format(number_name),
method_name='get_next_seq_number')
# increment
sqlU = "UPDATE {0} SET curVal=curVal+1 WHERE numberName=:numberName ".format(seqNumberTableName)
varMap = dict()
varMap[':numberName'] = number_name
self.execute(sqlU, varMap)
# get
sqlG = "SELECT curVal FROM {0} WHERE numberName=:numberName ".format(seqNumberTableName)
varMap = dict()
varMap[':numberName'] = number_name
self.execute(sqlG, varMap)
retVal, = self.cur.fetchone()
# commit
self.commit()
tmpLog.debug('got {0}'.format(retVal))
return retVal
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return None
# get last update time for a cached info
def get_cache_last_update_time(self, main_key, sub_key):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, 'mainKey={0} subKey={1}'.format(main_key, sub_key),
method_name='get_cache_last_update_time')
# get
varMap = dict()
varMap[":mainKey"] = main_key
sqlU = "SELECT lastUpdate FROM {0} WHERE mainKey=:mainKey ".format(cacheTableName)
if sub_key is not None:
sqlU += "AND subKey=:subKey "
varMap[":subKey"] = sub_key
self.execute(sqlU, varMap)
retVal = self.cur.fetchone()
if retVal is not None:
retVal, = retVal
# commit
self.commit()
tmpLog.debug('got {0}'.format(retVal))
return retVal
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return None
# refresh a cached info
def refresh_cache(self, main_key, sub_key, new_info):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, 'mainKey={0} subKey={1}'.format(main_key, sub_key),
method_name='refresh_cache')
# make spec
cacheSpec = CacheSpec()
cacheSpec.lastUpdate = datetime.datetime.utcnow()
cacheSpec.data = new_info
# check if already there
varMap = dict()
varMap[":mainKey"] = main_key
sqlC = "SELECT lastUpdate FROM {0} WHERE mainKey=:mainKey ".format(cacheTableName)
if sub_key is not None:
sqlC += "AND subKey=:subKey "
varMap[":subKey"] = sub_key
self.execute(sqlC, varMap)
retC = self.cur.fetchone()
if retC is None:
# insert if missing
cacheSpec.mainKey = main_key
cacheSpec.subKey = sub_key
sqlU = "INSERT INTO {0} ({1}) ".format(cacheTableName, CacheSpec.column_names())
sqlU += CacheSpec.bind_values_expression()
varMap = cacheSpec.values_list()
else:
# update
sqlU = "UPDATE {0} SET {1} ".format(cacheTableName, cacheSpec.bind_update_changes_expression())
sqlU += "WHERE mainKey=:mainKey "
varMap = cacheSpec.values_map(only_changed=True)
varMap[":mainKey"] = main_key
if sub_key is not None:
sqlU += "AND subKey=:subKey "
varMap[":subKey"] = sub_key
self.execute(sqlU, varMap)
# commit
self.commit()
# put into global dict
cacheKey = 'cache|{0}|{1}'.format(main_key, sub_key)
globalDict = core_utils.get_global_dict()
globalDict.acquire()
globalDict[cacheKey] = cacheSpec.data
globalDict.release()
tmpLog.debug('refreshed')
return True
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return False
# get a cached info
def get_cache(self, main_key, sub_key=None):
useDB = False
try:
# get logger
tmpLog = core_utils.make_logger(_logger, 'mainKey={0} subKey={1}'.format(main_key, sub_key),
method_name='get_cache')
tmpLog.debug('start')
# get from global dict
cacheKey = 'cache|{0}|{1}'.format(main_key, sub_key)
globalDict = core_utils.get_global_dict()
# lock dict
globalDict.acquire()
# found
if cacheKey in globalDict:
# release dict
globalDict.release()
# make spec
cacheSpec = CacheSpec()
cacheSpec.data = globalDict[cacheKey]
else:
# read from database
useDB = True
sql = "SELECT {0} FROM {1} ".format(CacheSpec.column_names(), cacheTableName)
sql += "WHERE mainKey=:mainKey "
varMap = dict()
varMap[":mainKey"] = main_key
if sub_key is not None:
sql += "AND subKey=:subKey "
varMap[":subKey"] = sub_key
self.execute(sql, varMap)
resJ = self.cur.fetchone()
# commit
self.commit()
if resJ is None:
# release dict
globalDict.release()
return None
# make spec
cacheSpec = CacheSpec()
cacheSpec.pack(resJ)
# put into global dict
globalDict[cacheKey] = cacheSpec.data
# release dict
globalDict.release()
tmpLog.debug('done')
# return
return cacheSpec
except Exception:
if useDB:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return None
# store commands
def store_commands(self, command_specs):
# get logger
tmpLog = core_utils.make_logger(_logger, method_name='store_commands')
tmpLog.debug('{0} commands'.format(len(command_specs)))
if not command_specs:
return True
try:
# sql to insert a command
sql = "INSERT INTO {0} ({1}) ".format(commandTableName, CommandSpec.column_names())
sql += CommandSpec.bind_values_expression()
# loop over all commands
var_maps = []
for command_spec in command_specs:
var_map = command_spec.values_list()
var_maps.append(var_map)
# insert
self.executemany(sql, var_maps)
# commit
self.commit()
# return
return True
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(tmpLog)
# return
return False
# get commands for a receiver
def get_commands_for_receiver(self, receiver, command_pattern=None):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, method_name='get_commands_for_receiver')
tmpLog.debug('start')
# sql to get commands
varMap = dict()
varMap[':receiver'] = receiver
varMap[':processed'] = 0
sqlG = "SELECT {0} FROM {1} ".format(CommandSpec.column_names(), commandTableName)
sqlG += "WHERE receiver=:receiver AND processed=:processed "
if command_pattern is not None:
varMap[':command'] = command_pattern
if '%' in command_pattern:
sqlG += "AND command LIKE :command "
else:
sqlG += "AND command=:command "
sqlG += "FOR UPDATE "
# sql to lock command
sqlL = "UPDATE {0} SET processed=:processed WHERE command_id=:command_id ".format(commandTableName)
self.execute(sqlG, varMap)
commandSpecList = []
for res in self.cur.fetchall():
# make command
commandSpec = CommandSpec()
commandSpec.pack(res)
# lock
varMap = dict()
varMap[':command_id'] = commandSpec.command_id
varMap[':processed'] = 1
self.execute(sqlL, varMap)
# append
commandSpecList.append(commandSpec)
# commit
self.commit()
tmpLog.debug('got {0} commands'.format(len(commandSpecList)))
return commandSpecList
except Exception:
# dump error
core_utils.dump_error_message(_logger)
# return
return []
# get command ids that have been processed and need to be acknowledged to panda server
def get_commands_ack(self):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, method_name='get_commands_ack')
tmpLog.debug('start')
# sql to get commands that have been processed and need acknowledgement
sql = """
SELECT command_id FROM {0}
WHERE ack_requested=1
AND processed=1
""".format(commandTableName)
self.execute(sql)
command_ids = [row[0] for row in self.cur.fetchall()]
tmpLog.debug('command_ids {0}'.format(command_ids))
return command_ids
except Exception:
# dump error
core_utils.dump_error_message(_logger)
# return
return []
def clean_commands_by_id(self, commands_ids):
"""
Deletes the commands specified in a list of IDs
"""
# get logger
tmpLog = core_utils.make_logger(_logger, method_name='clean_commands_by_id')
try:
# sql to delete a specific command
sql = """
DELETE FROM {0}
WHERE command_id=:command_id""".format(commandTableName)
for command_id in commands_ids:
var_map = {':command_id': command_id}
self.execute(sql, var_map)
self.commit()
return True
except Exception:
self.rollback()
core_utils.dump_error_message(tmpLog)
return False
def clean_processed_commands(self):
"""
Deletes the commands that have been processed and do not need acknowledgement
"""
tmpLog = core_utils.make_logger(_logger, method_name='clean_processed_commands')
try:
# sql to delete all processed commands that do not need an ACK
sql = """
DELETE FROM {0}
WHERE (ack_requested=0 AND processed=1)
""".format(commandTableName)
self.execute(sql)
self.commit()
return True
except Exception:
self.rollback()
core_utils.dump_error_message(tmpLog)
return False
# get workers to kill
def get_workers_to_kill(self, max_workers, check_interval):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, method_name='get_workers_to_kill')
tmpLog.debug('start')
# sql to get worker IDs
sqlW = "SELECT workerID,status,configID FROM {0} ".format(workTableName)
sqlW += "WHERE killTime IS NOT NULL AND killTime<:checkTimeLimit "
sqlW += "ORDER BY killTime LIMIT {0} ".format(max_workers)
# sql to lock or release worker
sqlL = "UPDATE {0} SET killTime=:setTime ".format(workTableName)
sqlL += "WHERE workerID=:workerID "
sqlL += "AND killTime IS NOT NULL AND killTime<:checkTimeLimit "
# sql to get workers
sqlG = "SELECT {0} FROM {1} ".format(WorkSpec.column_names(), workTableName)
sqlG += "WHERE workerID=:workerID "
timeNow = datetime.datetime.utcnow()
timeLimit = timeNow - datetime.timedelta(seconds=check_interval)
# get workerIDs
varMap = dict()
varMap[':checkTimeLimit'] = timeLimit
self.execute(sqlW, varMap)
resW = self.cur.fetchall()
retVal = dict()
for workerID, workerStatus, configID in resW:
# ignore configID
if not core_utils.dynamic_plugin_change():
configID = None
# lock or release worker
varMap = dict()
varMap[':workerID'] = workerID
varMap[':checkTimeLimit'] = timeLimit
if workerStatus in (WorkSpec.ST_cancelled, WorkSpec.ST_failed, WorkSpec.ST_finished):
# release
varMap[':setTime'] = None
else:
# lock
varMap[':setTime'] = timeNow
self.execute(sqlL, varMap)
# get worker
nRow = self.cur.rowcount
if nRow == 1 and varMap[':setTime'] is not None:
varMap = dict()
varMap[':workerID'] = workerID
self.execute(sqlG, varMap)
resG = self.cur.fetchone()
workSpec = WorkSpec()
workSpec.pack(resG)
queueName = workSpec.computingSite
retVal.setdefault(queueName, dict())
retVal[queueName].setdefault(configID, [])
retVal[queueName][configID].append(workSpec)
# commit
self.commit()
            tmpLog.debug('got workers to kill for {0} queues'.format(len(retVal)))
return retVal
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return {}
# get worker stats
def get_worker_stats(self, site_name):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, method_name='get_worker_stats')
tmpLog.debug('start')
# sql to get nQueueLimit
sqlQ = "SELECT queueName,resourceType,nNewWorkers FROM {0} ".format(pandaQueueTableName)
sqlQ += "WHERE siteName=:siteName "
# get nQueueLimit
varMap = dict()
varMap[':siteName'] = site_name
self.execute(sqlQ, varMap)
resQ = self.cur.fetchall()
retMap = dict()
for computingSite, resourceType, nNewWorkers in resQ:
if resourceType not in retMap:
retMap[resourceType] = {
'running': 0,
'submitted': 0,
'to_submit': nNewWorkers
}
            # sql to get worker stats
sqlW = "SELECT wt.status, wt.computingSite, pq.resourceType, COUNT(*) cnt "
sqlW += "FROM {0} wt, {1} pq ".format(workTableName, pandaQueueTableName)
sqlW += "WHERE pq.siteName=:siteName AND wt.computingSite=pq.queueName AND wt.status IN (:st1,:st2) "
sqlW += "GROUP BY wt.status, wt.computingSite, pq.resourceType "
# get worker stats
varMap = dict()
varMap[':siteName'] = site_name
varMap[':st1'] = 'running'
varMap[':st2'] = 'submitted'
self.execute(sqlW, varMap)
resW = self.cur.fetchall()
for workerStatus, computingSite, resourceType, cnt in resW:
if resourceType not in retMap:
retMap[resourceType] = {
'running': 0,
'submitted': 0,
'to_submit': 0
}
retMap[resourceType][workerStatus] = cnt
# commit
self.commit()
tmpLog.debug('got {0}'.format(str(retMap)))
return retMap
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return {}
# get worker stats
def get_worker_stats_bulk(self, active_ups_queues):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, method_name='get_worker_stats_bulk')
tmpLog.debug('start')
# sql to get nQueueLimit
sqlQ = "SELECT queueName, resourceType, nNewWorkers FROM {0} ".format(pandaQueueTableName)
# get nQueueLimit
self.execute(sqlQ)
resQ = self.cur.fetchall()
retMap = dict()
for computingSite, resourceType, nNewWorkers in resQ:
retMap.setdefault(computingSite, {})
if resourceType and resourceType != 'ANY' and resourceType not in retMap[computingSite]:
retMap[computingSite][resourceType] = {'running': 0, 'submitted': 0, 'to_submit': nNewWorkers}
            # sql to get worker stats
sqlW = "SELECT wt.status, wt.computingSite, wt.resourceType, COUNT(*) cnt "
sqlW += "FROM {0} wt ".format(workTableName)
sqlW += "WHERE wt.status IN (:st1,:st2) "
sqlW += "GROUP BY wt.status,wt.computingSite, wt.resourceType "
# get worker stats
varMap = dict()
varMap[':st1'] = 'running'
varMap[':st2'] = 'submitted'
self.execute(sqlW, varMap)
resW = self.cur.fetchall()
for workerStatus, computingSite, resourceType, cnt in resW:
if resourceType and resourceType != 'ANY':
retMap.setdefault(computingSite, {})
retMap[computingSite].setdefault(resourceType, {'running': 0, 'submitted': 0, 'to_submit': 0})
retMap[computingSite][resourceType][workerStatus] = cnt
# if there are no jobs for an active UPS queue, it needs to be initialized so that the pilot streaming
# on panda server starts processing the queue
if active_ups_queues:
for ups_queue in active_ups_queues:
if ups_queue not in retMap or not retMap[ups_queue]:
retMap[ups_queue] = {'SCORE': {'running': 0, 'submitted': 0, 'to_submit': 0}}
# commit
self.commit()
tmpLog.debug('got {0}'.format(str(retMap)))
return retMap
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return {}
# send kill command to workers associated to a job
def kill_workers_with_job(self, panda_id):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, 'PandaID={0}'.format(panda_id),
method_name='kill_workers_with_job')
tmpLog.debug('start')
# sql to set killTime
sqlL = "UPDATE {0} SET killTime=:setTime ".format(workTableName)
sqlL += "WHERE workerID=:workerID AND killTime IS NULL AND NOT status IN (:st1,:st2,:st3) "
# sql to get associated workers
sqlA = "SELECT workerID FROM {0} ".format(jobWorkerTableName)
sqlA += "WHERE PandaID=:pandaID "
# set an older time to trigger sweeper
setTime = datetime.datetime.utcnow() - datetime.timedelta(hours=6)
# get workers
varMap = dict()
varMap[':pandaID'] = panda_id
self.execute(sqlA, varMap)
resA = self.cur.fetchall()
nRow = 0
for workerID, in resA:
# set killTime
varMap = dict()
varMap[':workerID'] = workerID
varMap[':setTime'] = setTime
varMap[':st1'] = WorkSpec.ST_finished
varMap[':st2'] = WorkSpec.ST_failed
varMap[':st3'] = WorkSpec.ST_cancelled
self.execute(sqlL, varMap)
nRow += self.cur.rowcount
# commit
self.commit()
tmpLog.debug('set killTime to {0} workers'.format(nRow))
return nRow
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return None
# send kill command to a worker
def kill_worker(self, worker_id):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, 'workerID={0}'.format(worker_id),
method_name='kill_worker')
tmpLog.debug('start')
# sql to set killTime
sqlL = "UPDATE {0} SET killTime=:setTime ".format(workTableName)
sqlL += "WHERE workerID=:workerID AND killTime IS NULL AND NOT status IN (:st1,:st2,:st3) "
# set an older time to trigger sweeper
setTime = datetime.datetime.utcnow() - datetime.timedelta(hours=6)
# set killTime
varMap = dict()
varMap[':workerID'] = worker_id
varMap[':setTime'] = setTime
varMap[':st1'] = WorkSpec.ST_finished
varMap[':st2'] = WorkSpec.ST_failed
varMap[':st3'] = WorkSpec.ST_cancelled
self.execute(sqlL, varMap)
nRow = self.cur.rowcount
# commit
self.commit()
tmpLog.debug('set killTime with {0}'.format(nRow))
return nRow
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return None
# get workers for cleanup
def get_workers_for_cleanup(self, max_workers, status_timeout_map):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, method_name='get_workers_for_cleanup')
tmpLog.debug('start')
# sql to get worker IDs
timeNow = datetime.datetime.utcnow()
modTimeLimit = timeNow - datetime.timedelta(minutes=60)
varMap = dict()
varMap[':timeLimit'] = modTimeLimit
sqlW = "SELECT workerID, configID FROM {0} ".format(workTableName)
sqlW += "WHERE lastUpdate IS NULL AND ("
for tmpStatus, tmpTimeout in iteritems(status_timeout_map):
tmpStatusKey = ':status_{0}'.format(tmpStatus)
tmpTimeoutKey = ':timeLimit_{0}'.format(tmpStatus)
sqlW += '(status={0} AND endTime<={1}) OR '.format(tmpStatusKey, tmpTimeoutKey)
varMap[tmpStatusKey] = tmpStatus
varMap[tmpTimeoutKey] = timeNow - datetime.timedelta(hours=tmpTimeout)
sqlW = sqlW[:-4]
sqlW += ') '
sqlW += 'AND modificationTime<:timeLimit '
sqlW += "ORDER BY modificationTime LIMIT {0} ".format(max_workers)
# sql to lock or release worker
sqlL = "UPDATE {0} SET modificationTime=:setTime ".format(workTableName)
sqlL += "WHERE workerID=:workerID AND modificationTime<:timeLimit "
# sql to check associated jobs
sqlA = "SELECT COUNT(*) cnt FROM {0} j, {1} r ".format(jobTableName, jobWorkerTableName)
sqlA += "WHERE j.PandaID=r.PandaID AND r.workerID=:workerID "
sqlA += "AND propagatorTime IS NOT NULL "
# sql to get workers
sqlG = "SELECT {0} FROM {1} ".format(WorkSpec.column_names(), workTableName)
sqlG += "WHERE workerID=:workerID "
# sql to get PandaIDs
sqlP = "SELECT j.PandaID FROM {0} j, {1} r ".format(jobTableName, jobWorkerTableName)
sqlP += "WHERE j.PandaID=r.PandaID AND r.workerID=:workerID "
# sql to get jobs
sqlJ = "SELECT {0} FROM {1} ".format(JobSpec.column_names(), jobTableName)
sqlJ += "WHERE PandaID=:PandaID "
# sql to get files
sqlF = "SELECT {0} FROM {1} ".format(FileSpec.column_names(), fileTableName)
sqlF += "WHERE PandaID=:PandaID "
            # sql to get files not to be deleted. b.todelete is selected rather than filtered on so that the index on b.lfn is used
sqlD = "SELECT b.lfn,b.todelete FROM {0} a, {0} b ".format(fileTableName)
sqlD += "WHERE a.PandaID=:PandaID AND a.fileType=:fileType AND b.lfn=a.lfn "
# get workerIDs
timeNow = datetime.datetime.utcnow()
self.execute(sqlW, varMap)
resW = self.cur.fetchall()
retVal = dict()
iWorkers = 0
for workerID, configID in resW:
# lock worker
varMap = dict()
varMap[':workerID'] = workerID
varMap[':setTime'] = timeNow
varMap[':timeLimit'] = modTimeLimit
self.execute(sqlL, varMap)
# commit
self.commit()
if self.cur.rowcount == 0:
continue
# ignore configID
if not core_utils.dynamic_plugin_change():
configID = None
# check associated jobs
varMap = dict()
varMap[':workerID'] = workerID
self.execute(sqlA, varMap)
nActJobs, = self.cur.fetchone()
# cleanup when there is no active job
if nActJobs == 0:
# get worker
varMap = dict()
varMap[':workerID'] = workerID
self.execute(sqlG, varMap)
resG = self.cur.fetchone()
workSpec = WorkSpec()
workSpec.pack(resG)
queueName = workSpec.computingSite
retVal.setdefault(queueName, dict())
retVal[queueName].setdefault(configID, [])
retVal[queueName][configID].append(workSpec)
# get jobs
jobSpecs = []
checkedLFNs = set()
keepLFNs = set()
varMap = dict()
varMap[':workerID'] = workerID
self.execute(sqlP, varMap)
resP = self.cur.fetchall()
for pandaID, in resP:
varMap = dict()
varMap[':PandaID'] = pandaID
self.execute(sqlJ, varMap)
resJ = self.cur.fetchone()
jobSpec = JobSpec()
jobSpec.pack(resJ)
jobSpecs.append(jobSpec)
# get LFNs not to be deleted
varMap = dict()
varMap[':PandaID'] = pandaID
varMap[':fileType'] = 'input'
self.execute(sqlD, varMap)
resDs = self.cur.fetchall()
for tmpLFN, tmpTodelete in resDs:
if tmpTodelete == 0:
keepLFNs.add(tmpLFN)
# get files to be deleted
varMap = dict()
varMap[':PandaID'] = jobSpec.PandaID
self.execute(sqlF, varMap)
resFs = self.cur.fetchall()
for resF in resFs:
fileSpec = FileSpec()
fileSpec.pack(resF)
# skip if already checked
if fileSpec.lfn in checkedLFNs:
continue
checkedLFNs.add(fileSpec.lfn)
# check if it is ready to delete
if fileSpec.lfn not in keepLFNs:
jobSpec.add_file(fileSpec)
workSpec.set_jobspec_list(jobSpecs)
iWorkers += 1
tmpLog.debug('got {0} workers'.format(iWorkers))
return retVal
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return {}
# delete a worker
def delete_worker(self, worker_id):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, 'workerID={0}'.format(worker_id),
method_name='delete_worker')
tmpLog.debug('start')
# sql to get jobs
sqlJ = "SELECT PandaID FROM {0} ".format(jobWorkerTableName)
sqlJ += "WHERE workerID=:workerID "
# sql to delete job
sqlDJ = "DELETE FROM {0} ".format(jobTableName)
sqlDJ += "WHERE PandaID=:PandaID "
# sql to delete files
sqlDF = "DELETE FROM {0} ".format(fileTableName)
sqlDF += "WHERE PandaID=:PandaID "
# sql to delete events
sqlDE = "DELETE FROM {0} ".format(eventTableName)
sqlDE += "WHERE PandaID=:PandaID "
# sql to delete relations
sqlDR = "DELETE FROM {0} ".format(jobWorkerTableName)
sqlDR += "WHERE PandaID=:PandaID "
# sql to delete worker
sqlDW = "DELETE FROM {0} ".format(workTableName)
sqlDW += "WHERE workerID=:workerID "
# get jobs
varMap = dict()
varMap[':workerID'] = worker_id
self.execute(sqlJ, varMap)
resJ = self.cur.fetchall()
for pandaID, in resJ:
varMap = dict()
varMap[':PandaID'] = pandaID
# delete job
self.execute(sqlDJ, varMap)
# delete files
self.execute(sqlDF, varMap)
# delete events
self.execute(sqlDE, varMap)
# delete relations
self.execute(sqlDR, varMap)
# delete worker
varMap = dict()
varMap[':workerID'] = worker_id
self.execute(sqlDW, varMap)
# commit
self.commit()
tmpLog.debug('done')
return True
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return False
# release jobs
def release_jobs(self, panda_ids, locked_by):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, method_name='release_jobs')
tmpLog.debug('start for {0} jobs'.format(len(panda_ids)))
# sql to release job
sql = "UPDATE {0} SET lockedBy=NULL ".format(jobTableName)
sql += "WHERE PandaID=:pandaID AND lockedBy=:lockedBy "
nJobs = 0
for pandaID in panda_ids:
varMap = dict()
varMap[':pandaID'] = pandaID
varMap[':lockedBy'] = locked_by
self.execute(sql, varMap)
if self.cur.rowcount > 0:
nJobs += 1
# commit
self.commit()
tmpLog.debug('released {0} jobs'.format(nJobs))
# return
return True
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return False
# clone queue
def clone_queue_with_new_resource_type(self, site_name, queue_name, resource_type, new_workers):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, 'site_name={0} queue_name={1}'.format(site_name, queue_name),
method_name='clone_queue_with_new_resource_type')
tmpLog.debug('start')
# get the values from one of the existing queues
sql_select_queue = "SELECT {0} FROM {1} ".format(PandaQueueSpec.column_names(), pandaQueueTableName)
sql_select_queue += "WHERE siteName=:siteName "
var_map = dict()
var_map[':siteName'] = site_name
self.execute(sql_select_queue, var_map)
queue = self.cur.fetchone()
if queue: # a queue to clone was found
var_map = {}
attribute_list = []
attr_binding_list = []
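                # copy every column of the template queue, overriding resourceType, nNewWorkers and uniqueName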
for attribute, value in zip(PandaQueueSpec.column_names().split(','), queue):
attr_binding = ':{0}'.format(attribute)
if attribute == 'resourceType':
var_map[attr_binding] = resource_type
elif attribute == 'nNewWorkers':
var_map[attr_binding] = new_workers
elif attribute == 'uniqueName':
var_map[attr_binding] = core_utils.get_unique_queue_name(queue_name, resource_type)
else:
var_map[attr_binding] = value
attribute_list.append(attribute)
attr_binding_list.append(attr_binding)
sql_insert = "INSERT IGNORE INTO {0} ({1}) ".format(pandaQueueTableName, ','.join(attribute_list))
sql_values = "VALUES ({0}) ".format(','.join(attr_binding_list))
self.execute(sql_insert + sql_values, var_map)
else:
tmpLog.debug("Failed to clone the queue")
self.commit()
return True
except Exception:
self.rollback()
core_utils.dump_error_message(_logger)
return False
# set queue limit
def set_queue_limit(self, site_name, params):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, 'siteName={0}'.format(site_name), method_name='set_queue_limit')
tmpLog.debug('start')
            # sql to reset queue limits before setting new values, so that stale values are not applied repeatedly
sql_reset = "UPDATE {0} ".format(pandaQueueTableName)
sql_reset += "SET nNewWorkers=:zero WHERE siteName=:siteName "
# sql to get resource types
sql_get_resource = "SELECT resourceType FROM {0} ".format(pandaQueueTableName)
sql_get_resource += "WHERE siteName=:siteName "
sql_get_resource += "FOR UPDATE "
# sql to update nQueueLimit
sql_update_queue = "UPDATE {0} ".format(pandaQueueTableName)
sql_update_queue += "SET nNewWorkers=:nQueue WHERE siteName=:siteName AND resourceType=:resourceType "
# sql to get num of submitted workers
sql_count_workers = "SELECT COUNT(*) cnt "
sql_count_workers += "FROM {0} wt, {1} pq ".format(workTableName, pandaQueueTableName)
sql_count_workers += "WHERE pq.siteName=:siteName AND wt.computingSite=pq.queueName AND wt.status=:status "
sql_count_workers += "ANd pq.resourceType=:resourceType "
# reset nqueued for all resource types
varMap = dict()
varMap[':zero'] = 0
varMap[':siteName'] = site_name
self.execute(sql_reset, varMap)
# get resource types
varMap = dict()
varMap[':siteName'] = site_name
self.execute(sql_get_resource, varMap)
resRes = self.cur.fetchall()
resource_type_list = set()
for tmpRes, in resRes:
resource_type_list.add(tmpRes)
# set all queues
nUp = 0
retMap = dict()
queue_name = site_name
for resource_type, value in iteritems(params):
tmpLog.debug('Processing rt {0} -> {1}'.format(resource_type, value))
# get num of submitted workers
varMap = dict()
varMap[':siteName'] = site_name
varMap[':resourceType'] = resource_type
varMap[':status'] = 'submitted'
self.execute(sql_count_workers, varMap)
res = self.cur.fetchone()
tmpLog.debug('{0} has {1} submitted workers'.format(resource_type, res))
nSubmittedWorkers = 0
if res is not None:
nSubmittedWorkers, = res
# set new value
# value = max(value - nSubmittedWorkers, 0)
if value is None:
value = 0
varMap = dict()
varMap[':nQueue'] = value
varMap[':siteName'] = site_name
varMap[':resourceType'] = resource_type
self.execute(sql_update_queue, varMap)
iUp = self.cur.rowcount
# iUp is 0 when nQueue is not changed
if iUp > 0 or resource_type in resource_type_list:
# a queue was updated, add the values to the map
retMap[resource_type] = value
else:
# no queue was updated, we need to create a new one for the resource type
cloned = self.clone_queue_with_new_resource_type(site_name, queue_name, resource_type, value)
if cloned:
retMap[resource_type] = value
iUp = 1
nUp += iUp
tmpLog.debug('set nNewWorkers={0} to {1}:{2} with {3}'.format(value, queue_name, resource_type, iUp))
# commit
self.commit()
tmpLog.debug('updated {0} queues'.format(nUp))
return retMap
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return {}
# get the number of missed worker
def get_num_missed_workers(self, queue_name, criteria):
try:
# get logger
            tmpLog = core_utils.make_logger(_logger, "queue={0}".format(queue_name),
method_name='get_num_missed_workers')
tmpLog.debug('start')
            # sql to count missed workers
sqlW = "SELECT COUNT(*) cnt "
sqlW += "FROM {0} wt, {1} pq ".format(workTableName, pandaQueueTableName)
sqlW += "WHERE wt.computingSite=pq.queueName AND wt.status=:status "
            # build selection criteria
varMap = dict()
for attr, val in iteritems(criteria):
if attr == 'timeLimit':
sqlW += "AND wt.submitTime>:timeLimit "
varMap[':timeLimit'] = val
elif attr in ['siteName']:
sqlW += "AND pq.{0}=:{0} ".format(attr)
varMap[':{0}'.format(attr)] = val
elif attr in ['computingSite', 'computingElement']:
sqlW += "AND wt.{0}=:{0} ".format(attr)
varMap[':{0}'.format(attr)] = val
varMap[':status'] = 'missed'
self.execute(sqlW, varMap)
resW = self.cur.fetchone()
if resW is None:
nMissed = 0
else:
nMissed, = resW
# commit
self.commit()
tmpLog.debug('got nMissed={0} for {1}'.format(nMissed, str(criteria)))
return nMissed
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return 0
# get a worker
def get_workers_with_job_id(self, panda_id, use_commit=True):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, 'pandaID={0}'.format(panda_id),
method_name='get_workers_with_job_id')
tmpLog.debug('start')
# sql to get workerIDs
sqlW = "SELECT workerID FROM {0} WHERE PandaID=:PandaID ".format(jobWorkerTableName)
sqlW += "ORDER BY workerID "
# sql to get a worker
sqlG = "SELECT {0} FROM {1} ".format(WorkSpec.column_names(slim=True), workTableName)
sqlG += "WHERE workerID=:workerID "
# get workerIDs
varMap = dict()
varMap[':PandaID'] = panda_id
self.execute(sqlW, varMap)
retList = []
for worker_id, in self.cur.fetchall():
# get a worker
varMap = dict()
varMap[':workerID'] = worker_id
self.execute(sqlG, varMap)
res = self.cur.fetchone()
workSpec = WorkSpec()
workSpec.pack(res, slim=True)
retList.append(workSpec)
# commit
if use_commit:
self.commit()
tmpLog.debug('got {0} workers'.format(len(retList)))
return retList
except Exception:
# roll back
if use_commit:
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return []
# delete old process locks
def clean_process_locks(self):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, method_name='clean_process_locks')
tmpLog.debug('start')
# delete locks
sqlW = "DELETE FROM {0} ".format(processLockTableName)
            # delete all process locks
self.execute(sqlW)
# commit
self.commit()
tmpLog.debug('done')
return True
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return False
# get a process lock
def get_process_lock(self, process_name, locked_by, lock_interval):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, "proc={0} by={1}".format(process_name, locked_by),
method_name='get_process_lock')
tmpLog.debug('start')
# delete old lock
sqlD = "DELETE FROM {0} ".format(processLockTableName)
sqlD += "WHERE lockTime<:timeLimit "
varMap = dict()
varMap[':timeLimit'] = datetime.datetime.utcnow() - datetime.timedelta(hours=6)
self.execute(sqlD, varMap)
# commit
self.commit()
# check lock
sqlC = "SELECT lockTime FROM {0} ".format(processLockTableName)
sqlC += "WHERE processName=:processName "
varMap = dict()
varMap[':processName'] = process_name
self.execute(sqlC, varMap)
resC = self.cur.fetchone()
retVal = False
timeNow = datetime.datetime.utcnow()
if resC is None:
# insert lock if missing
sqlI = "INSERT INTO {0} ({1}) ".format(processLockTableName, ProcessLockSpec.column_names())
sqlI += ProcessLockSpec.bind_values_expression()
processLockSpec = ProcessLockSpec()
processLockSpec.processName = process_name
processLockSpec.lockedBy = locked_by
processLockSpec.lockTime = timeNow
varMap = processLockSpec.values_list()
self.execute(sqlI, varMap)
retVal = True
else:
oldLockTime, = resC
timeLimit = timeNow - datetime.timedelta(seconds=lock_interval)
if oldLockTime <= timeLimit:
# update lock if old
sqlU = "UPDATE {0} SET lockedBy=:lockedBy,lockTime=:timeNow ".format(processLockTableName)
sqlU += "WHERE processName=:processName AND lockTime<=:timeLimit "
varMap = dict()
varMap[':processName'] = process_name
varMap[':lockedBy'] = locked_by
varMap[':timeLimit'] = timeLimit
varMap[':timeNow'] = timeNow
self.execute(sqlU, varMap)
if self.cur.rowcount > 0:
retVal = True
# commit
self.commit()
tmpLog.debug('done with {0}'.format(retVal))
return retVal
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return False
# release a process lock
def release_process_lock(self, process_name, locked_by):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, "proc={0} by={1}".format(process_name, locked_by),
method_name='release_process_lock')
tmpLog.debug('start')
# delete old lock
sqlC = "DELETE FROM {0} ".format(processLockTableName)
sqlC += "WHERE processName=:processName AND lockedBy=:lockedBy "
varMap = dict()
varMap[':processName'] = process_name
varMap[':lockedBy'] = locked_by
self.execute(sqlC, varMap)
# commit
self.commit()
tmpLog.debug('done')
return True
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return False
# get file status
def get_file_status(self, lfn, file_type, endpoint, job_status):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, 'lfn={0} endpoint={1}'.format(lfn, endpoint),
method_name='get_file_status')
tmpLog.debug('start')
            # sql to count files per status
sqlF = "SELECT f.status, COUNT(*) cnt FROM {0} f, {1} j ".format(fileTableName, jobTableName)
sqlF += "WHERE j.PandaID=f.PandaID AND j.status=:jobStatus "
sqlF += "AND f.lfn=:lfn AND f.fileType=:type "
if endpoint is not None:
sqlF += "AND f.endpoint=:endpoint "
sqlF += "GROUP BY f.status "
            # count files
varMap = dict()
varMap[':lfn'] = lfn
varMap[':type'] = file_type
varMap[':jobStatus'] = job_status
if endpoint is not None:
varMap[':endpoint'] = endpoint
self.execute(sqlF, varMap)
retMap = dict()
for status, cnt in self.cur.fetchall():
retMap[status] = cnt
# commit
self.commit()
tmpLog.debug('got {0}'.format(str(retMap)))
return retMap
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return {}
# change file status
def change_file_status(self, panda_id, data, locked_by):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, 'PandaID={0}'.format(panda_id), method_name='change_file_status')
tmpLog.debug('start lockedBy={0}'.format(locked_by))
# sql to check lock of job
sqlJ = "SELECT lockedBy FROM {0} ".format(jobTableName)
sqlJ += "WHERE PandaID=:PandaID FOR UPDATE "
# sql to update files
sqlF = "UPDATE {0} ".format(fileTableName)
sqlF += "SET status=:status WHERE fileID=:fileID "
# check lock
varMap = dict()
varMap[':PandaID'] = panda_id
self.execute(sqlJ, varMap)
resJ = self.cur.fetchone()
if resJ is None:
tmpLog.debug('skip since job not found')
else:
lockedBy, = resJ
if lockedBy != locked_by:
tmpLog.debug('skip since lockedBy is inconsistent in DB {0}'.format(lockedBy))
else:
# update files
for tmpFileID, tmpLFN, newStatus in data:
varMap = dict()
varMap[':fileID'] = tmpFileID
varMap[':status'] = newStatus
self.execute(sqlF, varMap)
tmpLog.debug('set new status {0} to {1}'.format(newStatus, tmpLFN))
# commit
self.commit()
tmpLog.debug('done')
return True
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return False
# get group for a file
def get_group_for_file(self, lfn, file_type, endpoint):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, 'lfn={0} endpoint={1}'.format(lfn, endpoint),
method_name='get_group_for_file')
tmpLog.debug('start')
# sql to get group with the latest update
sqlF = "SELECT * FROM ("
sqlF += "SELECT groupID,groupStatus,groupUpdateTime FROM {0} ".format(fileTableName)
sqlF += "WHERE lfn=:lfn AND fileType=:type "
sqlF += "AND groupID IS NOT NULL AND groupStatus<>:ngStatus "
if endpoint is not None:
sqlF += "AND endpoint=:endpoint "
sqlF += "ORDER BY groupUpdateTime DESC "
sqlF += ") AS TMP LIMIT 1 "
# get group
varMap = dict()
varMap[':lfn'] = lfn
varMap[':type'] = file_type
varMap[':ngStatus'] = 'failed'
if endpoint is not None:
varMap[':endpoint'] = endpoint
self.execute(sqlF, varMap)
resF = self.cur.fetchone()
if resF is None:
retVal = None
else:
groupID, groupStatus, groupUpdateTime = resF
retVal = {'groupID': groupID, 'groupStatus': groupStatus, 'groupUpdateTime': groupUpdateTime}
# commit
self.commit()
tmpLog.debug('got {0}'.format(str(retVal)))
return retVal
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return None
# get files with a group ID
def get_files_with_group_id(self, group_id):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, 'groupID={0}'.format(group_id),
method_name='get_files_with_group_id')
tmpLog.debug('start')
# sql to get files
sqlF = "SELECT {0} FROM {1} ".format(FileSpec.column_names(), fileTableName)
sqlF += "WHERE groupID=:groupID "
# get files
varMap = dict()
varMap[':groupID'] = group_id
retList = []
self.execute(sqlF, varMap)
for resFile in self.cur.fetchall():
fileSpec = FileSpec()
fileSpec.pack(resFile)
retList.append(fileSpec)
# commit
self.commit()
tmpLog.debug('got {0} files'.format(len(retList)))
return retList
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return []
# update group status
def update_file_group_status(self, group_id, status_string):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, 'groupID={0}'.format(group_id),
method_name='update_file_group_status')
tmpLog.debug('start')
            # sql to update group status
sqlF = "UPDATE {0} set groupStatus=:groupStatus ".format(fileTableName)
sqlF += "WHERE groupID=:groupID "
            # update group status
varMap = dict()
varMap[':groupID'] = group_id
varMap[':groupStatus'] = status_string
self.execute(sqlF, varMap)
nRow = self.cur.rowcount
# commit
self.commit()
tmpLog.debug('updated {0} files'.format(nRow))
return True
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return False
# get file group status
def get_file_group_status(self, group_id):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, 'groupID={0}'.format(group_id),
method_name='get_file_group_status')
tmpLog.debug('start')
            # sql to get distinct group statuses
sqlF = "SELECT DISTINCT groupStatus FROM {0} ".format(fileTableName)
sqlF += "WHERE groupID=:groupID "
            # get group statuses
varMap = dict()
varMap[':groupID'] = group_id
self.execute(sqlF, varMap)
res = self.cur.fetchall()
retVal = set()
for groupStatus, in res:
retVal.add(groupStatus)
# commit
self.commit()
            tmpLog.debug('got {0}'.format(str(retVal)))
return retVal
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return []
# lock job again
def lock_job_again(self, panda_id, time_column, lock_column, locked_by):
try:
tmpLog = core_utils.make_logger(_logger, 'PandaID={0}'.format(panda_id), method_name='lock_job_again')
tmpLog.debug('start column={0} id={1}'.format(lock_column, locked_by))
# check lock
sqlC = "SELECT {0},{1} FROM {2} ".format(lock_column, time_column, jobTableName)
sqlC += "WHERE PandaID=:pandaID "
sqlC += "FOR UPDATE "
varMap = dict()
varMap[':pandaID'] = panda_id
self.execute(sqlC, varMap)
resC = self.cur.fetchone()
if resC is None:
retVal = False
tmpLog.debug('not found')
else:
oldLockedBy, oldLockedTime = resC
if oldLockedBy != locked_by:
tmpLog.debug('locked by another {0} at {1}'.format(oldLockedBy, oldLockedTime))
retVal = False
else:
# update locked time
sqlU = "UPDATE {0} SET {1}=:timeNow WHERE pandaID=:pandaID ".format(jobTableName, time_column)
varMap = dict()
varMap[':pandaID'] = panda_id
varMap[':timeNow'] = datetime.datetime.utcnow()
self.execute(sqlU, varMap)
retVal = True
# commit
self.commit()
tmpLog.debug('done with {0}'.format(retVal))
# return
return retVal
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return False
# set file group
def set_file_group(self, file_specs, group_id, status_string):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, 'groupID={0}'.format(group_id),
method_name='set_file_group')
tmpLog.debug('start')
timeNow = datetime.datetime.utcnow()
# sql to update files
sqlF = "UPDATE {0} ".format(fileTableName)
sqlF += "SET groupID=:groupID,groupStatus=:groupStatus,groupUpdateTime=:groupUpdateTime "
sqlF += "WHERE lfn=:lfn "
# update files
for fileSpec in file_specs:
varMap = dict()
varMap[':groupID'] = group_id
varMap[':groupStatus'] = status_string
varMap[':groupUpdateTime'] = timeNow
varMap[':lfn'] = fileSpec.lfn
self.execute(sqlF, varMap)
# commit
self.commit()
tmpLog.debug('done')
return True
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return False
# refresh file group info
def refresh_file_group_info(self, job_spec):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, 'pandaID={0}'.format(job_spec.PandaID),
method_name='refresh_file_group_info')
tmpLog.debug('start')
# sql to get info
sqlF = "SELECT groupID,groupStatus,groupUpdateTime FROM {0} ".format(fileTableName)
sqlF += "WHERE lfn=:lfn "
# get info
for fileSpec in job_spec.inFiles.union(job_spec.outFiles):
varMap = dict()
varMap[':lfn'] = fileSpec.lfn
self.execute(sqlF, varMap)
resF = self.cur.fetchone()
if resF is None:
continue
groupID, groupStatus, groupUpdateTime = resF
fileSpec.groupID = groupID
fileSpec.groupStatus = groupStatus
fileSpec.groupUpdateTime = groupUpdateTime
# commit
self.commit()
tmpLog.debug('done')
return True
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return False
# increment submission attempt
def increment_submission_attempt(self, panda_id, new_number):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, 'pandaID={0}'.format(panda_id),
method_name='increment_submission_attempt')
tmpLog.debug('start with newNum={0}'.format(new_number))
# sql to update attempt number
sqlL = "UPDATE {0} SET submissionAttempts=:newNum ".format(jobTableName)
sqlL += "WHERE PandaID=:PandaID "
varMap = dict()
varMap[':PandaID'] = panda_id
varMap[':newNum'] = new_number
self.execute(sqlL, varMap)
# commit
self.commit()
tmpLog.debug('done')
return True
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return False
# get queue status
def get_worker_limits(self, site_name):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, method_name='get_worker_limits')
tmpLog.debug('start')
# sql to get
sqlQ = "SELECT maxWorkers,nQueueLimitWorker,nQueueLimitWorkerRatio,"
sqlQ += "nQueueLimitWorkerMax,nQueueLimitWorkerMin FROM {0} ".format(pandaQueueTableName)
sqlQ += "WHERE siteName=:siteName AND resourceType='ANY'"
# sql to count resource types
sqlNT = "SELECT COUNT(*) cnt FROM {0} ".format(pandaQueueTableName)
sqlNT += "WHERE siteName=:siteName AND resourceType!='ANY'"
# sql to count running workers
sqlNR = "SELECT COUNT(*) cnt FROM {0} ".format(workTableName)
sqlNR += "WHERE computingSite=:computingSite AND status IN (:status1)"
# get
varMap = dict()
varMap[':siteName'] = site_name
self.execute(sqlQ, varMap)
resQ = self.cur.fetchall()
# count resource types
varMap = dict()
varMap[':computingSite'] = site_name
varMap[':siteName'] = site_name
self.execute(sqlNT, varMap)
resNT = self.cur.fetchall()
# count running workers
varMap = dict()
varMap[':computingSite'] = site_name
varMap[':status1'] = 'running'
self.execute(sqlNR, varMap)
resNR = self.cur.fetchall()
# dynamic nQueueLimitWorker
retMap = dict()
nRunning = 0
nRT = 1
for cnt, in resNR:
nRunning = cnt
for cnt, in resNT:
nRT = max(nRT, cnt)
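            # derive the queue limit either from the ratio of running workers (bounded by the per-resource-type
            # minimum and the configured maximum) or from the static nQueueLimitWorker, falling back to maxWorkers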
for maxWorkers, nQueueLimitWorker_orig, nQueueLimitWorkerRatio, \
nQueueLimitWorkerMax, nQueueLimitWorkerMin_orig in resQ:
if nQueueLimitWorkerRatio is not None and nQueueLimitWorkerRatio > 0:
nQueueLimitWorkerByRatio = int(nRunning * nQueueLimitWorkerRatio / 100)
nQueueLimitWorkerMin = 1
if nQueueLimitWorkerMin_orig is not None:
nQueueLimitWorkerMin = nQueueLimitWorkerMin_orig
nQueueLimitWorkerMinAllRTs = nQueueLimitWorkerMin * nRT
nQueueLimitWorker = max(nQueueLimitWorkerByRatio, nQueueLimitWorkerMinAllRTs)
nQueueLimitWorkerPerRT = max(nQueueLimitWorkerByRatio, nQueueLimitWorkerMin)
if nQueueLimitWorkerMax is not None:
nQueueLimitWorker = min(nQueueLimitWorker, nQueueLimitWorkerMax)
nQueueLimitWorkerPerRT = min(nQueueLimitWorkerPerRT, nQueueLimitWorkerMax)
elif nQueueLimitWorker_orig is not None:
nQueueLimitWorker = nQueueLimitWorker_orig
nQueueLimitWorkerPerRT = nQueueLimitWorker
else:
nQueueLimitWorker = maxWorkers
nQueueLimitWorkerPerRT = nQueueLimitWorker
nQueueLimitWorker = min(nQueueLimitWorker, maxWorkers)
retMap.update({
'maxWorkers': maxWorkers,
'nQueueLimitWorker': nQueueLimitWorker,
'nQueueLimitWorkerPerRT': nQueueLimitWorkerPerRT,
})
# commit
self.commit()
tmpLog.debug('got {0}'.format(str(retMap)))
return retMap
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return {}
# get worker CE stats
def get_worker_ce_stats(self, site_name):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, method_name='get_worker_ce_stats')
tmpLog.debug('start')
# get worker CE stats
sqlW = "SELECT wt.status,wt.computingSite,wt.computingElement,COUNT(*) cnt "
sqlW += "FROM {0} wt ".format(workTableName)
sqlW += "WHERE wt.computingSite=:siteName AND wt.status IN (:st1,:st2) "
sqlW += "GROUP BY wt.status,wt.computingElement "
# get worker CE stats
varMap = dict()
varMap[':siteName'] = site_name
varMap[':st1'] = 'running'
varMap[':st2'] = 'submitted'
self.execute(sqlW, varMap)
resW = self.cur.fetchall()
retMap = dict()
for workerStatus, computingSite, computingElement, cnt in resW:
if computingElement not in retMap:
retMap[computingElement] = {
'running': 0,
'submitted': 0,
}
retMap[computingElement][workerStatus] = cnt
# commit
self.commit()
tmpLog.debug('got {0}'.format(str(retMap)))
return retMap
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return {}
# get worker CE backend throughput
def get_worker_ce_backend_throughput(self, site_name, time_window):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, method_name='get_worker_ce_backend_throughput')
tmpLog.debug('start')
# get worker CE throughput
sqlW = "SELECT wt.computingElement,wt.status,COUNT(*) cnt "
sqlW += "FROM {0} wt ".format(workTableName)
sqlW += "WHERE wt.computingSite=:siteName "
sqlW += "AND wt.status IN (:st1,:st2,:st3) "
sqlW += "AND wt.creationtime < :timeWindowMiddle "
sqlW += "AND (wt.starttime is NULL OR "
sqlW += "(wt.starttime >= :timeWindowStart AND wt.starttime < :timeWindowEnd) ) "
sqlW += "GROUP BY wt.status,wt.computingElement "
# time window start and end
timeWindowEnd = datetime.datetime.utcnow()
timeWindowStart = timeWindowEnd - datetime.timedelta(seconds=time_window)
timeWindowMiddle = timeWindowEnd - datetime.timedelta(seconds=time_window/2)
# get worker CE throughput
varMap = dict()
varMap[':siteName'] = site_name
varMap[':st1'] = 'submitted'
varMap[':st2'] = 'running'
varMap[':st3'] = 'finished'
varMap[':timeWindowStart'] = timeWindowStart
varMap[':timeWindowEnd'] = timeWindowEnd
varMap[':timeWindowMiddle'] = timeWindowMiddle
self.execute(sqlW, varMap)
resW = self.cur.fetchall()
retMap = dict()
for computingElement, workerStatus, cnt in resW:
if computingElement not in retMap:
retMap[computingElement] = {
'submitted': 0,
'running': 0,
'finished': 0,
}
retMap[computingElement][workerStatus] = cnt
# commit
self.commit()
tmpLog.debug('got {0} with time_window={1} for site {2}'.format(
str(retMap), time_window, site_name))
return retMap
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return {}
# add dialog message
def add_dialog_message(self, message, level, module_name, identifier=None):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, method_name='add_dialog_message')
tmpLog.debug('start')
# delete old messages
sqlS = "SELECT diagID FROM {0} ".format(diagTableName)
sqlS += "WHERE creationTime<:timeLimit "
varMap = dict()
varMap[':timeLimit'] = datetime.datetime.utcnow() - datetime.timedelta(minutes=60)
self.execute(sqlS, varMap)
resS = self.cur.fetchall()
sqlD = "DELETE FROM {0} ".format(diagTableName)
sqlD += "WHERE diagID=:diagID "
for diagID, in resS:
varMap = dict()
varMap[':diagID'] = diagID
self.execute(sqlD, varMap)
# commit
self.commit()
# make spec
diagSpec = DiagSpec()
diagSpec.moduleName = module_name
diagSpec.creationTime = datetime.datetime.utcnow()
diagSpec.messageLevel = level
try:
diagSpec.identifier = identifier[:100]
except Exception:
pass
diagSpec.diagMessage = message[:500]
# insert
sqlI = "INSERT INTO {0} ({1}) ".format(diagTableName, DiagSpec.column_names())
sqlI += DiagSpec.bind_values_expression()
varMap = diagSpec.values_list()
self.execute(sqlI, varMap)
# commit
self.commit()
tmpLog.debug('done')
return True
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return False
# get dialog messages to send
def get_dialog_messages_to_send(self, n_messages, lock_interval):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, method_name='get_dialog_messages_to_send')
tmpLog.debug('start')
# sql to select messages
sqlD = "SELECT diagID FROM {0} ".format(diagTableName)
sqlD += "WHERE (lockTime IS NULL OR lockTime<:timeLimit) "
sqlD += "ORDER BY diagID LIMIT {0} ".format(n_messages)
# sql to lock message
sqlL = "UPDATE {0} SET lockTime=:timeNow ".format(diagTableName)
sqlL += "WHERE diagID=:diagID "
sqlL += "AND (lockTime IS NULL OR lockTime<:timeLimit) "
# sql to get message
sqlM = "SELECT {0} FROM {1} ".format(DiagSpec.column_names(), diagTableName)
sqlM += "WHERE diagID=:diagID "
# select messages
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(seconds=lock_interval)
varMap = dict()
varMap[':timeLimit'] = timeLimit
self.execute(sqlD, varMap)
resD = self.cur.fetchall()
diagList = []
for diagID, in resD:
# lock
varMap = dict()
varMap[':diagID'] = diagID
varMap[':timeLimit'] = timeLimit
varMap[':timeNow'] = datetime.datetime.utcnow()
self.execute(sqlL, varMap)
nRow = self.cur.rowcount
if nRow == 1:
# get
varMap = dict()
varMap[':diagID'] = diagID
self.execute(sqlM, varMap)
resM = self.cur.fetchone()
# make spec
diagSpec = DiagSpec()
diagSpec.pack(resM)
diagList.append(diagSpec)
# commit
self.commit()
tmpLog.debug('got {0} messages'.format(len(diagList)))
return diagList
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return []
# delete dialog messages
def delete_dialog_messages(self, ids):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, method_name='delete_dialog_messages')
tmpLog.debug('start')
# sql to delete message
sqlM = "DELETE FROM {0} ".format(diagTableName)
sqlM += "WHERE diagID=:diagID "
for diagID in ids:
# lock
varMap = dict()
varMap[':diagID'] = diagID
self.execute(sqlM, varMap)
# commit
self.commit()
tmpLog.debug('done')
return True
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return False
# delete old jobs
def delete_old_jobs(self, timeout):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, 'timeout={0}'.format(timeout),
method_name='delete_old_jobs')
tmpLog.debug('start')
# sql to get old jobs to be deleted
sqlGJ = "SELECT PandaID FROM {0} ".format(jobTableName)
sqlGJ += "WHERE subStatus=:subStatus AND propagatorTime IS NULL "
sqlGJ += "AND ((modificationTime IS NOT NULL AND modificationTime<:timeLimit1) "
sqlGJ += "OR (modificationTime IS NULL AND creationTime<:timeLimit2)) "
# sql to delete job
sqlDJ = "DELETE FROM {0} ".format(jobTableName)
sqlDJ += "WHERE PandaID=:PandaID "
# sql to delete files
sqlDF = "DELETE FROM {0} ".format(fileTableName)
sqlDF += "WHERE PandaID=:PandaID "
# sql to delete events
sqlDE = "DELETE FROM {0} ".format(eventTableName)
sqlDE += "WHERE PandaID=:PandaID "
# sql to delete relations
sqlDR = "DELETE FROM {0} ".format(jobWorkerTableName)
sqlDR += "WHERE PandaID=:PandaID "
# get jobs
varMap = dict()
varMap[':subStatus'] = 'done'
varMap[':timeLimit1'] = datetime.datetime.utcnow() - datetime.timedelta(hours=timeout)
varMap[':timeLimit2'] = datetime.datetime.utcnow() - datetime.timedelta(hours=timeout*2)
self.execute(sqlGJ, varMap)
resGJ = self.cur.fetchall()
nDel = 0
for pandaID, in resGJ:
varMap = dict()
varMap[':PandaID'] = pandaID
# delete job
self.execute(sqlDJ, varMap)
iDel = self.cur.rowcount
if iDel > 0:
nDel += iDel
# delete files
self.execute(sqlDF, varMap)
# delete events
self.execute(sqlDE, varMap)
# delete relations
self.execute(sqlDR, varMap)
# commit
self.commit()
tmpLog.debug('deleted {0} jobs'.format(nDel))
return True
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return False
# get iterator of active workers to monitor fifo
def get_active_workers(self, n_workers, seconds_ago=0):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, method_name='get_active_workers')
tmpLog.debug('start')
# sql to get workers
sqlW = "SELECT {0} FROM {1} ".format(WorkSpec.column_names(), workTableName)
sqlW += "WHERE status IN (:st_submitted,:st_running,:st_idle) "
sqlW += "AND modificationTime<:timeLimit "
sqlW += "ORDER BY modificationTime,computingSite LIMIT {0} ".format(n_workers)
varMap = dict()
varMap[':timeLimit'] = datetime.datetime.utcnow() - datetime.timedelta(seconds=seconds_ago)
varMap[':st_submitted'] = WorkSpec.ST_submitted
varMap[':st_running'] = WorkSpec.ST_running
varMap[':st_idle'] = WorkSpec.ST_idle
self.execute(sqlW, varMap)
resW = self.cur.fetchall()
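            # wrap each record in a WorkSpec with an empty pandaid_list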
def _get_workspec_from_record(rec):
workspec = WorkSpec()
workspec.pack(rec)
workspec.pandaid_list = []
return workspec
retVal = map(_get_workspec_from_record, resW)
tmpLog.debug('got {0} workers'.format(len(resW)))
return retVal
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return {}
# lock workers for specific thread
def lock_workers(self, worker_id_list, lock_interval):
try:
timeNow = datetime.datetime.utcnow()
lockTimeLimit = timeNow - datetime.timedelta(seconds=lock_interval)
retVal = True
# get logger
tmpLog = core_utils.make_logger(_logger, method_name='lock_worker')
tmpLog.debug('start')
# loop
for worker_id, attrs in iteritems(worker_id_list):
varMap = dict()
varMap[':workerID'] = worker_id
varMap[':timeNow'] = timeNow
varMap[':lockTimeLimit'] = lockTimeLimit
varMap[':st1'] = WorkSpec.ST_cancelled
varMap[':st2'] = WorkSpec.ST_finished
varMap[':st3'] = WorkSpec.ST_failed
varMap[':st4'] = WorkSpec.ST_missed
# extract lockedBy
varMap[':lockedBy'] = attrs['lockedBy']
if attrs['lockedBy'] is None:
del attrs['lockedBy']
# sql to lock worker
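                # the WHERE clause only matches workers that are unlocked, or whose
                # lock is older than lockTimeLimit, and that are not in a final state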
sqlL = "UPDATE {0} SET modificationTime=:timeNow".format(workTableName)
for attrKey, attrVal in iteritems(attrs):
sqlL += ',{0}=:{0}'.format(attrKey)
varMap[':{0}'.format(attrKey)] = attrVal
sqlL += " WHERE workerID=:workerID AND (lockedBy IS NULL "
sqlL += "OR (modificationTime<:lockTimeLimit AND lockedBy IS NOT NULL)) "
sqlL += "AND (status NOT IN (:st1,:st2,:st3,:st4)) "
# lock worker
self.execute(sqlL, varMap)
nRow = self.cur.rowcount
tmpLog.debug('done with {0}'.format(nRow))
# false if failed to lock
if nRow == 0:
retVal = False
# commit
self.commit()
# return
return retVal
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return False
# get queue config dumps
def get_queue_config_dumps(self):
try:
retVal = dict()
configIDs = set()
# time limit
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=24)
# get logger
tmpLog = core_utils.make_logger(_logger, method_name='get_queue_config_dumps')
tmpLog.debug('start')
# sql to get used IDs
sqlIJ = "SELECT DISTINCT configID FROM {0} ".format(jobTableName)
self.execute(sqlIJ)
resIJ = self.cur.fetchall()
for tmpID, in resIJ:
configIDs.add(tmpID)
sqlIW = "SELECT DISTINCT configID FROM {0} ".format(workTableName)
self.execute(sqlIW)
resIW = self.cur.fetchall()
for tmpID, in resIW:
configIDs.add(tmpID)
# sql to delete
sqlD = "DELETE FROM {0} WHERE configID=:configID ".format(queueConfigDumpTableName)
# sql to get config
sqlQ = "SELECT {0} FROM {1} ".format(QueueConfigDumpSpec.column_names(), queueConfigDumpTableName)
sqlQ += "FOR UPDATE "
self.execute(sqlQ)
resQs = self.cur.fetchall()
iDump = 0
iDel = 0
for resQ in resQs:
dumpSpec = QueueConfigDumpSpec()
dumpSpec.pack(resQ)
# delete if unused and too old
if dumpSpec.configID not in configIDs and dumpSpec.creationTime < timeLimit:
varMap = dict()
varMap[':configID'] = dumpSpec.configID
self.execute(sqlD, varMap)
iDel += 1
else:
retVal[dumpSpec.dumpUniqueName] = dumpSpec
iDump += 1
# commit
self.commit()
tmpLog.debug('got {0} dumps and delete {1} dumps'.format(iDump, iDel))
# return
return retVal
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(tmpLog)
# return
return {}
# add queue config dump
def add_queue_config_dump(self, dump_spec):
try:
# sql to insert a job
sqlJ = "INSERT INTO {0} ({1}) ".format(queueConfigDumpTableName, QueueConfigDumpSpec.column_names())
sqlJ += QueueConfigDumpSpec.bind_values_expression()
# get logger
tmpLog = core_utils.make_logger(_logger, method_name='add_queue_config_dumps')
tmpLog.debug('start for {0}'.format(dump_spec.dumpUniqueName))
varMap = dump_spec.values_list()
# insert
self.execute(sqlJ, varMap)
# commit
self.commit()
tmpLog.debug('done')
# return
return True
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(tmpLog)
# return
return False
# get configID for queue config dump
def get_config_id_dump(self, dump_spec):
try:
# sql to get configID
sqlJ = "SELECT configID FROM {0} ".format(queueConfigDumpTableName)
sqlJ += "WHERE queueName=:queueName AND dumpUniqueName=:dumpUniqueName "
# get logger
tmpLog = core_utils.make_logger(_logger, method_name='get_config_id_for_dump')
tmpLog.debug('start for {0}:{1}'.format(dump_spec.queueName, dump_spec.dumpUniqueName))
# get
varMap = dict()
varMap[':queueName'] = dump_spec.queueName
varMap[':dumpUniqueName'] = dump_spec.dumpUniqueName
self.execute(sqlJ, varMap)
resJ = self.cur.fetchone()
if resJ is not None:
configID, = resJ
else:
configID = None
tmpLog.debug('got configID={0}'.format(configID))
# return
return configID
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(tmpLog)
# return
return None
# purge a panda queue
def purge_pq(self, queue_name):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, 'queueName={0}'.format(queue_name),
method_name='purge_pq')
tmpLog.debug('start')
# sql to get jobs
sqlJ = "SELECT PandaID FROM {0} ".format(jobTableName)
sqlJ += "WHERE computingSite=:computingSite "
# sql to get workers
sqlW = "SELECT workerID FROM {0} ".format(workTableName)
sqlW += "WHERE computingSite=:computingSite "
# sql to get queue configs
sqlQ = "SELECT configID FROM {0} ".format(queueConfigDumpTableName)
sqlQ += "WHERE queueName=:queueName "
# sql to delete job
sqlDJ = "DELETE FROM {0} ".format(jobTableName)
sqlDJ += "WHERE PandaID=:PandaID "
# sql to delete files
sqlDF = "DELETE FROM {0} ".format(fileTableName)
sqlDF += "WHERE PandaID=:PandaID "
# sql to delete events
sqlDE = "DELETE FROM {0} ".format(eventTableName)
sqlDE += "WHERE PandaID=:PandaID "
# sql to delete relations by job
sqlDRJ = "DELETE FROM {0} ".format(jobWorkerTableName)
sqlDRJ += "WHERE PandaID=:PandaID "
# sql to delete worker
sqlDW = "DELETE FROM {0} ".format(workTableName)
sqlDW += "WHERE workerID=:workerID "
# sql to delete relations by worker
sqlDRW = "DELETE FROM {0} ".format(jobWorkerTableName)
sqlDRW += "WHERE workerID=:workerID "
# sql to delete queue config
sqlDQ = "DELETE FROM {0} ".format(queueConfigDumpTableName)
sqlDQ += "WHERE configID=:configID "
# sql to delete panda queue
sqlDP = "DELETE FROM {0} ".format(pandaQueueTableName)
sqlDP += "WHERE queueName=:queueName "
# get jobs
varMap = dict()
varMap[':computingSite'] = queue_name
self.execute(sqlJ, varMap)
resJ = self.cur.fetchall()
for pandaID, in resJ:
varMap = dict()
varMap[':PandaID'] = pandaID
# delete job
self.execute(sqlDJ, varMap)
# delete files
self.execute(sqlDF, varMap)
# delete events
self.execute(sqlDE, varMap)
# delete relations
self.execute(sqlDRJ, varMap)
# get workers
varMap = dict()
varMap[':computingSite'] = queue_name
self.execute(sqlW, varMap)
resW = self.cur.fetchall()
for workerID, in resW:
varMap = dict()
varMap[':workerID'] = workerID
# delete workers
self.execute(sqlDW, varMap)
# delete relations
self.execute(sqlDRW, varMap)
# get queue configs
varMap = dict()
varMap[':queueName'] = queue_name
self.execute(sqlQ, varMap)
resQ = self.cur.fetchall()
for configID, in resQ:
varMap = dict()
varMap[':configID'] = configID
# delete queue configs
self.execute(sqlDQ, varMap)
# delete panda queue
varMap = dict()
varMap[':queueName'] = queue_name
self.execute(sqlDP, varMap)
# commit
self.commit()
tmpLog.debug('done')
return True
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return False
# disable multi workers
def disable_multi_workers(self, panda_id):
tmpLog = None
try:
# get logger
tmpLog = core_utils.make_logger(_logger, 'PandaID={0}'.format(panda_id),
method_name='disable_multi_workers')
tmpLog.debug('start')
# sql to update flag
sqlJ = "UPDATE {0} SET moreWorkers=0 ".format(jobTableName)
sqlJ += "WHERE PandaID=:pandaID AND nWorkers IS NOT NULL AND nWorkersLimit IS NOT NULL "
sqlJ += "AND nWorkers>0 "
# set flag
varMap = dict()
varMap[':pandaID'] = panda_id
self.execute(sqlJ, varMap)
nRow = self.cur.rowcount
# commit
self.commit()
tmpLog.debug('done with {0}'.format(nRow))
# return
return nRow
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(tmpLog)
# return
return None
# update PQ table
def update_panda_queue_attribute(self, key, value, site_name=None, queue_name=None):
tmpLog = None
try:
# get logger
tmpLog = core_utils.make_logger(_logger, 'site={0} queue={1}'.format(site_name, queue_name),
method_name='update_panda_queue')
tmpLog.debug('start key={0}'.format(key))
# sql to update
sqlJ = "UPDATE {0} SET {1}=:{1} ".format(pandaQueueTableName, key)
sqlJ += "WHERE "
varMap = dict()
varMap[':{0}'.format(key)] = value
if site_name is not None:
sqlJ += "siteName=:siteName "
varMap[':siteName'] = site_name
else:
sqlJ += "queueName=:queueName "
varMap[':queueName'] = queue_name
# update
self.execute(sqlJ, varMap)
nRow = self.cur.rowcount
# commit
self.commit()
tmpLog.debug('done with {0}'.format(nRow))
# return
return True
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(tmpLog)
# return
return False
# delete orphaned job info
def delete_orphaned_job_info(self):
try:
# get logger
tmpLog = core_utils.make_logger(_logger,
method_name='delete_orphaned_job_info')
tmpLog.debug('start')
# sql to get job info to be deleted
sqlGJ = "SELECT PandaID FROM {0} "
sqlGJ += "WHERE PandaID NOT IN ("
sqlGJ += "SELECT PandaID FROM {1}) "
# sql to delete job info
sqlDJ = "DELETE FROM {0} "
sqlDJ += "WHERE PandaID=:PandaID "
# sql to delete files
sqlDF = "DELETE FROM {0} ".format(fileTableName)
sqlDF += "WHERE PandaID=:PandaID "
# sql to delete events
sqlDE = "DELETE FROM {0} ".format(eventTableName)
sqlDE += "WHERE PandaID=:PandaID "
# sql to delete relations
sqlDR = "DELETE FROM {0} ".format(jobWorkerTableName)
sqlDR += "WHERE PandaID=:PandaID "
# loop over all tables
for tableName in [fileTableName, eventTableName, jobWorkerTableName]:
# get job info
self.execute(sqlGJ.format(tableName, jobTableName))
resGJ = self.cur.fetchall()
nDel = 0
for pandaID, in resGJ:
# delete
varMap = dict()
varMap[':PandaID'] = pandaID
self.execute(sqlDJ.format(tableName), varMap)
iDel = self.cur.rowcount
if iDel > 0:
nDel += iDel
# commit
self.commit()
tmpLog.debug('deleted {0} records from {1}'.format(nDel, tableName))
return True
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return False
# lock worker again to feed events
def lock_worker_again_to_feed_events(self, worker_id, locked_by):
try:
tmpLog = core_utils.make_logger(_logger, 'workerID={0}'.format(worker_id),
method_name='lock_worker_again_to_feed_events')
tmpLog.debug('start id={0}'.format(locked_by))
# check lock
sqlC = "SELECT eventFeedLock,eventFeedTime FROM {0} ".format(workTableName)
sqlC += "WHERE workerID=:workerID "
sqlC += "FOR UPDATE "
varMap = dict()
varMap[':workerID'] = worker_id
self.execute(sqlC, varMap)
resC = self.cur.fetchone()
if resC is None:
retVal = False
tmpLog.debug('not found')
else:
oldLockedBy, oldLockedTime = resC
if oldLockedBy != locked_by:
tmpLog.debug('locked by another {0} at {1}'.format(oldLockedBy, oldLockedTime))
retVal = False
else:
# update locked time
sqlU = "UPDATE {0} SET eventFeedTime=:timeNow WHERE workerID=:workerID ".format(workTableName)
varMap = dict()
varMap[':workerID'] = worker_id
varMap[':timeNow'] = datetime.datetime.utcnow()
self.execute(sqlU, varMap)
retVal = True
# commit
self.commit()
tmpLog.debug('done with {0}'.format(retVal))
# return
return retVal
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return False
# insert service metrics
def insert_service_metrics(self, service_metric_spec):
# get logger
tmpLog = core_utils.make_logger(_logger, method_name='insert_service_metrics')
tmpLog.debug('start')
try:
sql = "INSERT INTO {0} ({1}) ".format(serviceMetricsTableName, ServiceMetricSpec.column_names())
sql += ServiceMetricSpec.bind_values_expression()
var_map = service_metric_spec.values_list()
self.execute(sql, var_map)
self.commit()
return True
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(tmpLog)
# return
return False
# get service metrics
def get_service_metrics(self, last_update):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, method_name='get_service_metrics')
tmpLog.debug('start with last_update: {0}'.format(last_update))
sql = "SELECT creationTime, hostName, metrics FROM {0} ".format(serviceMetricsTableName)
sql += "WHERE creationTime>=:last_update "
var_map = {':last_update': last_update}
self.execute(sql, var_map)
res = self.cur.fetchall()
# change datetime objects to strings for json serialization later
res_corrected = []
for entry in res:
try:
res_corrected.append([entry[0].strftime('%Y-%m-%d %H:%M:%S.%f'), entry[1], entry[2]])
except Exception:
pass
# commit
self.commit()
tmpLog.debug('got {0}'.format(str(res)))
return res_corrected
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return {}
# release a site
def release_site(self, site_name, locked_by):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, method_name='release_site')
tmpLog.debug('start')
# sql to release site
sql = "UPDATE {0} SET lockedBy=NULL ".format(pandaQueueTableName)
sql += "WHERE siteName=:siteName AND lockedBy=:lockedBy "
# release site
varMap = dict()
varMap[':siteName'] = site_name
varMap[':lockedBy'] = locked_by
self.execute(sql, varMap)
n_done = self.cur.rowcount > 0
# commit
self.commit()
if n_done >= 1:
tmpLog.debug('released {0}'.format(site_name))
else:
                tmpLog.debug('found nothing to release for {0}. Skipped'.format(site_name))
# return
return True
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return False
# get workers via workerID
def get_workers_from_ids(self, ids):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, method_name='get_workers_from_ids')
tmpLog.debug('start')
# sql to get workers
sqlW = (
"SELECT workerID,configID,mapType FROM {workTableName} "
"WHERE workerID IN ({ids_str}) "
"AND status IN (:st_submitted,:st_running,:st_idle) "
).format(workTableName=workTableName, ids_str=','.join([ str(_) for _ in ids]))
# sql to get associated workerIDs
sqlA = (
"SELECT t.workerID FROM {jobWorkerTableName} t, {jobWorkerTableName} s, {workTableName} w "
"WHERE s.PandaID=t.PandaID AND s.workerID=:workerID "
"AND w.workerID=t.workerID AND w.status IN (:st_submitted,:st_running,:st_idle) "
).format(jobWorkerTableName=jobWorkerTableName, workTableName=workTableName)
# sql to get associated workers
sqlG = (
"SELECT {0} FROM {1} "
"WHERE workerID=:workerID "
).format(WorkSpec.column_names(), workTableName)
# sql to get associated PandaIDs
sqlP = (
"SELECT PandaID FROM {0} "
"WHERE workerID=:workerID "
).format(jobWorkerTableName)
# get workerIDs
timeNow = datetime.datetime.utcnow()
varMap = dict()
varMap[':st_submitted'] = WorkSpec.ST_submitted
varMap[':st_running'] = WorkSpec.ST_running
varMap[':st_idle'] = WorkSpec.ST_idle
self.execute(sqlW, varMap)
resW = self.cur.fetchall()
tmpWorkers = set()
for workerID, configID, mapType in resW:
# ignore configID
if not core_utils.dynamic_plugin_change():
configID = None
tmpWorkers.add((workerID, configID, mapType))
checkedIDs = set()
retVal = {}
for workerID, configID, mapType in tmpWorkers:
# skip
if workerID in checkedIDs:
continue
# get associated workerIDs
varMap = dict()
varMap[':workerID'] = workerID
varMap[':st_submitted'] = WorkSpec.ST_submitted
varMap[':st_running'] = WorkSpec.ST_running
varMap[':st_idle'] = WorkSpec.ST_idle
self.execute(sqlA, varMap)
resA = self.cur.fetchall()
workerIDtoScan = set()
for tmpWorkID, in resA:
workerIDtoScan.add(tmpWorkID)
# add original ID just in case since no relation when job is not yet bound
workerIDtoScan.add(workerID)
                # use only one worker per set (the smallest workerID) to avoid updating the same worker set concurrently
if mapType == WorkSpec.MT_MultiWorkers:
if workerID != min(workerIDtoScan):
continue
# get workers
queueName = None
workersList = []
for tmpWorkID in workerIDtoScan:
checkedIDs.add(tmpWorkID)
# get worker
varMap = dict()
varMap[':workerID'] = tmpWorkID
self.execute(sqlG, varMap)
resG = self.cur.fetchone()
workSpec = WorkSpec()
workSpec.pack(resG)
if queueName is None:
queueName = workSpec.computingSite
workersList.append(workSpec)
# get associated PandaIDs
varMap = dict()
varMap[':workerID'] = tmpWorkID
self.execute(sqlP, varMap)
resP = self.cur.fetchall()
workSpec.pandaid_list = []
for tmpPandaID, in resP:
workSpec.pandaid_list.append(tmpPandaID)
if len(workSpec.pandaid_list) > 0:
workSpec.nJobs = len(workSpec.pandaid_list)
# commit
self.commit()
# add
if queueName is not None:
retVal.setdefault(queueName, dict())
retVal[queueName].setdefault(configID, [])
retVal[queueName][configID].append(workersList)
tmpLog.debug('got {0}'.format(str(retVal)))
return retVal
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return {}
| apache-2.0 | 825,545,955,905,895,200 | 44.390634 | 119 | 0.493884 | false |
Remi-C/LOD_ordering_for_patches_of_points | script/test_octree_LOD.py | 1 | 7481 | # -*- coding: utf-8 -*-
"""
Created on Tue Dec 2 22:08:22 2014
@author: remi
"""
#trying to order points by octree with python
import numpy as np
from numpy import random, sqrt
from sklearn import preprocessing
import matplotlib.pyplot as plt
#defining a dummy entry: a random 2D pointcloud
pointcloud = random.rand(16*16,2);
index = np.arange(1,16*16+1)
#parameters
tot_level = 3 ;
#centering data so that the leftmost point is at x=0 and the bottom-most point is at y=0
pointcloud[:,0] = pointcloud[:,0]- np.amin(pointcloud[:,0]);
pointcloud[:,1] = pointcloud[:,1]- np.amin(pointcloud[:,1]);
#finding the max extent, in X or Y
max_r = max(np.amax(pointcloud[:,0])-np.amin(pointcloud[:,0]), np.amax(pointcloud[:,1])-np.amin(pointcloud[:,1]))
#dividing so the max extent is 1. Now the point cloud is between 0,1 and 0,1
pointcloud = pointcloud/ max_r ;
#we have to trick a little, so that for level 3 for instance, all values are between 0 and 7 included, but never reach 8.
pointcloud_int = np.trunc(abs((pointcloud*pow(2,tot_level)-0.0001))).astype(int)
plt.plot(pointcloud[:,0],pointcloud[:,1], 'ro') ;
plt.plot(pointcloud_int[:,0],pointcloud_int[:,1], 'ro') ;
plt.axis([-1, 8, -1, 8]) ;
plt.show() ;
plt.close('all');
result_point = pointcloud_int[rec_ar[:,0]]
plt.plot(result_point[:,0],result_point[:,1], 'ro') ;
rec_ar = np.array(rec)
piv_ar = np.array(piv)
plt.plot(piv_ar[:,0], piv_ar[:,1], 'ro') ;
np.binary_repr(1)
def bin(s):
return str(s) if s<=1 else bin(s>>1) + str(s&1)
def testBit(int_type, offset):
mask = 1 << offset
return( (int_type & mask)>0 )
testBit(8,1)
pointcloud_bin = np.binary_repr(pointcloud_int)
pointcloud_int >> (tot_level-1) ;
#np.binary_repr(8)
( ((pointcloud_int >> 1 ) << 1) ) >> (tot_level-1) ;
testBit(pointcloud_int[:,1],3)
#cut the input point cloud into 4 sub parts based on one bit value, starting from the most significant bit
point_cloud_0_0_mask = np.logical_and((testBit(pointcloud_int[:,0],2)==0) , (testBit(pointcloud_int[:,1],2)==0) ) ;
pivot = np.array([pow(2,tot_level-1),pow(2,tot_level-1)])
pointcloud_centered = pointcloud_int - pivot
#coordinate to work :
toto = np.array([1,2,3])
testBit(toto,1)
(pointcloud_int >>1 )>>5
pow(2,4)
1<<4
#
# level 0
result = list() ;
pointcloud_int ;
index
pivot
cur_lev = 0
rec = [];
#find the 0 level point
min_point = np.argmin(np.sum(np.abs(pointcloud_int - pivot ),axis=1))
result.append(list((index[min_point],cur_lev)))
#compute the 4 sub parts
for b_x in list((0,1)) :
for b_y in list((0,1)) :
#looping on all 4 sub parts
print b_x, b_y
rec.append (np.logical_and(
(testBit(pointcloud_int[:,0],2)>0)==b_x
,(testBit(pointcloud_int[:,1],2)>0)==b_y
)
)
testBit(pointcloud_int[:,0],2)
print (testBit(pointcloud_int[:,0],2)>0==b_x) ;
print (testBit(pointcloud_int[:,1],2)>0==b_y) ;
rec[b_x,b_y] = np.logical_and((testBit(pointcloud_int[:,0],2)>0==b_x)
,(testBit(pointcloud_int[:,1],2)>0==b_y) )
print rec
np.binary_repr(pointcloud_int[:,0] )
#given a point cloud
#compute the point closest to the center
def recursive_octree_ordering(point_array,index_array, center_point, level,tot_level, result,piv):
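    #for each cell: keep the point closest to the cell center as the representative
    #of this LOD level, then recurse into the 4 sub cells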
#importing necessary lib
import numpy as np;
#print for debug
# print '\n\n working on level : '+str(level);
# print 'input points: \n\t',point_array ;
# print 'index_array : \n\t',index_array;
# print 'center_point : \n\t',center_point;
# print 'level : \n\t',level;
# print 'tot_level : \n\t',tot_level;
# print 'result : \n\t',result;
#stopping condition : no points:
    if len(point_array) == 0:
return;
#updatig level;
sub_part_level = level+1 ;
print 'level ',level,' , points remaining : ',len(point_array) ;
print center_point;
piv.append(center_point);
#find the closest point to pivot
min_point = np.argmin(np.sum(np.abs(point_array - center_point ),axis=1))
result.append(list((index_array[min_point],level))) ;
#removing the found point from the array of points
#np.delete(point_array, min_point, axis=0) ;
#np.delete(index_array, min_point, axis=0) ;
#stopping if it remains only one pioint : we won't divide further, same if we have reached max depth
if (len(point_array) ==1 )|(level >= tot_level):
return;
#compute the 4 sub parts
for b_x in list((0,1)) :
for b_y in list((0,1)) :
#looping on all 4 sub parts
print (b_x*2-1), (b_y*2-1) ;
udpate_to_pivot = np.asarray([ (b_x*2-1)*(pow(2,tot_level - level -2 ))
,(b_y*2-1)*(pow(2,tot_level - level -2 ))
]);
sub_part_center_point = center_point +udpate_to_pivot;
# we want to iterateon
# we need to update : : point_array , index_array center_point , level
#update point_array and index_array : we need to find the points that are in the subparts
#update center point, we need to add/substract to previous pivot 2^level+11
#find the points concerned :
point_in_subpart_mask = np.logical_and(
testBit(point_array[:,0],tot_level - level-1) ==b_x
, testBit(point_array[:,1],tot_level - level -1) ==b_y ) ;
sub_part_points= point_array[point_in_subpart_mask];
sub_part_index = index_array[point_in_subpart_mask];
sub_part_center_point = center_point + np.asarray([
(b_x*2-1)*(pow(2,tot_level - level -2 ))
,(b_y*2-1)*(pow(2,tot_level - level -2 ))
]);
if len(sub_part_points)>=1:
recursive_octree_ordering(sub_part_points
,sub_part_index
, sub_part_center_point
, sub_part_level
, tot_level
, result
, piv);
continue;
else:
                print 'at level ',level,' bx by:',b_x,' ',b_y,' refusing to go on, ', len(sub_part_points), ' points remaining for this'
continue;
rec = [] ;
piv = [] ;
recursive_octree_ordering(pointcloud_int,index,pivot,0,3,rec, piv );
#recursive_octree_ordering(pointcloud_int,index, np.array([2,2]),1,3,rec, piv );
piv_ar = np.array(piv)
plt.plot(piv_ar[:,0], piv_ar[:,1], 'ro') ;
plot(x=pointcloud_int[:,0].T,y=pointcloud_int[:,1].T, marker='o', color='r', ls='' )
plt.plot(pointcloud_int.T, marker='o', color='r', ls='')
plt.imsave('/')
from mpl_toolkits.mplot3d import Axes3D
plt.scatter(pointcloud[:,0], pointcloud[:,1],c='red');
plt.scatter(pointcloud_int[:,0], pointcloud_int[:,1],c='green');
plt.plot(pointcloud[:,0],pointcloud[:,1], 'ro')
plt.plot(pointcloud_int[:,0],pointcloud_int[:,1], 'ro')
plt.axis([-1, 8, -1, 8])
plt.show();
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(pointcloud_int[:,0], pointcloud_int[:,1]);
ax.scatter(pointcloud_int[:,0], pointcloud_int[:,1], pointcloud_int[:,0], zdir='z', c= 'red')
fig.show()
fig, axes = plt.subplots(1, 2, figsize=(12,3))
axes[0].scatter(pointcloud[:,0], pointcloud[:,1],c='red');
axes[1].scatter(pointcloud_int[:,0], pointcloud_int[:,1],c='green');
fig.show();
for f in list((0,1)):
(f*2-1)
import octree_ordering | lgpl-3.0 | 6,503,298,321,312,166,000 | 31.672489 | 137 | 0.586285 | false |
maxive/erp | addons/mail/models/update.py | 12 | 5056 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import datetime
import logging
import requests
import werkzeug.urls
from ast import literal_eval
from odoo import api, release, SUPERUSER_ID
from odoo.exceptions import UserError
from odoo.models import AbstractModel
from odoo.tools.translate import _
from odoo.tools import config, misc, ustr
_logger = logging.getLogger(__name__)
class PublisherWarrantyContract(AbstractModel):
_name = "publisher_warranty.contract"
@api.model
def _get_message(self):
Users = self.env['res.users']
IrParamSudo = self.env['ir.config_parameter'].sudo()
dbuuid = IrParamSudo.get_param('database.uuid')
db_create_date = IrParamSudo.get_param('database.create_date')
limit_date = datetime.datetime.now()
limit_date = limit_date - datetime.timedelta(15)
limit_date_str = limit_date.strftime(misc.DEFAULT_SERVER_DATETIME_FORMAT)
nbr_users = Users.search_count([('active', '=', True)])
nbr_active_users = Users.search_count([("login_date", ">=", limit_date_str), ('active', '=', True)])
nbr_share_users = 0
nbr_active_share_users = 0
if "share" in Users._fields:
nbr_share_users = Users.search_count([("share", "=", True), ('active', '=', True)])
nbr_active_share_users = Users.search_count([("share", "=", True), ("login_date", ">=", limit_date_str), ('active', '=', True)])
user = self.env.user
domain = [('application', '=', True), ('state', 'in', ['installed', 'to upgrade', 'to remove'])]
apps = self.env['ir.module.module'].sudo().search_read(domain, ['name'])
enterprise_code = IrParamSudo.get_param('database.enterprise_code')
web_base_url = IrParamSudo.get_param('web.base.url')
msg = {
"dbuuid": dbuuid,
"nbr_users": nbr_users,
"nbr_active_users": nbr_active_users,
"nbr_share_users": nbr_share_users,
"nbr_active_share_users": nbr_active_share_users,
"dbname": self._cr.dbname,
"db_create_date": db_create_date,
"version": release.version,
"language": user.lang,
"web_base_url": web_base_url,
"apps": [app['name'] for app in apps],
"enterprise_code": enterprise_code,
}
if user.partner_id.company_id:
company_id = user.partner_id.company_id
msg.update(company_id.read(["name", "email", "phone"])[0])
return msg
@api.model
def _get_sys_logs(self):
"""
Utility method to send a publisher warranty get logs messages.
"""
msg = self._get_message()
arguments = {'arg0': ustr(msg), "action": "update"}
url = config.get("publisher_warranty_url")
r = requests.post(url, data=arguments, timeout=30)
r.raise_for_status()
return literal_eval(r.text)
@api.multi
def update_notification(self, cron_mode=True):
"""
Send a message to Odoo's publisher warranty server to check the
validity of the contracts, get notifications, etc...
@param cron_mode: If true, catch all exceptions (appropriate for usage in a cron).
@type cron_mode: boolean
"""
try:
try:
result = self._get_sys_logs()
except Exception:
if cron_mode: # we don't want to see any stack trace in cron
return False
_logger.debug("Exception while sending a get logs messages", exc_info=1)
raise UserError(_("Error during communication with the publisher warranty server."))
# old behavior based on res.log; now on mail.message, that is not necessarily installed
user = self.env['res.users'].sudo().browse(SUPERUSER_ID)
poster = self.sudo().env.ref('mail.channel_all_employees')
if not (poster and poster.exists()):
if not user.exists():
return True
poster = user
for message in result["messages"]:
try:
poster.message_post(body=message, subtype='mt_comment', partner_ids=[user.partner_id.id])
except Exception:
pass
if result.get('enterprise_info'):
# Update expiration date
set_param = self.env['ir.config_parameter'].sudo().set_param
set_param('database.expiration_date', result['enterprise_info'].get('expiration_date'))
set_param('database.expiration_reason', result['enterprise_info'].get('expiration_reason', 'trial'))
set_param('database.enterprise_code', result['enterprise_info'].get('enterprise_code'))
except Exception:
if cron_mode:
return False # we don't want to see any stack trace in cron
else:
raise
return True
| agpl-3.0 | -3,973,846,673,264,079,000 | 40.442623 | 140 | 0.584652 | false |
mgr01/antfs-cli | setup.py | 1 | 2385 | # antfs-cli distutils setup script
#
# Copyright (c) 2012, Gustav Tiger <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from __future__ import absolute_import, print_function
from setuptools import setup
try:
with open('README.md') as file:
long_description = file.read()
except IOError:
long_description = ''
setup(name='antfs-cli',
version='0.2',
description='ANT-FS Command Line Interface',
long_description=long_description,
author='Gustav Tiger',
author_email='[email protected]',
packages=['antfs_cli'],
entry_points={
'console_scripts': ['antfs-cli=antfs_cli.program:main']
},
url='https://github.com/Tigge/antfs-cli',
classifiers=['Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Healthcare Industry',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4'],
dependency_links=['git+https://github.com/Tigge/openant.git#egg=openant-0.2'],
install_requires=['openant>=0.2'],
test_suite='tests')
| mit | -6,343,828,656,502,855,000 | 38.098361 | 85 | 0.67631 | false |
sclaggett/NetworkMonitor | src/PingThread.py | 1 | 5327 | """ PingThread.py
Thread that pings the target server """
# Imports
import NetworkMonitor
import re
import ThreadBase
import time
import subprocess
class PingThread(ThreadBase.ThreadBase):
def __init__(self, platform, targetServer, interval, logFile, outputFile):
"""Constructor"""
# Initialize variables
super(PingThread, self).__init__("Ping", interval, logFile, outputFile)
self.platform = platform
self.targetServer = targetServer
self.outputFile.SetFileHeader("Packets sent\tPackets received\tMinimum (ms)\tAverage (ms)\tMaximum (ms)\tStdev (ms)\tTotal time(s)")
if self.platform == NetworkMonitor.PLATFORM_LINUX:
# Ping output lines of interest look like the following on Ubuntu:
# 4 packets transmitted, 4 received, 0% packet loss, time 3004ms
# rtt min/avg/max/mdev = 47.014/51.046/62.049/6.368 ms
self.command = "ping -c 4 -q %s" % self.targetServer
self.regEx1 = re.compile("([0-9]+) packets transmitted, ([0-9]+) received, ([0-9.]+)% packet loss, time ([0-9]+)ms")
self.regEx1Groups = 4
self.regEx2 = re.compile("rtt min/avg/max/mdev = ([0-9.]+)/([0-9.]+)/([0-9.]+)/([0-9.]+) ms")
self.regEx2Groups = 4
elif self.platform == NetworkMonitor.PLATFORM_MACOS:
# Ping output lines of interest look like the following on Mac OS:
# 4 packets transmitted, 4 packets received, 0.0% packet loss
# round-trip min/avg/max/stddev = 47.399/48.315/50.227/1.117 ms
self.command = "ping -c 4 -q %s" % self.targetServer
self.regEx1 = re.compile("([0-9]+) packets transmitted, ([0-9]+) packets received, ([0-9.]+)% packet loss")
self.regEx1Groups = 3
self.regEx2 = re.compile("round-trip min/avg/max/stddev = ([0-9.]+)/([0-9.]+)/([0-9.]+)/([0-9.]+) ms")
self.regEx2Groups = 4
elif self.platform == NetworkMonitor.PLATFORM_WINDOWS:
# Ping output lines of interest look like the following on Windows:
# Packets: Sent = 4, Received = 4, Lost = 0 (0% loss),
# Minimum = 45ms, Maximum = 58ms, Average = 49ms
self.command = "ping -n 4 %s" % self.targetServer
self.regEx1 = re.compile("Packets: Sent = ([0-9]+), Received = ([0-9]+), Lost = ([0-9]+) \(([0-9.]+)% loss\),")
self.regEx1Groups = 4
self.regEx2 = re.compile("Minimum = ([0-9.]+)ms, Maximum = ([0-9.]+)ms, Average = ([0-9.]+)ms")
self.regEx2Groups = 3
else:
raise Exception("Unknown platform: " + self.platform)
def PingServer(self):
"""Pings the server four times and returns statistics"""
# Create the process and read in the output lines
proc = subprocess.Popen(self.command,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
lines = proc.stdout.readlines()
# Windows requires each line to be decoded
if self.platform == NetworkMonitor.PLATFORM_WINDOWS:
newLines = []
for line in lines:
newLines.append(line.decode("utf-8"))
lines = newLines
# Wait until the process completes and parse the output
proc.wait()
packetsTransmitted = -1
packetsReceived = -1
min = -1
avg = -1
max = -1
stdev = -1
for line in lines:
strippedLine = line.strip()
result = self.regEx1.match(strippedLine)
if (result is not None) and (result.lastindex >= self.regEx1Groups):
packetsTransmitted = int(result.group(1))
packetsReceived = int(result.group(2))
result = self.regEx2.match(strippedLine)
if (result is not None) and (result.lastindex >= self.regEx2Groups):
min = float(result.group(1))
if self.platform != NetworkMonitor.PLATFORM_WINDOWS:
avg = float(result.group(2))
max = float(result.group(3))
else:
max = float(result.group(2))
avg = float(result.group(3))
if self.regEx2Groups == 4:
stdev = float(result.group(4))
if packetsTransmitted == -1:
# Failed to parse the output
if proc.returncode != 0:
error = "Ping command failed with the following output:\n"
else:
error = "Failed to parse ping output:\n"
for line in lines:
error += line
self.logFile.Write(error)
return (packetsTransmitted, packetsReceived, min, avg, max, stdev)
def Capture(self):
"""Pings the server and writes the statistics as the next data point"""
# Gather ping data
startTime = time.time()
pingPacketsTransmitted, pingPacketsReceived, pingMin, pingAverage, pingMax, pingStdev = self.PingServer()
elapsedTime = time.time() - startTime
# Write out the data point
self.outputFile.Write("%i\t%i\t%0.3f\t%0.3f\t%0.3f\t%0.3f\t%0.2f" % (pingPacketsTransmitted, pingPacketsReceived, pingMin, pingAverage, pingMax, pingStdev, elapsedTime))
| cc0-1.0 | 8,206,923,105,654,178,000 | 47.427273 | 177 | 0.575934 | false |
ZeitOnline/zeit.cms | src/zeit/cms/workingcopy/workingcopy.py | 1 | 5395 | import grokcore.component
import z3c.traverser.interfaces
import zeit.cms.workingcopy.interfaces
import zope.app.container.btree
import zope.app.security.interfaces
import zope.component
import zope.dublincore.interfaces
import zope.interface
import zope.publisher.interfaces
import zope.security.interfaces
import zope.security.management
import zope.securitypolicy.interfaces
class Workingcopy(zope.app.container.btree.BTreeContainer):
"""The working copy is the area of the CMS where users edit content."""
zope.interface.implements(zeit.cms.workingcopy.interfaces.IWorkingcopy)
_order = ()
temporary = False # avoid migration of existing objects
def __init__(self, temporary=False):
super(Workingcopy, self).__init__()
self.temporary = temporary
def __iter__(self):
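        # yield the explicitly ordered keys newest-first, then any remaining keys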
for key in reversed(self._order):
yield key
for key in super(Workingcopy, self).__iter__():
if key in self._order:
continue
yield key
def values(self):
for key in self:
yield self[key]
def __setitem__(self, key, item):
if not zeit.cms.workingcopy.interfaces.ILocalContent.providedBy(item):
raise ValueError("Must provide ILocalContent")
super(Workingcopy, self).__setitem__(key, item)
self._order += (key, )
def __delitem__(self, key):
super(Workingcopy, self).__delitem__(key)
order = list(self._order)
try:
order.remove(key)
except ValueError:
pass
else:
self._order = tuple(order)
class WorkingcopyLocation(zope.app.container.btree.BTreeContainer):
"""Location for working copies of all users."""
zope.interface.implements(
zeit.cms.workingcopy.interfaces.IWorkingcopyLocation)
def getWorkingcopy(self):
"""Get the working copy for the currently logged in user."""
principal = self._get_principal()
return self.getWorkingcopyFor(principal)
def getWorkingcopyFor(self, principal):
principal_id = principal.id
try:
result = self[principal_id]
except KeyError:
# User doesn't have a working copy yet, create one
result = self[principal_id] = Workingcopy()
perms = (
zope.securitypolicy.interfaces.IPrincipalPermissionManager(
result))
perms.grantPermissionToPrincipal('zeit.EditContent', principal_id)
prm = zope.securitypolicy.interfaces.IPrincipalRoleManager(
result)
prm.assignRoleToPrincipal('zeit.Owner', principal_id)
try:
dc = zope.dublincore.interfaces.IDCDescriptiveProperties(
result)
except TypeError:
pass
else:
if principal.title:
dc.title = principal.title
if principal.description:
dc.description = principal.description
return result
def _get_principal(self):
# Find the current principal. Note that it is possible for there
# to be more than one principal - in this case we throw an error.
interaction = zope.security.management.getInteraction()
principal = None
for p in interaction.participations:
if principal is None:
principal = p.principal
else:
raise ValueError("Multiple principals found")
if principal is None:
raise ValueError("No principal found")
return principal
@zope.component.adapter(zope.security.interfaces.IPrincipal)
@zope.interface.implementer(zeit.cms.workingcopy.interfaces.IWorkingcopy)
def principalAdapter(context):
location = zope.component.getUtility(
zeit.cms.workingcopy.interfaces.IWorkingcopyLocation)
return location.getWorkingcopyFor(context)
@grokcore.component.adapter(None)
@grokcore.component.implementer(zeit.cms.workingcopy.interfaces.IWorkingcopy)
def workingcopy_for_current_principal(ignored):
# Find the current principal. Note that it is possible for there
# to be more than one principal - in this case adapting fails
try:
interaction = zope.security.management.getInteraction()
except zope.security.interfaces.NoInteraction:
return
principal = None
for p in interaction.participations:
if principal is None:
principal = p.principal
else:
return
if principal is None:
return
return zeit.cms.workingcopy.interfaces.IWorkingcopy(principal, None)
class WorkingcopyTraverser(object):
"""Traverses to working copies, creating them on the fly."""
zope.interface.implements(z3c.traverser.interfaces.IPluggableTraverser)
def __init__(self, context, request):
self.context = context
self.request = request
def publishTraverse(self, request, name):
auth = zope.component.getUtility(
zope.app.security.interfaces.IAuthentication)
try:
principal = auth.getPrincipal(name)
except zope.app.security.interfaces.PrincipalLookupError:
raise zope.publisher.interfaces.NotFound(
self.context, name, request)
return zeit.cms.workingcopy.interfaces.IWorkingcopy(principal)
| bsd-3-clause | -4,825,881,447,363,589,000 | 34.032468 | 78 | 0.653383 | false |
joaduo/mepinta | plugins/c_and_cpp/k3dv1/plugins_tests/inotify_tests/SelectFaceByNumber_test.py | 1 | 2567 | # -*- coding: utf-8 -*-
'''
Mepinta
Copyright (c) 2011-2012, Joaquin G. Duo
This file is part of Mepinta.
Mepinta is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Mepinta is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Mepinta. If not, see <http://www.gnu.org/licenses/>.
'''
from getDefaultContext import getDefaultContext
from pipeline_backend.logging.logging import LOG_INFO
from plugins_tests.base.K3dMeshPluginTest import K3dMeshPluginTest
class SelectFaceByNumber(K3dMeshPluginTest):
def __post_init__(self):
import plugins.c_and_cpp.processors.k3dv1.mesh.selection.bynumber.SelectFaceByNumber as select
self.testedProcessors.append(select)
def _createInputMesh(self, test_pline):
import plugins.c_and_cpp.processors.k3dv1.mesh.input.file.OBJMeshReader as obj_rdr
obj_node = test_pline.append(obj_rdr)
test_pline.setValue(obj_node.inputs.file, '/home/jduo/output.obj')
test_pline.defaultMarked.append(obj_node.functions.loadMesh)
return obj_node
def definePluginPipeline(self, test_pline):
select = self.testedProcessors[0]
n_sel = test_pline.append(select)
import plugins.c_and_cpp.processors.k3dv1.mesh.modifiers.polyhedron.ExtrudeFaces as ext_fac
n_ext = test_pline.append(ext_fac)
test_pline.setValue(n_sel.inputs.primitive_number, 0)
test_pline.setValue(n_sel.inputs.face_index, 0)
test_pline.setValue(n_ext.inputs.segments, 2)
test_pline.setValue(n_ext.inputs.distance, 4.0)
def getTimeParameters(self):
return self.time.startEndStepSleep(end=-15., step=2, sleep=0.1)
def stressPipeline(self, test_pline, time):
nodes = test_pline.getNodesDict()
node_sel = nodes['SelectFaceByNumber 1']
test_pline.setValue(node_sel.inputs.face_index, time *2)
node_ext = nodes['ExtrudeFaces 1']
test_pline.setValue(node_ext.inputs.distance, 1.0 + time/4.0)
test_pline.setValue(node_ext.inputs.segments, 1.0 + time/4.0)
test = SelectFaceByNumber
if __name__ == "__main__":
sfbn = SelectFaceByNumber(getDefaultContext(LOG_INFO))
| gpl-3.0 | 727,680,934,833,170,600 | 37.893939 | 102 | 0.721465 | false |
adamwiggins/cocos2d | cocos/actions/interval_actions.py | 2 | 20878 | # ----------------------------------------------------------------------------
# cocos2d
# Copyright (c) 2008 Daniel Moisset, Ricardo Quesada, Rayentray Tappa, Lucio Torre
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of cocos2d nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Interval Action
Interval Actions
================
An interval action is an action that takes place within a certain period of time.
It has a start time and a finish time. The finish time is the parameter
``duration`` plus the start time.
These `IntervalAction` objects have some interesting properties:
- They can run normally (default)
- They can run reversed with the `Reverse` action.
- They can run with the time altered with the `Accelerate`, `AccelDeccel` and
`Speed` actions.
For example, you can simulate a Ping Pong effect by running the action normally and
then running it again in Reverse mode.
Example::
ping_pong_action = action + Reverse( action )
Available IntervalActions
=========================
* `MoveTo`
* `MoveBy`
* `JumpTo`
* `JumpBy`
* `Bezier`
* `Blink`
* `RotateTo`
* `RotateBy`
* `ScaleTo`
* `ScaleBy`
* `FadeOut`
* `FadeIn`
* `FadeTo`
* `Delay`
* `RandomDelay`
Modifier actions
================
* `Accelerate`
* `AccelDeccel`
* `Speed`
Examples::
move = MoveBy( (200,0), duration=5 ) # Moves 200 pixels to the right in 5 seconds.
move = MoveTo( (320,240), duration=5) # Moves to the pixel (320,240) in 5 seconds
jump = JumpBy( (320,0), 100, 5, duration=5) # Jumps to the right 320 pixels
# doing 5 jumps of 100 pixels
# of height in 5 seconds
accel_move = Accelerate(move) # accelerates action move
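
    # a combined sketch: run the move accelerated, then undo it in reverse
    there_and_back = Accelerate( move ) + Reverse( Accelerate( move ) )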
'''
__docformat__ = 'restructuredtext'
import random
import copy
import math
from base_actions import *
from cocos.euclid import *
__all__ = [ 'Lerp', # interpolation
'MoveTo','MoveBy', # movement actions
'Jump', 'JumpTo', 'JumpBy',
'Bezier', # complex movement actions
'Rotate',"RotateTo", "RotateBy", # object rotation
'ScaleTo','ScaleBy', # object scale
'Delay','RandomDelay', # Delays
'FadeOut','FadeIn','FadeTo', # Fades in/out action
'Blink', # Blink action
'Accelerate','AccelDeccel','Speed', # Time alter actions
]
class Lerp( IntervalAction ):
"""
Interpolate between values for some specified attribute
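
    Example::

        # illustrative sketch; assumes the target exposes a 'scale' attribute
        action = Lerp( 'scale', 1.0, 2.0, duration=3 )
        sprite.do( action )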
"""
def init(self, attrib, start, end, duration):
"""Init method.
:Parameters:
`attrib` : string
                The name of the attribute where the value is stored
`start` : float
The start value
`end` : float
The end value
`duration` : float
Duration time in seconds
"""
self.attrib = attrib
self.duration = duration
self.start_p = start
self.end_p = end
self.delta = end-start
def update(self, t):
setattr(self.target, self.attrib,
self.start_p + self.delta * t
)
def __reversed__(self):
return Lerp(self.attrib, self.end_p, self.start_p, self.duration)
class RotateBy( IntervalAction ):
"""Rotates a `CocosNode` object clockwise a number of degrees
    by modifying its rotation attribute.
Example::
# rotates the sprite 180 degrees in 2 seconds
action = RotateBy( 180, 2 )
sprite.do( action )
"""
def init(self, angle, duration ):
"""Init method.
:Parameters:
`angle` : float
Degrees that the sprite will be rotated.
Positive degrees rotates the sprite clockwise.
`duration` : float
Duration time in seconds
"""
self.angle = angle #: Quantity of degrees to rotate
self.duration = duration #: Duration in seconds
def start( self ):
self.start_angle = self.target.rotation
def update(self, t):
self.target.rotation = (self.start_angle + self.angle * t ) % 360
def __reversed__(self):
return RotateBy(-self.angle, self.duration)
Rotate = RotateBy
class RotateTo( IntervalAction ):
"""Rotates a `CocosNode` object to a certain angle by modifying it's
rotation attribute.
The direction will be decided by the shortest angle.
Example::
# rotates the sprite to angle 180 in 2 seconds
action = RotateTo( 180, 2 )
sprite.do( action )
"""
def init(self, angle, duration ):
"""Init method.
:Parameters:
`angle` : float
Destination angle in degrees.
`duration` : float
Duration time in seconds
"""
self.angle = angle%360 #: Destination angle in degrees
self.duration = duration #: Duration in seconds
def start( self ):
ea = self.angle
sa = self.start_angle = (self.target.rotation%360)
self.angle = ((ea%360) - (sa%360))
if self.angle > 180:
self.angle = -360+self.angle
if self.angle < -180:
self.angle = 360+self.angle
def update(self, t):
self.target.rotation = (self.start_angle + self.angle * t ) % 360
def __reversed__(self):
return RotateTo(-self.angle, self.duration)
class Speed( IntervalAction ):
"""
Changes the speed of an action, making it take longer (speed<1)
or less (speed>1)
Example::
# rotates the sprite 180 degrees clockwise in 1 second
action = Speed( Rotate( 180, 2 ), 2 )
sprite.do( action )
"""
def init(self, other, speed ):
"""Init method.
:Parameters:
`other` : IntervalAction
The action that will be affected
`speed` : float
The speed change. 1 is no change.
2 means twice as fast, takes half the time
0.5 means half as fast, takes double the time
"""
self.other = other
self.speed = speed
self.duration = other.duration/speed
def start(self):
self.other.target = self.target
self.other.start()
def update(self, t):
self.other.update( t )
def __reversed__(self):
return Speed( Reverse( self.other ), self.speed )
class Accelerate( IntervalAction ):
"""
Changes the acceleration of an action
Example::
# rotates the sprite 180 degrees in 2 seconds clockwise
# it starts slow and ends fast
action = Accelerate( Rotate( 180, 2 ), 4 )
sprite.do( action )
"""
def init(self, other, rate = 2):
"""Init method.
:Parameters:
`other` : IntervalAction
The action that will be affected
`rate` : float
The acceleration rate. 1 is linear.
the new t is t**rate
"""
self.other = other
self.rate = rate
self.duration = other.duration
def start(self):
self.other.target = self.target
self.other.start()
def update(self, t):
self.other.update( t**self.rate )
def __reversed__(self):
return Accelerate(Reverse(self.other), 1.0/self.rate)
class AccelDeccel( IntervalAction ):
"""
Makes an action change the travel speed but retain near normal
speed at the beginning and ending.
Example::
# rotates the sprite 180 degrees in 2 seconds clockwise
# it starts slow, gets fast and ends slow
action = AccelDeccel( RotateBy( 180, 2 ) )
sprite.do( action )
"""
def init(self, other):
"""Init method.
:Parameters:
`other` : IntervalAction
The action that will be affected
"""
self.other = other
self.duration = other.duration
def start(self):
self.other.target = self.target
self.other.start()
def update(self, t):
ft = (t-0.5) * 12
nt = 1./( 1. + math.exp(-ft) )
self.other.update( nt )
def __reversed__(self):
return AccelDeccel( Reverse(self.other) )
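# Illustrative sketch: AccelDeccel.update() above remaps linear time through
# a logistic curve centred on t=0.5, so the wrapped action starts slowly,
# speeds up in the middle and eases out again. The helper below computes the
# same remapping on its own, purely as an illustration.
def _accel_deccel_warp_sketch(t):
    """Return the warped time AccelDeccel would feed to the inner action."""
    ft = (t - 0.5) * 12
    return 1. / (1. + math.exp(-ft))
# _accel_deccel_warp_sketch(0.0) ~= 0.0025, (0.5) == 0.5, (1.0) ~= 0.9975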
class MoveTo( IntervalAction ):
"""Moves a `CocosNode` object to the position x,y. x and y are absolute coordinates
by modifying its position attribute.
Example::
# Move the sprite to coords x=50, y=10 in 8 seconds
action = MoveTo( (50,10), 8 )
sprite.do( action )
"""
def init(self, dst_coords, duration=5):
"""Init method.
:Parameters:
`dst_coords` : (x,y)
Coordinates where the sprite will be placed at the end of the action
`duration` : float
Duration time in seconds
"""
self.end_position = Point2( *dst_coords )
self.duration = duration
def start( self ):
self.start_position = self.target.position
self.delta = self.end_position-self.start_position
def update(self,t):
self.target.position = self.start_position + self.delta * t
class MoveBy( MoveTo ):
"""Moves a `CocosNode` object x,y pixels by modifying its
position attribute.
x and y are relative to the position of the object.
Duration is in seconds.
Example::
# Move the sprite 50 pixels to the left in 8 seconds
action = MoveBy( (-50,0), 8 )
sprite.do( action )
"""
def init(self, delta, duration=5):
"""Init method.
:Parameters:
`delta` : (x,y)
Delta coordinates
`duration` : float
Duration time in seconds
"""
self.delta = Point2( *delta )
self.duration = duration
def start( self ):
self.start_position = self.target.position
self.end_position = self.start_position + self.delta
def __reversed__(self):
return MoveBy(-self.delta, self.duration)
class FadeOut( IntervalAction ):
"""Fades out a `CocosNode` object by modifying its opacity attribute.
Example::
action = FadeOut( 2 )
sprite.do( action )
"""
def init( self, duration ):
"""Init method.
:Parameters:
`duration` : float
Seconds that it will take to fade
"""
self.duration = duration
def update( self, t ):
self.target.opacity = 255 * (1-t)
def __reversed__(self):
return FadeIn( self.duration )
class FadeTo( IntervalAction ):
"""Fades a `CocosNode` object to a specific alpha value by modifying its opacity attribute.
Example::
action = FadeTo( 128, 2 )
sprite.do( action )
"""
def init( self, alpha, duration ):
"""Init method.
:Parameters:
`alpha` : float
0-255 value of opacity
`duration` : float
Seconds that it will take to fade
"""
self.alpha = alpha
self.duration = duration
def start(self):
self.start_alpha = self.target.opacity
def update( self, t ):
self.target.opacity = self.start_alpha + (
self.alpha - self.start_alpha
) * t
class FadeIn( FadeOut):
"""Fades in a `CocosNode` object by modifying its opacity attribute.
Example::
action = FadeIn( 2 )
sprite.do( action )
"""
def update( self, t ):
self.target.opacity = 255 * t
def __reversed__(self):
return FadeOut( self.duration )
class ScaleTo(IntervalAction):
"""Scales a `CocosNode` object to a zoom factor by modifying its scale attribute.
Example::
# scales the sprite to 5x in 2 seconds
action = ScaleTo( 5, 2 )
sprite.do( action )
"""
def init(self, scale, duration=5 ):
"""Init method.
:Parameters:
`scale` : float
scale factor
`duration` : float
Duration time in seconds
"""
self.end_scale = scale
self.duration = duration
def start( self ):
self.start_scale = self.target.scale
self.delta = self.end_scale-self.start_scale
def update(self, t):
self.target.scale = self.start_scale + self.delta * t
class ScaleBy(ScaleTo):
"""Scales a `CocosNode` object by a zoom factor by modifying its scale attribute.
Example::
# scales the sprite by 5x in 2 seconds
action = ScaleBy( 5, 2 )
sprite.do( action )
"""
def start( self ):
self.start_scale = self.target.scale
self.delta = self.start_scale*self.end_scale - self.start_scale
def __reversed__(self):
return ScaleBy( 1.0/self.end_scale, self.duration )
class Blink( IntervalAction ):
"""Blinks a `CocosNode` object by modifying its visible attribute
Example::
# Blinks 10 times in 2 seconds
action = Blink( 10, 2 )
sprite.do( action )
"""
def init(self, times, duration):
"""Init method.
:Parameters:
`times` : integer
Number of times to blink
`duration` : float
Duration time in seconds
"""
self.times = times
self.duration = duration
def update(self, t):
slice = 1 / float( self.times )
m = t % slice
self.target.visible = (m > slice / 2.0)
def __reversed__(self):
return self
class Bezier( IntervalAction ):
"""Moves a `CocosNode` object through a bezier path by modifying its position attribute.
Example::
action = Bezier( bezier_conf.path1, 5 ) # Moves the sprite using the
sprite.do( action ) # bezier path 'bezier_conf.path1'
# in 5 seconds
"""
def init(self, bezier, duration=5, forward=True):
"""Init method
:Parameters:
`bezier` : bezier_configuration instance
A bezier configuration
`duration` : float
Duration time in seconds
"""
self.duration = duration
self.bezier = bezier
self.forward = forward
def start( self ):
self.start_position = self.target.position
def update(self,t):
if self.forward:
p = self.bezier.at( t )
else:
p = self.bezier.at( 1-t )
self.target.position = ( self.start_position +Point2( *p ) )
def __reversed__(self):
return Bezier(self.bezier, self.duration, not self.forward)
class Jump(IntervalAction):
"""Moves a `CocosNode` object simulating a jump movement by modifying its position attribute.
Example::
action = Jump(50,200, 5, 6) # Move the sprite 200 pixels to the right
sprite.do( action ) # in 6 seconds, doing 5 jumps
# of 50 pixels of height
"""
def init(self, y=150, x=120, jumps=1, duration=5):
"""Init method
:Parameters:
`y` : integer
Height of jumps
`x` : integer
horizontal movement relative to the starting position
`jumps` : integer
quantity of jumps
`duration` : float
Duration time in seconds
"""
import warnings
warnings.warn('Deprecated "Jump" action. Consider using JumpBy instead', DeprecationWarning)
self.y = y
self.x = x
self.duration = duration
self.jumps = jumps
def start( self ):
self.start_position = self.target.position
def update(self, t):
y = int( self.y * abs( math.sin( t * math.pi * self.jumps ) ) )
x = self.x * t
self.target.position = self.start_position + Point2(x,y)
def __reversed__(self):
return Jump(self.y, -self.x, self.jumps, self.duration)
class JumpBy(IntervalAction):
"""Moves a `CocosNode` object simulating a jump movement by modifying its position attribute.
Example::
# Move the sprite 100 pixels to the right and 100 up
action = JumpBy((100,100),200, 5, 6)
sprite.do( action ) # in 6 seconds, doing 5 jumps
# of 200 pixels of height
"""
def init(self, position=(0,0), height=100, jumps=1, duration=5):
"""Init method
:Parameters:
`position` : integer x integer tuple
horizontal and vertical movement relative to the
starting position
`height` : integer
Height of jumps
`jumps` : integer
quantity of jumps
`duration` : float
Duration time in seconds
"""
self.position = position
self.height = height
self.duration = duration
self.jumps = jumps
def start( self ):
self.start_position = self.target.position
self.delta = Vector2(*self.position)
def update(self, t):
y = int( self.height * abs( math.sin( t * math.pi * self.jumps ) ) )
y += self.delta[1] * t
x = self.delta[0] * t
self.target.position = self.start_position + Point2(x,y)
def __reversed__(self):
return JumpBy( (-self.position[0],-self.position[1]), self.height, self.jumps, self.duration)
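# Illustrative sketch: JumpBy.update() above adds a rectified sine wave (the
# arcs of the jumps) to a linear drift towards the requested delta. The
# helper below evaluates one point of that trajectory with the same formula;
# it only exists to make the math easy to inspect.
def _jump_offset_sketch(delta, height, jumps, t):
    """Return the (x, y) offset JumpBy would apply at normalized time t."""
    y = int(height * abs(math.sin(t * math.pi * jumps))) + delta[1] * t
    x = delta[0] * t
    return (x, y)
# _jump_offset_sketch((100, 0), 50, 2, 0.25) == (25.0, 50.0)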
class JumpTo(JumpBy):
"""Moves a `CocosNode` object to a position simulating a jump movement by modifying
its position attribute.
Example::
action = JumpTo((200,0), 50, 5, 6) # Move the sprite to position (200,0)
sprite.do( action ) # in 6 seconds, doing 5 jumps
# of 50 pixels of height
"""
def start( self ):
self.start_position = self.target.position
self.delta = Vector2(*self.position)-self.start_position
class Delay(IntervalAction):
"""Delays the action a certain amount of seconds
Example::
action = Delay(2.5)
sprite.do( action )
"""
def init(self, delay):
"""Init method
:Parameters:
`delay` : float
Seconds of delay
"""
self.duration = delay
def __reversed__(self):
return self
class RandomDelay(Delay):
"""Delays the actions between *min* and *max* seconds
Example::
action = RandomDelay(2.5, 4.5) # delays the action between 2.5 and 4.5 seconds
sprite.do( action )
"""
def init(self, low, hi):
"""Init method
:Parameters:
`low` : float
Minimum seconds of delay
`hi` : float
Maximum seconds of delay
"""
self.low = low
self.hi = hi
def __deepcopy__(self, memo):
new = copy.copy(self)
new.duration = self.low + (random.random() * (self.hi - self.low))
return new
| bsd-3-clause | -743,601,364,381,472,400 | 27.757576 | 101 | 0.563943 | false |
tiagocoutinho/bliss | bliss/controllers/lima/__init__.py | 1 | 6837 |
# -*- coding: utf-8 -*-
#
# This file is part of the bliss project
#
# Copyright (c) 2016 Beamline Control Unit, ESRF
# Distributed under the GNU LGPLv3. See LICENSE for more info.
import importlib
from bliss.common.tango import DeviceProxy
from bliss.config import settings
from .bpm import Bpm
from .roi import Roi, RoiCounters
class Lima(object):
ROI_COUNTERS = 'roicounter'
BPM = 'beamviewer'
class Image(object):
ROTATION_0,ROTATION_90,ROTATION_180,ROTATION_270 = range(4)
def __init__(self,proxy):
self._proxy = proxy
@property
def proxy(self):
return self._proxy
@property
def bin(self):
return self._proxy.image_bin
@bin.setter
def bin(self,values):
self._proxy.image_bin = values
@property
def flip(self):
return self._proxy.image_flip
@flip.setter
def flip(self,values):
self._proxy.image_flip = values
@property
def roi(self):
return Roi(*self._proxy.image_roi)
@roi.setter
def roi(self,roi_values):
if len(roi_values) == 4:
self._proxy.image_roi = roi_values
elif isinstance(roi_values[0],Roi):
roi = roi_values[0]
self._proxy.image_roi = (roi.x,roi.y,
roi.width,roi.height)
else:
raise TypeError("Lima.image: set roi only accepts roi (class)"
" or (x,y,width,height) values")
@property
def rotation(self):
rot_str = self._proxy.image_rotation
return {'NONE' : self.ROTATION_0,
'90' : self.ROTATION_90,
'180' : self.ROTATION_180,
'270' : self.ROTATION_270}.get(rot_str)
@rotation.setter
def rotation(self,rotation):
if isinstance(rotation,(str,unicode)):
self._proxy.image_rotation = rotation
else:
rot_str = {self.ROTATION_0 : 'NONE',
self.ROTATION_90 : '90',
self.ROTATION_180 : '180',
self.ROTATION_270 : '270'}.get(rotation)
if rot_str is None:
raise ValueError("Lima.image: rotation can only be 0,90,180 or 270")
self._proxy.image_rotation = rot_str
class Acquisition(object):
ACQ_MODE_SINGLE,ACQ_MODE_CONCATENATION,ACQ_MODE_ACCUMULATION = range(3)
def __init__(self,proxy):
self._proxy = proxy
acq_mode = (("SINGLE",self.ACQ_MODE_SINGLE),
("CONCATENATION",self.ACQ_MODE_CONCATENATION),
("ACCUMULATION",self.ACQ_MODE_ACCUMULATION))
self.__acq_mode_from_str = dict(acq_mode)
self.__acq_mode_from_enum = dict(((y,x) for x,y in acq_mode))
@property
def exposition_time(self):
"""
exposition time for a frame
"""
return self._proxy.acq_expo_time
@exposition_time.setter
def exposition_time(self,value):
self._proxy.acq_expo_time = value
@property
def mode(self):
"""
acquisition mode (SINGLE,CONCATENATION,ACCUMULATION)
"""
acq_mode = self._proxy.acq_mode
return self.__acq_mode_from_str.get(acq_mode)
@mode.setter
def mode(self,value):
mode_str = self.__acq_mode_from_enum.get(value)
if mode_str is None:
possible_modes = ','.join(('%d -> %s' % (y,x)
for x,y in self.__acq_mode_from_str.iteritems()))
raise ValueError("lima: acquisition mode can only be: %s" % possible_modes)
self._proxy.acq_mode = mode_str
@property
def trigger_mode(self):
"""
Trigger camera mode
"""
pass
@trigger_mode.setter
def trigger_mode(self,value):
pass
def __init__(self,name,config_tree):
"""Lima controller.
name -- the controller's name
config_tree -- controller configuration
in this dictionary we need to have:
tango_url -- tango main device url (from class LimaCCDs)
"""
self._proxy = DeviceProxy(config_tree.get("tango_url"))
self.name = name
self.__bpm = None
self.__roi_counters = None
self._camera = None
self._image = None
self._acquisition = None
@property
def proxy(self):
return self._proxy
@property
def image(self):
if self._image is None:
self._image = Lima.Image(self._proxy)
return self._image
@property
def shape(self):
return (-1, -1)
@property
def acquisition(self):
if self._acquisition is None:
self._acquisition = Lima.Acquisition(self._proxy)
return self._acquisition
@property
def roi_counters(self):
if self.__roi_counters is None:
roi_counters_proxy = self._get_proxy(self.ROI_COUNTERS)
self.__roi_counters = RoiCounters(self.name, roi_counters_proxy, self)
return self.__roi_counters
@property
def camera(self):
if self._camera is None:
camera_type = self._proxy.lima_type
proxy = self._get_proxy(camera_type)
camera_module = importlib.import_module('.%s' % camera_type,__package__)
self._camera = camera_module.Camera(self.name, proxy)
return self._camera
@property
def camera_type(self):
return self._proxy.camera_type
@property
def bpm(self):
if self.__bpm is None:
bpm_proxy = self._get_proxy(self.BPM)
self.__bpm = Bpm(self.name, bpm_proxy, self)
return self.__bpm
@property
def available_triggers(self):
"""
This will returns all availables triggers for the camera
"""
return self._proxy.getAttrStringValueList('acq_trigger_mode')
def prepareAcq(self):
self._proxy.prepareAcq()
def startAcq(self):
self._proxy.startAcq()
def _get_proxy(self,type_name):
device_name = self._proxy.getPluginDeviceNameFromType(type_name)
if not device_name:
return
if not device_name.startswith("//"):
# build 'fully qualified domain' name
# '.get_fqdn()' doesn't work
db_host = self._proxy.get_db_host()
db_port = self._proxy.get_db_port()
device_name = "//%s:%s/%s" % (db_host, db_port, device_name)
return DeviceProxy(device_name)
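# Illustrative usage sketch: driving this controller requires a reachable
# LimaCCDs Tango server; the "tango_url" below is an invented placeholder,
# not a real beamline device, so this is only a minimal sketch of the API.
def _lima_usage_sketch():
    cfg = {"tango_url": "id00/limaccds/simulator"}   # hypothetical device URL
    cam = Lima("simulator", cfg)
    cam.acquisition.exposition_time = 0.1            # 100 ms per frame
    cam.image.rotation = Lima.Image.ROTATION_90
    cam.prepareAcq()
    cam.startAcq()
    return cam.camera_type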
| lgpl-3.0 | 6,689,777,223,001,568,000 | 32.028986 | 92 | 0.539857 | false |
reinhrst/aio-s3 | aios3/bucket.py | 1 | 14612 |
import datetime
import hmac
import base64
import hashlib
import asyncio
from xml.etree.ElementTree import fromstring as parse_xml
from xml.etree.ElementTree import tostring as xml_tostring
from xml.etree.ElementTree import Element, SubElement
from functools import partial
from urllib.parse import quote
import aiohttp
from . import errors
amz_uriencode = partial(quote, safe='~')
amz_uriencode_slash = partial(quote, safe='~/')
S3_NS = 'http://s3.amazonaws.com/doc/2006-03-01/'
NS = {'s3': S3_NS}
_SIGNATURES = {}
SIGNATURE_V4 = 'v4'
class Key(object):
def __init__(self, *, key, last_modified, etag, size, storage_class):
self.key = key
self.last_modified = last_modified
self.etag = etag
self.size = size
self.storage_class = storage_class
@classmethod
def from_xml(Key, el):
return Key(
key=el.find('s3:Key', namespaces=NS).text,
last_modified=datetime.datetime.strptime(
el.find('s3:LastModified', namespaces=NS).text,
'%Y-%m-%dT%H:%M:%S.000Z'),
etag=el.find('s3:ETag', namespaces=NS).text,
size=int(el.find('s3:Size', namespaces=NS).text),
storage_class=el.find('s3:StorageClass', namespaces=NS).text)
def __repr__(self):
return '<Key {}:{}>'.format(self.key, self.size)
class Request(object):
def __init__(self, verb, resource, query, headers, payload):
self.verb = verb
self.resource = amz_uriencode_slash(resource)
self.params = query
self.query_string = '&'.join(k + '=' + v
for k, v in sorted((amz_uriencode(k), amz_uriencode(v))
for k, v in query.items()))
self.headers = headers
self.payload = payload
self.content_md5 = ''
@property
def url(self):
return 'https://{0.headers[HOST]}{0.resource}?{0.query_string}' \
.format(self)
def _hmac(key, val):
return hmac.new(key, val, hashlib.sha256).digest()
def _signkey(key, date, region, service):
date_key = _hmac(("AWS4" + key).encode('ascii'),
date.encode('ascii'))
date_region_key = _hmac(date_key, region.encode('ascii'))
svc_key = _hmac(date_region_key, service.encode('ascii'))
return _hmac(svc_key, b'aws4_request')
@partial(_SIGNATURES.setdefault, SIGNATURE_V4)
def sign_v4(req, *,
aws_key, aws_secret, aws_token, aws_service='s3', aws_region='us-east-1', **_):
time = datetime.datetime.utcnow()
date = time.strftime('%Y%m%d')
timestr = time.strftime("%Y%m%dT%H%M%SZ")
req.headers['x-amz-date'] = timestr
if isinstance(req.payload, bytes):
payloadhash = hashlib.sha256(req.payload).hexdigest()
else:
payloadhash = 'STREAMING-AWS4-HMAC-SHA256-PAYLOAD'
req.headers['x-amz-content-sha256'] = payloadhash
if aws_token:
req.headers['x-amz-security-token'] = aws_token
signing_key = _signkey(aws_secret, date, aws_region, aws_service)
headernames = ';'.join(k.lower() for k in sorted(req.headers))
creq = (
"{req.verb}\n"
"{req.resource}\n"
"{req.query_string}\n"
"{headers}\n\n"
"{headernames}\n"
"{payloadhash}".format(
req=req,
headers='\n'.join(k.lower() + ':' + req.headers[k].strip()
for k in sorted(req.headers)),
headernames=headernames,
payloadhash=payloadhash
))
string_to_sign = (
"AWS4-HMAC-SHA256\n{ts}\n"
"{date}/{region}/{service}/aws4_request\n"
"{reqhash}".format(
ts=timestr,
date=date,
region=aws_region,
service=aws_service,
reqhash=hashlib.sha256(creq.encode('ascii')).hexdigest(),
))
sig = hmac.new(signing_key, string_to_sign.encode('ascii'),
hashlib.sha256).hexdigest()
ahdr = ('AWS4-HMAC-SHA256 '
'Credential={key}/{date}/{region}/{service}/aws4_request, '
'SignedHeaders={headers}, Signature={sig}'.format(
key=aws_key, date=date, region=aws_region, service=aws_service,
headers=headernames,
sig=sig,
))
req.headers['Authorization'] = ahdr
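# Illustrative sketch: the final step of sign_v4() is to HMAC-SHA256 the
# string-to-sign with the scoped key from _signkey() and embed the hex digest
# in the Authorization header. The helper below repeats just that step; the
# arguments are placeholders, not real credentials.
def _sigv4_signature_sketch(aws_secret, date, region, string_to_sign):
    """Return the hex signature for an already-built string-to-sign."""
    key = _signkey(aws_secret, date, region, 's3')
    return hmac.new(key, string_to_sign.encode('ascii'),
                    hashlib.sha256).hexdigest()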
def _hmac_old(key, val):
return hmac.new(key, val, hashlib.sha1).digest()
class MultipartUpload(object):
def __init__(self, bucket, key, upload_id):
self.bucket = bucket
self.key = key
self.upload_id = upload_id
self.xml = Element('CompleteMultipartUpload')
self.parts = 0
self._done = False
self._uri = '/' + self.key + '?uploadId=' + self.upload_id
@asyncio.coroutine
def add_chunk(self, data):
assert isinstance(data, (bytes, memoryview, bytearray)), data
# TODO: check chunk size; every chunk except the last must be at least 5MB
# assert len(data) >= 5 << 20, "Chunk must be at least 5Mb"
if self._done:
raise RuntimeError("Can't add_chunk after commit or close")
self.parts += 1
result = yield from self.bucket._request(Request("PUT",
'/' + self.key, {
'uploadId': self.upload_id,
'partNumber': str(self.parts),
}, headers={
'CONTENT-LENGTH': str(len(data)),
'HOST': self.bucket._host,
# next one aiohttp adds for us anyway, so we must put it here
# so it's added into signature
'CONTENT-TYPE': 'application/octet-stream',
}, payload=data))
try:
if result.status != 200:
xml = yield from result.read()
raise errors.AWSException.from_bytes(result.status, xml)
etag = result.headers['ETAG']
finally:
result.close()
chunk = SubElement(self.xml, 'Part')
SubElement(chunk, 'PartNumber').text = str(self.parts)
SubElement(chunk, 'ETag').text = etag
@asyncio.coroutine
def commit(self):
if self._done:
raise RuntimeError("Can't commit twice or after close")
self._done = True
data = xml_tostring(self.xml)
result = yield from self.bucket._request(Request("POST",
'/' + self.key, {
'uploadId': self.upload_id,
}, headers={
'CONTENT-LENGTH': str(len(data)),
'HOST': self.bucket._host,
'CONTENT-TYPE': 'application/xml',
}, payload=data))
try:
xml = yield from result.read()
if result.status != 200:
raise errors.AWSException.from_bytes(result.status, xml)
xml = parse_xml(xml)
return xml.find('s3:ETag', namespaces=NS)
finally:
result.close()
@asyncio.coroutine
def close(self):
if self._done:
return
self._done = True
result = yield from self.bucket._request(Request("DELETE",
'/' + self.key, {
'uploadId': self.upload_id,
}, headers={'HOST': self.bucket._host}, payload=b''))
try:
xml = yield from result.read()
if result.status != 204:
raise errors.AWSException.from_bytes(result.status, xml)
finally:
result.close()
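# Illustrative usage sketch: the intended lifecycle of MultipartUpload is to
# obtain it from Bucket.upload_multipart(), push data with add_chunk(), then
# commit() (or close() to abort). The bucket, key and chunks below are
# assumed to be supplied by the caller; real credentials are needed to run it.
@asyncio.coroutine
def _multipart_usage_sketch(bucket, key, chunks):
    upload = yield from bucket.upload_multipart(key)
    try:
        for chunk in chunks:                  # each chunk is a bytes object
            yield from upload.add_chunk(chunk)
        etag = yield from upload.commit()
    except Exception:
        yield from upload.close()             # abort the upload on failure
        raise
    return etag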
class Bucket(object):
def __init__(self, name, *,
port=80,
aws_key, aws_secret, aws_token,
aws_region='us-east-1',
aws_endpoint='s3.amazonaws.com',
signature=SIGNATURE_V4,
connector=None):
self._name = name
self._connector = None
self._aws_sign_data = {
'aws_key': aws_key,
'aws_secret': aws_secret,
'aws_token': aws_token,
'aws_region': aws_region,
'aws_service': 's3',
'aws_bucket': name,
}
self._host = self._name + '.' + aws_endpoint
if port != 80:
self._host = self._host + ':' + str(port)
self._signature = signature
@asyncio.coroutine
def exists(self, prefix=''):
result = yield from self._request(Request(
"GET",
"/",
{'prefix': prefix,
'separator': '/',
'max-keys': '1'},
{'HOST': self._host},
b'',
))
data = (yield from result.read())
if result.status != 200:
raise errors.AWSException.from_bytes(result.status, data)
x = parse_xml(data)
return any(map(Key.from_xml,
x.findall('s3:Contents', namespaces=NS)))
@asyncio.coroutine
def list(self, prefix='', max_keys=1000):
result = yield from self._request(Request(
"GET",
"/",
{'prefix': prefix,
'max-keys': str(max_keys)},
{'HOST': self._host},
b'',
))
data = (yield from result.read())
if result.status != 200:
raise errors.AWSException.from_bytes(result.status, data)
x = parse_xml(data)
if x.find('s3:IsTruncated', namespaces=NS).text != 'false':
raise AssertionError(
"File list is truncated, use bigger max_keys")
return list(map(Key.from_xml,
x.findall('s3:Contents', namespaces=NS)))
def list_by_chunks(self, prefix='', max_keys=1000, after_filename=None):
final = False
if after_filename:
marker = after_filename
else:
marker = ''
@asyncio.coroutine
def read_next():
nonlocal final, marker
result = yield from self._request(Request(
"GET",
"/",
{'prefix': prefix,
'max-keys': str(max_keys),
'marker': marker},
{'HOST': self._host},
b'',
))
data = (yield from result.read())
if result.status != 200:
raise errors.AWSException.from_bytes(result.status, data)
x = parse_xml(data)
result = list(map(Key.from_xml,
x.findall('s3:Contents', namespaces=NS)))
if(x.find('s3:IsTruncated', namespaces=NS).text == 'false' or
len(result) == 0):
final = True
else:
marker = result[-1].key
return result
while not final:
yield read_next()
@asyncio.coroutine
def download(self, key):
if isinstance(key, Key):
key = key.key
result = yield from self._request(Request(
"GET", '/' + key, {}, {'HOST': self._host}, b''))
if result.status != 200:
raise errors.AWSException.from_bytes(
result.status, (yield from result.read()))
return result
@asyncio.coroutine
def upload(self, key, data,
content_length=None,
content_type='application/octet-stream',
last_modified=None):
"""Upload file to S3
The `data` might be a generator or stream.
The `content_length` is not verified, so it is the caller's responsibility to
ensure that it matches the data.
Note: Riak CS doesn't allow uploading files without a content_length.
"""
if isinstance(key, Key):
key = key.key
if isinstance(data, str):
data = data.encode('utf-8')
headers = {
'HOST': self._host,
'CONTENT-TYPE': content_type,
"x-amz-server-side-encryption": "AES256",
}
if content_length is not None:
headers['CONTENT-LENGTH'] = str(content_length)
if last_modified:
headers.update({"x-amz-last-modified": last_modified})
headers.update({"x-amz-server-side-encryption": "AES256"})
result = yield from self._request(Request("PUT", '/' + key, {},
headers=headers, payload=data))
try:
if result.status != 200:
xml = yield from result.read()
raise errors.AWSException.from_bytes(result.status, xml)
return result
finally:
result.close()
@asyncio.coroutine
def delete(self, key):
if isinstance(key, Key):
key = key.key
result = yield from self._request(Request("DELETE", '/' + key, {},
{'HOST': self._host}, b''))
try:
if result.status != 204:
xml = yield from result.read()
raise errors.AWSException.from_bytes(result.status, xml)
return result
finally:
result.close()
@asyncio.coroutine
def get(self, key):
if isinstance(key, Key):
key = key.key
result = yield from self._request(Request(
"GET", '/' + key, {}, {'HOST': self._host}, b''))
if result.status != 200:
raise errors.AWSException.from_bytes(
result.status, (yield from result.read()))
data = yield from result.read()
return data
@asyncio.coroutine
def _request(self, req):
_SIGNATURES[self._signature](req, **self._aws_sign_data)
if isinstance(req.payload, bytes):
req.headers['CONTENT-LENGTH'] = str(len(req.payload))
return (yield from aiohttp.request(req.verb, req.url,
chunked='CONTENT-LENGTH' not in req.headers,
headers=req.headers,
data=req.payload,
connector=self._connector))
@asyncio.coroutine
def upload_multipart(self, key,
content_type='application/octet-stream',
MultipartUpload=MultipartUpload):
"""Upload file to S3 by uploading multiple chunks"""
if isinstance(key, Key):
key = key.key
result = yield from self._request(Request("POST",
'/' + key, {'uploads': ''}, {
'HOST': self._host,
'CONTENT-TYPE': content_type,
}, payload=b''))
try:
if result.status != 200:
xml = yield from result.read()
raise errors.AWSException.from_bytes(result.status, xml)
xml = yield from result.read()
upload_id = parse_xml(xml).find('s3:UploadId',
namespaces=NS).text
assert upload_id, xml
return MultipartUpload(self, key, upload_id)
finally:
result.close()
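# Illustrative usage sketch: list_by_chunks() is a plain generator that
# yields one coroutine per page of results, so callers drive pagination
# themselves. The helper below gathers every key under a prefix; the bucket
# and prefix are assumed to exist already.
@asyncio.coroutine
def _list_all_keys_sketch(bucket, prefix=''):
    keys = []
    for page in bucket.list_by_chunks(prefix=prefix, max_keys=1000):
        keys.extend((yield from page))
    return keys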
| mit | 5,031,407,894,351,788,000 | 32.981395 | 88 | 0.536408 | false |
mendersoftware/integration | testutils/infra/container_manager/docker_manager.py | 1 | 1777 |
# Copyright 2021 Northern.tech AS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
from .base import BaseContainerManagerNamespace
class DockerNamespace(BaseContainerManagerNamespace):
def __init__(self, name):
BaseContainerManagerNamespace.__init__(self, name)
def setup(self):
pass
def teardown(self):
pass
def execute(self, container_id, cmd):
cmd = ["docker", "exec", "{}".format(container_id)] + cmd
ret = subprocess.check_output(cmd).decode("utf-8").strip()
return ret
def cmd(self, container_id, docker_cmd, cmd=[]):
cmd = ["docker", docker_cmd] + [str(container_id)] + cmd
ret = subprocess.run(
cmd, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
return ret.stdout.decode("utf-8").strip()
def getid(self, filters):
filters.append(self.name)
filters = ["grep {}".format(f) for f in filters]
cmd = "docker ps | " + " | ".join(filters) + " | awk '{print $1}'"
ret = subprocess.check_output(cmd, shell=True).decode("utf-8").strip()
if ret == "":
raise RuntimeError("container id for {} not found".format(str(filters)))
return ret
| apache-2.0 | 3,428,999,405,347,108,000 | 33.843137 | 84 | 0.642656 | false |
jmesteve/openerpseda | openerp/addons_extra/point_of_sale/__openerp__.py | 1 | 4171 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Point of Sale',
'version': '1.0.2',
'category': 'Point Of Sale',
'sequence': 6,
'summary': 'Touchscreen Interface for Shops',
'description': """
Quick and Easy sale process
===========================
This module allows you to manage your shop sales very easily with a fully web based touchscreen interface.
It is compatible with all PC tablets and the iPad, offering multiple payment methods.
Product selection can be done in several ways:
* Using a barcode reader
* Browsing through categories of products or via a text search.
Main Features
-------------
* Fast encoding of the sale
* Choose one payment method (the quick way) or split the payment between several payment methods
* Computation of the amount of money to return
* Create and confirm the picking list automatically
* Allows the user to create an invoice automatically
* Refund previous sales
""",
'author': 'OpenERP SA',
'images': ['images/pos_touch_screen.jpeg', 'images/pos_session.jpeg', 'images/pos_analysis.jpeg','images/sale_order_pos.jpeg','images/product_pos.jpeg'],
'depends': ['sale_stock','product_images'],
'data': [
'security/point_of_sale_security.xml',
'security/ir.model.access.csv',
'wizard/pos_details.xml',
'wizard/pos_confirm.xml',
'wizard/pos_discount.xml',
'wizard/pos_open_statement.xml',
'wizard/pos_payment_report_user_view.xml',
'wizard/pos_sales_user.xml',
'wizard/pos_receipt_view.xml',
'wizard/pos_payment_report_user.xml',
'wizard/pos_payment_report.xml',
'wizard/pos_payment.xml',
'wizard/pos_box.xml',
'wizard/pos_session_opening.xml',
'point_of_sale_report.xml',
'point_of_sale_view.xml',
'point_of_sale_data.xml',
'report/pos_order_report_view.xml',
'point_of_sale_sequence.xml',
'point_of_sale_workflow.xml',
'account_statement_view.xml',
'account_statement_report.xml',
'res_users_view.xml',
'res_partner_view.xml',
],
'demo': [
'point_of_sale_demo.xml',
'account_statement_demo.xml',
'test/00_register_open.yml'
],
'test': [
'test/01_order_to_payment.yml',
'test/02_order_to_invoice.yml',
'test/point_of_sale_report.yml'
],
'installable': True,
'application': True,
'js': [
'static/lib/mousewheel/jquery.mousewheel-3.0.6.js',
'static/src/js/db.js',
'static/src/js/models.js',
'static/src/js/widget_base.js',
'static/src/js/widget_keyboard.js',
'static/src/js/widget_scrollbar.js',
'static/src/js/widgets.js',
'static/src/js/devices.js',
'static/src/js/screens.js',
'static/src/js/main.js',
],
'css': [
'static/src/css/pos.css', # this is the default css with hover effects
#'static/src/css/pos_nohover.css', # this css has no hover effects (for resistive touchscreens)
'static/src/css/keyboard.css'
],
'qweb': ['static/src/xml/pos.xml'],
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 6,856,412,445,368,254,000 | 36.241071 | 157 | 0.61544 | false |
shirtsgroup/pygo | package/writetopdb.py | 1 | 4226 |
import numpy
import pdb
'''This module contains various I/O helper methods'''
def get_coord(filename):
f = file(filename,'r')
# first count number of atoms to instantiate coord array
n = 0
for line in f:
if 'ATOM' in line:
n += 1
coord = numpy.empty((n,3))
pdb_text = [] # will hold the pdb file text
i = 0
f.seek(0)
for line in f:
if 'ATOM' in line:
pdb_text.append(line)
coord[i,0] = float(line[31:38])
coord[i,1] = float(line[39:46])
coord[i,2] = float(line[47:54])
i += 1
f.close()
assert(i == n)
return coord, pdb_text
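# Illustrative sketch: get_coord() above relies on the fixed-column PDB
# layout, where x, y and z occupy character columns 31-38, 39-46 and 47-54
# of each ATOM record. The helper below extracts the coordinates from a
# single ATOM line in the same way (for example, one of the lines collected
# in pdb_text above).
def _parse_atom_line_sketch(line):
    """Return (x, y, z) floats from one PDB ATOM record."""
    return (float(line[31:38]), float(line[39:46]), float(line[47:54]))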
def writeseqpdb(mpos,text,posline,move):
'''Deprecated'''
# multi file pdb output
j=0
write=''
for i in posline:
words=text[i,][0:30]
coordstr=''
coordstr=coordstr+str('%8.3f') % mpos[j,0]
coordstr=coordstr+str('%8.3f') % mpos[j,1]
coordstr=coordstr+str('%8.3f') % mpos[j,2]
coordstr=coordstr+'\r\n'
j+=1
text[i]=words+coordstr
f=file(str(move)+'.pdb','w')
for k in range(len(text)): #don't want 'END'
write=write+text[k]
f.write(write)
f.close
def writepdb(mpos,text,posline,move,filename):
'''Deprecated'''
# 1 file pdb output
j=0
for i in posline:
words=[text[i][0:30],'%8.3f' %(mpos[j,0]),'%8.3f' %(mpos[j,1]),'%8.3f'%(mpos[j,2]),'\r\n']
j=j+1
text[i]="".join(words)
f=file(filename,'w')
f.write('MODEL %i\r\n' % (move)) #check moves here
write="".join(text[0:-1])
f.write(write)
f.write('ENDMDL\r\n')
f.close
def addtopdb(mpos,coordtext,move,filename):
'''Deprecated'''
# 1 file pdb output
for i in range(len(coordtext)):
words=[coordtext[i][0:30],'%8.3f' %(mpos[i,0]),'%8.3f' %(mpos[i,1]),'%8.3f'%(mpos[i,2]),'\r\n']
coordtext[i]="".join(words)
f=file(filename,'a')
f.write('MODEL %i\r\n' % (move))
write="".join(coordtext)
f.write(write)
f.write('ENDMDL\r\n')
f.close
def addconnect(filename,k):
#filename = .pdb file for linear chain polymer/protein without bonds
#k = number of beads in polymer/protein
f=open(filename,'a')
text=''
text=text+'CONECT 1 2\r\n'
for i in range(2,k):
text=text+'CONECT '
text=text+str('%3.0f') % i
text=text+' '
text=text+str('%3.0f') % (i-1)
text=text+' '
text=text+str('%3.0f') % (i+1)
text=text+'\r\n'
text=text+'CONECT '+str(k)+' '+str(k-1)+'\r\nEND\r\n'
f.write(text)
f.close()
def getmovietransform(nativecoord):
'''Deprecated'''
nc=nativecoord.copy()
translate= nc[0,:]
nc -= translate
BC = nc[1,:]
x1 = BC/numpy.dot(BC,BC)**.5
AB = numpy.array([.5,.5,.5]); #random, but constant for all simulations
y1 = AB-numpy.dot(AB,BC)/numpy.dot(BC,BC)*BC
y1 = y1/numpy.sum(y1**2)**.5
z1 = numpy.cross(x1,y1)
return numpy.array([x1,y1,z1])
def getmovietransform_old(nativecoord):
'''Deprecated'''
nc = nativecoord.copy()
center = len(nc)/2
translate = nc[center,:]
translate = translate.copy()
for i in range(len(nc)):
nc[i,:] -= translate
BC = nc[center+1,:]
x1 = BC/numpy.dot(BC,BC)**.5
AB = [.5,.5,.5]; #random, but constant for all simulations
y1 = AB-numpy.dot(AB,BC)/numpy.dot(BC,BC)*BC
y1 = y1/numpy.dot(y1,y1)**.5
z1 = numpy.cross(x1,y1)
return [x1,y1,z1]
def moviecoord(mpos123,transform):
'''Deprecated'''
mpos = mpos123.copy()
mpos[0,:] = numpy.zeros(3)
bond = mpos123[1:len(mpos123),:]-mpos123[0:-1,:]
bond = numpy.dot(bond,transform)
for i in xrange(len(mpos)-1):
mpos[i+1,:] = mpos[i,:]+bond[i,:]
return mpos
def moviecoord_old(mpos123,transform):
'''Deprecated'''
mpos=mpos123.copy()
center=len(mpos)/2
translate=mpos[center,:]
mpos-=translate
for i in range(center,len(mpos)-1):
BC=mpos123[i+1,:]-mpos123[i,:]
BCnew=numpy.dot(transform,BC.transpose())
mpos[i+1,:]=mpos[i,:]+BCnew
for i in range(center,0,-1):
BC=mpos123[i-1,:]-mpos123[i,:]
BCnew=numpy.dot(transform,BC.transpose())
mpos[i-1,:]=mpos[i,:]+BCnew
return mpos
| gpl-2.0 | -6,812,921,427,290,016,000 | 27.554054 | 103 | 0.561524 | false |
MobinRanjbar/hue | desktop/libs/notebook/src/notebook/management/commands/notebook_setup.py | 1 | 1828 |
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import pwd
from django.contrib.auth.models import User
from django.core import management
from django.core.management.base import BaseCommand
from desktop.models import Document, Document2, SAMPLE_USER_OWNERS
from useradmin.models import install_sample_user
LOG = logging.getLogger(__name__)
class Command(BaseCommand):
args = '<user>'
help = 'Install examples but do not overwrite them.'
def handle(self, *args, **options):
if not options.get('user'):
user = User.objects.get(username=pwd.getpwuid(os.getuid()).pw_name)
else:
user = options['user']
if not Document2.objects.filter(type='notebook', owner__username__in=SAMPLE_USER_OWNERS).exists():
install_sample_user()
management.call_command('loaddata', 'initial_notebook_examples.json', verbosity=2)
Document.objects.sync()
from beeswax.management.commands.beeswax_install_examples import Command
app_name = 'beeswax'
Command().handle(app_name=app_name, user=user, tables='tables.json')
| apache-2.0 | -1,704,254,534,951,915,800 | 34.843137 | 102 | 0.745077 | false |
suyashphadtare/test | erpnext/accounts/report/financial_statements.py | 1 | 7698 |
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _, _dict
from frappe.utils import (flt, getdate, get_first_day, get_last_day,
add_months, add_days, formatdate)
def get_period_list(fiscal_year, periodicity, from_beginning=False):
"""Get a list of dict {"to_date": to_date, "key": key, "label": label}
Periodicity can be (Yearly, Quarterly, Monthly)"""
fy_start_end_date = frappe.db.get_value("Fiscal Year", fiscal_year, ["year_start_date", "year_end_date"])
if not fy_start_end_date:
frappe.throw(_("Fiscal Year {0} not found.").format(fiscal_year))
start_date = getdate(fy_start_end_date[0])
end_date = getdate(fy_start_end_date[1])
if periodicity == "Yearly":
period_list = [_dict({"to_date": end_date, "key": fiscal_year, "label": fiscal_year})]
else:
months_to_add = {
"Half-yearly": 6,
"Quarterly": 3,
"Monthly": 1
}[periodicity]
period_list = []
# start with first day, so as to avoid year to_dates like 2-April if ever they occur
to_date = get_first_day(start_date)
for i in xrange(12 / months_to_add):
to_date = add_months(to_date, months_to_add)
if to_date == get_first_day(to_date):
# if to_date is the first day, get the last day of previous month
to_date = add_days(to_date, -1)
else:
# to_date should be the last day of the new to_date's month
to_date = get_last_day(to_date)
if to_date <= end_date:
# the normal case
period_list.append(_dict({ "to_date": to_date }))
# if it ends before a full year
if to_date == end_date:
break
else:
# if a fiscal year ends before a 12 month period
period_list.append(_dict({ "to_date": end_date }))
break
# common processing
for opts in period_list:
key = opts["to_date"].strftime("%b_%Y").lower()
label = formatdate(opts["to_date"], "MMM YYYY")
opts.update({
"key": key.replace(" ", "_").replace("-", "_"),
"label": label,
"year_start_date": start_date,
"year_end_date": end_date
})
if from_beginning:
# set start date as None for all fiscal periods, used in case of Balance Sheet
opts["from_date"] = None
else:
opts["from_date"] = start_date
return period_list
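# Illustrative sketch: for a fiscal year running 1 Apr 2014 - 31 Mar 2015,
# get_period_list("2014-2015", "Quarterly") would yield four dicts whose
# to_date values are the quarter ends, with keys and labels built from the
# strftime patterns above (e.g. key "jun_2014", label "Jun 2014"). The
# fiscal-year name is assumed here; the helper below only shows how one
# key/label pair is derived for a given date.
def _period_key_label_sketch(to_date):
    key = to_date.strftime("%b_%Y").lower()
    label = formatdate(to_date, "MMM YYYY")
    return key, label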
def get_data(company, root_type, balance_must_be, period_list, ignore_closing_entries=False):
accounts = get_accounts(company, root_type)
if not accounts:
return None
accounts, accounts_by_name = filter_accounts(accounts)
gl_entries_by_account = get_gl_entries(company, period_list[0]["from_date"], period_list[-1]["to_date"],
accounts[0].lft, accounts[0].rgt, ignore_closing_entries=ignore_closing_entries)
calculate_values(accounts, gl_entries_by_account, period_list)
accumulate_values_into_parents(accounts, accounts_by_name, period_list)
out = prepare_data(accounts, balance_must_be, period_list)
if out:
add_total_row(out, balance_must_be, period_list)
return out
def calculate_values(accounts, gl_entries_by_account, period_list):
for d in accounts:
for name in ([d.name] + (d.collapsed_children or [])):
for entry in gl_entries_by_account.get(name, []):
for period in period_list:
entry.posting_date = getdate(entry.posting_date)
# check if posting date is within the period
if entry.posting_date <= period.to_date:
d[period.key] = d.get(period.key, 0.0) + flt(entry.debit) - flt(entry.credit)
def accumulate_values_into_parents(accounts, accounts_by_name, period_list):
"""accumulate children's values in parent accounts"""
for d in reversed(accounts):
if d.parent_account:
for period in period_list:
accounts_by_name[d.parent_account][period.key] = accounts_by_name[d.parent_account].get(period.key, 0.0) + \
d.get(period.key, 0.0)
def prepare_data(accounts, balance_must_be, period_list):
out = []
year_start_date = period_list[0]["year_start_date"].strftime("%Y-%m-%d")
year_end_date = period_list[-1]["year_end_date"].strftime("%Y-%m-%d")
for d in accounts:
# add to output
has_value = False
row = {
"account_name": d.account_name,
"account": d.name,
"parent_account": d.parent_account,
"indent": flt(d.indent),
"from_date": year_start_date,
"to_date": year_end_date
}
for period in period_list:
if d.get(period.key):
# change sign based on Debit or Credit, since calculation is done using (debit - credit)
d[period.key] *= (1 if balance_must_be=="Debit" else -1)
row[period.key] = flt(d.get(period.key, 0.0), 3)
if abs(row[period.key]) >= 0.005:
# ignore zero values
has_value = True
if has_value:
out.append(row)
return out
def add_total_row(out, balance_must_be, period_list):
row = {
"account_name": _("Total ({0})").format(balance_must_be),
"account": None
}
for period in period_list:
row[period.key] = out[0].get(period.key, 0.0)
out[0][period.key] = ""
out.append(row)
# blank row after Total
out.append({})
def get_accounts(company, root_type):
# root lft, rgt
root_account = frappe.db.sql("""select lft, rgt from tabAccount
where company=%s and root_type=%s order by lft limit 1""",
(company, root_type), as_dict=True)
if not root_account:
return None
lft, rgt = root_account[0].lft, root_account[0].rgt
accounts = frappe.db.sql("""select * from tabAccount
where company=%(company)s and lft >= %(lft)s and rgt <= %(rgt)s order by lft""",
{ "company": company, "lft": lft, "rgt": rgt }, as_dict=True)
return accounts
def filter_accounts(accounts, depth=10):
parent_children_map = {}
accounts_by_name = {}
for d in accounts:
accounts_by_name[d.name] = d
parent_children_map.setdefault(d.parent_account or None, []).append(d)
filtered_accounts = []
def add_to_list(parent, level):
if level < depth:
for child in (parent_children_map.get(parent) or []):
child.indent = level
filtered_accounts.append(child)
add_to_list(child.name, level + 1)
else:
# include all children at level lower than the depth
parent_account = accounts_by_name[parent]
parent_account["collapsed_children"] = []
for d in accounts:
if d.lft > parent_account.lft and d.rgt < parent_account.rgt:
parent_account["collapsed_children"].append(d.name)
add_to_list(None, 0)
return filtered_accounts, accounts_by_name
def get_gl_entries(company, from_date, to_date, root_lft, root_rgt, ignore_closing_entries=False):
"""Returns a dict like { "account": [gl entries], ... }"""
additional_conditions = []
if ignore_closing_entries:
additional_conditions.append("and ifnull(voucher_type, '')!='Period Closing Voucher'")
if from_date:
additional_conditions.append("and posting_date >= %(from_date)s")
gl_entries = frappe.db.sql("""select * from tabGL_Entry
where company=%(company)s
{additional_conditions}
and posting_date <= %(to_date)s
and account in (select name from tabAccount
where lft >= %(lft)s and rgt <= %(rgt)s)
order by account, posting_date""".format(additional_conditions="\n".join(additional_conditions)),
{
"company": company,
"from_date": from_date,
"to_date": to_date,
"lft": root_lft,
"rgt": root_rgt
},
as_dict=True)
gl_entries_by_account = {}
for entry in gl_entries:
gl_entries_by_account.setdefault(entry.account, []).append(entry)
return gl_entries_by_account
def get_columns(period_list):
columns = [{
"fieldname": "account",
"label": _("Account"),
"fieldtype": "Link",
"options": "Account",
"width": 300
}]
for period in period_list:
columns.append({
"fieldname": period.key,
"label": period.label,
"fieldtype": "Currency",
"width": 150
})
return columns
| agpl-3.0 | -3,243,688,243,815,596,500 | 29.426877 | 112 | 0.669654 | false |
wgong/open_source_learning | learn_stem/python/utilities/nb2to3.py | 1 | 2569 |
#!/usr/bin/env python3
"""
To run: python3 nb2to3.py notebook-or-directory
"""
# Authors: Thomas Kluyver, Fernando Perez
# See: https://gist.github.com/takluyver/c8839593c615bb2f6e80
# found at https://stackoverflow.com/questions/20651502/ipython-code-migration-from-python-2-to-python-3
import argparse
import pathlib
from nbformat import read, write
import lib2to3
from lib2to3.refactor import RefactoringTool, get_fixers_from_package
def refactor_notebook_inplace(rt, path):
def refactor_cell(src):
#print('\n***SRC***\n', src)
try:
tree = rt.refactor_string(src+'\n', str(path) + '/cell-%d' % i)
except (lib2to3.pgen2.parse.ParseError,
lib2to3.pgen2.tokenize.TokenError):
return src
else:
return str(tree)[:-1]
print("Refactoring:", path)
nb = read(str(path), as_version=4)
# Run 2to3 on code
for i, cell in enumerate(nb.cells, start=1):
if cell.cell_type == 'code':
if cell.execution_count in (' ', '*'):
cell.execution_count = None
if cell.source.startswith('%%'):
# For cell magics, try to refactor the body, in case it's
# valid python
head, source = cell.source.split('\n', 1)
cell.source = head + '\n' + refactor_cell(source)
else:
cell.source = refactor_cell(cell.source)
# Update notebook metadata
nb.metadata.kernelspec = {
'display_name': 'Python 3',
'name': 'python3',
'language': 'python',
}
if 'language_info' in nb.metadata:
nb.metadata.language_info.codemirror_mode = {
'name': 'ipython',
'version': 3,
}
nb.metadata.language_info.pygments_lexer = 'ipython3'
nb.metadata.language_info.pop('version', None)
write(nb, str(path))
def main(argv=None):
ap = argparse.ArgumentParser()
ap.add_argument('path', type=pathlib.Path,
help="Notebook or directory containing notebooks")
options = ap.parse_args(argv)
avail_fixes = set(get_fixers_from_package('lib2to3.fixes'))
rt = RefactoringTool(avail_fixes)
if options.path.is_dir():
for nb_path in options.path.rglob('*.ipynb'):
refactor_notebook_inplace(rt, nb_path)
else:
refactor_notebook_inplace(rt, options.path)
if __name__ == '__main__':
main()
| apache-2.0 | 8,255,359,468,792,053,000 | 29.353659 | 104 | 0.571818 | false |
duncanwp/cis_plugins | hadgem_unknown_vars.py | 1 | 2736 |
__author__ = 'watson-parris'
from cis.data_io.products.HadGEM import HadGEM_PP
import logging
class HadGEM_unknown_vars(HadGEM_PP):
def get_variable_names(self, filenames, data_type=None):
import iris
import cf_units as unit
from cis.utils import single_warnings_only
# Removes warnings and prepares for future Iris change
iris.FUTURE.netcdf_promote = True
variables = []
# Filter the warnings so that they only appear once - otherwise you get lots of repeated warnings
with single_warnings_only():
cubes = iris.load(filenames)
for cube in cubes:
is_time_lat_lon_pressure_altitude_or_has_only_1_point = True
for dim in cube.dim_coords:
units = dim.units
if dim.points.size > 1 and \
not units.is_time() and \
not units.is_time_reference() and \
not units.is_vertical() and \
not units.is_convertible(unit.Unit('degrees')):
is_time_lat_lon_pressure_altitude_or_has_only_1_point = False
break
if is_time_lat_lon_pressure_altitude_or_has_only_1_point:
name = cube.var_name or cube.name()
if name == 'unknown' and 'STASH' in cube.attributes:
name = '{}'.format(cube.attributes['STASH'])
variables.append(name)
return set(variables)
@staticmethod
def load_multiple_files_callback(cube, field, filename):
# This method sets the var_name (used for outputting the cube to NetCDF) to the cube name. This can be quite
# long for some HadGEM variables, but most commands allow the user to override this field on output.
var_name = cube.name()
if var_name == 'unknown' and 'STASH' in cube.attributes:
var_name = '{}'.format(cube.attributes['STASH'])
try:
cube.var_name = var_name
except ValueError as e:
logging.info("Unable to set var_name due to error: {}".format(e))
@staticmethod
def load_single_file_callback(cube, field, filename):
# This method sets the var_name (used for outputting the cube to NetCDF) to the cube name. This can be quite
# long for some HadGEM variables, but most commands allow the user to override this field on output.
var_name = cube.name()
if var_name == 'unknown' and 'STASH' in cube.attributes:
var_name = '{}'.format(cube.attributes['STASH'])
try:
cube.var_name = var_name
except ValueError as e:
logging.info("Unable to set var_name due to error: {}".format(e))
| lgpl-3.0 | -1,136,911,862,083,071,600 | 43.129032 | 116 | 0.595395 | false |
frigg/frigg-settings | setup.py | 1 | 1053 |
# -*- encoding: utf8 -*-
import re
from setuptools import find_packages, setup
def _read_long_description():
try:
import pypandoc
return pypandoc.convert('README.md', 'rst', format='markdown')
except Exception:
return None
version = ''
with open('frigg_settings/__init__.py', 'r') as fd:
version = re.search(
r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]',
fd.read(),
re.MULTILINE
).group(1)
setup(
name='frigg-settings',
version=version,
description='A module for parsing and discovery of frigg settings file',
long_description=_read_long_description(),
packages=find_packages(exclude='tests'),
author='The frigg team',
author_email='[email protected]',
license='MIT',
url='https://github.com/frigg/frigg-settings',
py_modules=['frigg_test_discovery'],
include_package_data=True,
install_requires=[
'pyyaml==3.11',
'frigg-test-discovery>1.0,<2.0',
],
classifiers=[
'Programming Language :: Python :: 3',
]
)
| mit | 5,776,911,896,159,276,000 | 23.488372 | 76 | 0.598291 | false |
puttarajubr/commcare-hq | corehq/ex-submodules/phonelog/models.py | 1 | 1259 |
from dimagi.ext.couchdbkit import Document
from django.db import models
COUCH_UUID_MAX_LEN = 50
class DeviceReportEntry(models.Model):
xform_id = models.CharField(max_length=COUCH_UUID_MAX_LEN, db_index=True)
i = models.IntegerField()
msg = models.TextField()
type = models.CharField(max_length=32, db_index=True)
date = models.DateTimeField(db_index=True)
domain = models.CharField(max_length=100, db_index=True)
device_id = models.CharField(max_length=COUCH_UUID_MAX_LEN, db_index=True,
null=True)
app_version = models.TextField(null=True)
username = models.CharField(max_length=100, db_index=True, null=True)
user_id = models.CharField(max_length=COUCH_UUID_MAX_LEN, db_index=True, null=True)
class Meta:
unique_together = [('xform_id', 'i')]
class UserEntry(models.Model):
xform_id = models.CharField(max_length=COUCH_UUID_MAX_LEN, db_index=True)
i = models.IntegerField()
user_id = models.CharField(max_length=COUCH_UUID_MAX_LEN)
sync_token = models.CharField(max_length=COUCH_UUID_MAX_LEN)
username = models.CharField(max_length=100, db_index=True)
class Meta:
unique_together = [('xform_id', 'i')]
class _(Document):
pass
| bsd-3-clause | 4,064,221,458,120,337,000 | 33.972222 | 87 | 0.683082 | false |
abacuspix/NFV_project | Instant_Flask_Web_Development/sched/forms.py | 1 | 1083 |
"""Forms to render HTML input & validate request data."""
from wtforms import Form, BooleanField, DateTimeField, PasswordField
from wtforms import TextAreaField, TextField
from wtforms.validators import Length, required
class AppointmentForm(Form):
"""Render HTML input for Appointment model & validate submissions.
This matches the models.Appointment class very closely. Where
models.Appointment represents the domain and its persistence, this class
represents how to display a form in HTML & accept/reject the results.
"""
title = TextField('Title', [Length(max=255)])
start = DateTimeField('Start', [required()])
end = DateTimeField('End')
allday = BooleanField('All Day')
location = TextField('Location', [Length(max=255)])
description = TextAreaField('Description')
class LoginForm(Form):
"""Render HTML input for user login form.
Authentication (i.e. password verification) happens in the view function.
"""
username = TextField('Username', [required()])
password = PasswordField('Password', [required()])
| mit | 1,323,436,043,198,504,700 | 36.344828 | 77 | 0.722068 | false |
sgraham/nope | tools/telemetry/telemetry/core/browser_options.py | 1 | 15297 |
# Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
import logging
import optparse
import os
import shlex
import socket
import sys
from telemetry.core import browser_finder
from telemetry.core import browser_finder_exceptions
from telemetry.core import device_finder
from telemetry.core import platform
from telemetry.core import profile_types
from telemetry.core import util
from telemetry.core import wpr_modes
from telemetry.core.platform.profiler import profiler_finder
util.AddDirToPythonPath(
util.GetChromiumSrcDir(), 'third_party', 'webpagereplay')
import net_configs # pylint: disable=F0401
class BrowserFinderOptions(optparse.Values):
"""Options to be used for discovering a browser."""
def __init__(self, browser_type=None):
optparse.Values.__init__(self)
self.browser_type = browser_type
self.browser_executable = None
self.chrome_root = None
self.device = None
self.cros_ssh_identity = None
self.extensions_to_load = []
# If set, copy the generated profile to this path on exit.
self.output_profile_path = None
self.cros_remote = None
self.profiler = None
self.verbosity = 0
self.browser_options = BrowserOptions()
self.output_file = None
self.android_rndis = False
self.no_performance_mode = False
def __repr__(self):
return str(sorted(self.__dict__.items()))
def Copy(self):
return copy.deepcopy(self)
def CreateParser(self, *args, **kwargs):
parser = optparse.OptionParser(*args, **kwargs)
# Selection group
group = optparse.OptionGroup(parser, 'Which browser to use')
group.add_option('--browser',
dest='browser_type',
default=None,
help='Browser type to run, '
'in order of priority. Supported values: list,%s' %
','.join(browser_finder.FindAllBrowserTypes(self)))
group.add_option('--browser-executable',
dest='browser_executable',
help='The exact browser to run.')
group.add_option('--chrome-root',
dest='chrome_root',
help='Where to look for chrome builds.'
'Defaults to searching parent dirs by default.')
group.add_option('--device',
dest='device',
help='The device ID to use. '
'If not specified, only 0 or 1 connected devices are supported. If '
'specified as "android", all available Android devices are used.')
group.add_option('--target-arch',
dest='target_arch',
help='The target architecture of the browser. Options available are: '
'x64, x86_64, arm, arm64 and mips. '
'Defaults to the default architecture of the platform if omitted.')
group.add_option(
'--remote',
dest='cros_remote',
help='The hostname of a remote ChromeOS device to use.')
group.add_option(
'--remote-ssh-port',
type=int,
default=socket.getservbyname('ssh'),
dest='cros_remote_ssh_port',
help='The SSH port of the remote ChromeOS device (requires --remote).')
identity = None
testing_rsa = os.path.join(
util.GetChromiumSrcDir(),
'third_party', 'chromite', 'ssh_keys', 'testing_rsa')
if os.path.exists(testing_rsa):
identity = testing_rsa
group.add_option('--identity',
dest='cros_ssh_identity',
default=identity,
help='The identity file to use when ssh\'ing into the ChromeOS device')
parser.add_option_group(group)
# Debugging options
group = optparse.OptionGroup(parser, 'When things go wrong')
profiler_choices = profiler_finder.GetAllAvailableProfilers()
group.add_option(
'--profiler', default=None, type='choice',
choices=profiler_choices,
help='Record profiling data using this tool. Supported values: ' +
', '.join(profiler_choices))
group.add_option(
'--interactive', dest='interactive', action='store_true',
help='Let the user interact with the page; the actions specified for '
'the page are not run.')
group.add_option(
'-v', '--verbose', action='count', dest='verbosity',
help='Increase verbosity level (repeat as needed)')
group.add_option('--print-bootstrap-deps',
action='store_true',
help='Output bootstrap deps list.')
parser.add_option_group(group)
# Platform options
group = optparse.OptionGroup(parser, 'Platform options')
group.add_option('--no-performance-mode', action='store_true',
help='Some platforms run on "full performance mode" where the '
'test is executed at maximum CPU speed in order to minimize noise '
        '(especially important for dashboards / continuous builds). '
'This option prevents Telemetry from tweaking such platform settings.')
group.add_option('--android-rndis', dest='android_rndis', default=False,
action='store_true', help='Use RNDIS forwarding on Android.')
group.add_option('--no-android-rndis', dest='android_rndis',
action='store_false', help='Do not use RNDIS forwarding on Android.'
' [default]')
parser.add_option_group(group)
# Browser options.
self.browser_options.AddCommandLineArgs(parser)
real_parse = parser.parse_args
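    # (Added note, not in the original source:) parser.parse_args is wrapped
    # below so that parsed values are merged into this BrowserFinderOptions
    # instance and the special 'list' values of --device/--browser are handled.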
def ParseArgs(args=None):
defaults = parser.get_default_values()
for k, v in defaults.__dict__.items():
if k in self.__dict__ and self.__dict__[k] != None:
continue
self.__dict__[k] = v
ret = real_parse(args, self) # pylint: disable=E1121
if self.verbosity >= 2:
logging.getLogger().setLevel(logging.DEBUG)
elif self.verbosity:
logging.getLogger().setLevel(logging.INFO)
else:
logging.getLogger().setLevel(logging.WARNING)
if self.device == 'list':
devices = device_finder.GetDevicesMatchingOptions(self)
print 'Available devices:'
for device in devices:
print ' ', device.name
sys.exit(0)
if self.browser_executable and not self.browser_type:
self.browser_type = 'exact'
if self.browser_type == 'list':
devices = device_finder.GetDevicesMatchingOptions(self)
if not devices:
sys.exit(0)
browser_types = {}
for device in devices:
try:
possible_browsers = browser_finder.GetAllAvailableBrowsers(self,
device)
browser_types[device.name] = sorted(
[browser.browser_type for browser in possible_browsers])
except browser_finder_exceptions.BrowserFinderException as ex:
print >> sys.stderr, 'ERROR: ', ex
sys.exit(1)
print 'Available browsers:'
if len(browser_types) == 0:
print ' No devices were found.'
for device_name in sorted(browser_types.keys()):
print ' ', device_name
for browser_type in browser_types[device_name]:
print ' ', browser_type
sys.exit(0)
# Parse browser options.
self.browser_options.UpdateFromParseResults(self)
return ret
parser.parse_args = ParseArgs
return parser
def AppendExtraBrowserArgs(self, args):
self.browser_options.AppendExtraBrowserArgs(args)
def MergeDefaultValues(self, defaults):
for k, v in defaults.__dict__.items():
self.ensure_value(k, v)
class BrowserOptions(object):
"""Options to be used for launching a browser."""
def __init__(self):
self.browser_type = None
self.show_stdout = False
# When set to True, the browser will use the default profile. Telemetry
# will not provide an alternate profile directory.
self.dont_override_profile = False
self.profile_dir = None
self.profile_type = None
self._extra_browser_args = set()
self.extra_wpr_args = []
self.wpr_mode = wpr_modes.WPR_OFF
self.netsim = None
self.disable_background_networking = True
self.no_proxy_server = False
self.browser_user_agent_type = None
self.clear_sytem_cache_for_browser_and_profile_on_start = False
self.startup_url = 'about:blank'
# Background pages of built-in component extensions can interfere with
# performance measurements.
self.disable_component_extensions_with_background_pages = True
# Disable default apps.
self.disable_default_apps = True
# Whether to use the new code path for choosing an ephemeral port for
# DevTools. The bots set this to true. When Chrome 37 reaches stable,
# remove this setting and the old code path. http://crbug.com/379980
self.use_devtools_active_port = False
def __repr__(self):
return str(sorted(self.__dict__.items()))
def IsCrosBrowserOptions(self):
return False
@classmethod
def AddCommandLineArgs(cls, parser):
############################################################################
# Please do not add any more options here without first discussing with #
# a telemetry owner. This is not the right place for platform-specific #
# options. #
############################################################################
group = optparse.OptionGroup(parser, 'Browser options')
profile_choices = profile_types.GetProfileTypes()
group.add_option('--profile-type',
dest='profile_type',
type='choice',
default='clean',
choices=profile_choices,
help=('The user profile to use. A clean profile is used by default. '
'Supported values: ' + ', '.join(profile_choices)))
group.add_option('--profile-dir',
dest='profile_dir',
help='Profile directory to launch the browser with. '
'A clean profile is used by default')
group.add_option('--extra-browser-args',
dest='extra_browser_args_as_string',
help='Additional arguments to pass to the browser when it starts')
group.add_option('--extra-wpr-args',
dest='extra_wpr_args_as_string',
help=('Additional arguments to pass to Web Page Replay. '
'See third_party/webpagereplay/replay.py for usage.'))
group.add_option('--netsim', default=None, type='choice',
choices=net_configs.NET_CONFIG_NAMES,
help=('Run benchmark under simulated network conditions. '
'Will prompt for sudo. Supported values: ' +
', '.join(net_configs.NET_CONFIG_NAMES)))
group.add_option('--show-stdout',
action='store_true',
help='When possible, will display the stdout of the process')
# This hidden option is to be removed, and the older code path deleted,
# once Chrome 37 reaches Stable. http://crbug.com/379980
group.add_option('--use-devtools-active-port',
action='store_true',
help=optparse.SUPPRESS_HELP)
parser.add_option_group(group)
group = optparse.OptionGroup(parser, 'Compatibility options')
group.add_option('--gtest_output',
help='Ignored argument for compatibility with runtest.py harness')
parser.add_option_group(group)
group = optparse.OptionGroup(parser, 'Synthetic gesture options')
synthetic_gesture_source_type_choices = ['default', 'mouse', 'touch']
group.add_option('--synthetic-gesture-source-type',
dest='synthetic_gesture_source_type',
default='default', type='choice',
choices=synthetic_gesture_source_type_choices,
        help='Specify the source type for synthetic gestures. Note that some ' +
'actions only support a specific source type. ' +
'Supported values: ' +
', '.join(synthetic_gesture_source_type_choices))
parser.add_option_group(group)
def UpdateFromParseResults(self, finder_options):
"""Copies our options from finder_options"""
browser_options_list = [
'extra_browser_args_as_string',
'extra_wpr_args_as_string',
'netsim',
'profile_dir',
'profile_type',
'show_stdout',
'synthetic_gesture_source_type',
'use_devtools_active_port',
]
for o in browser_options_list:
a = getattr(finder_options, o, None)
if a is not None:
setattr(self, o, a)
delattr(finder_options, o)
self.browser_type = finder_options.browser_type
if hasattr(self, 'extra_browser_args_as_string'): # pylint: disable=E1101
tmp = shlex.split(
self.extra_browser_args_as_string) # pylint: disable=E1101
self.AppendExtraBrowserArgs(tmp)
delattr(self, 'extra_browser_args_as_string')
if hasattr(self, 'extra_wpr_args_as_string'): # pylint: disable=E1101
tmp = shlex.split(
self.extra_wpr_args_as_string) # pylint: disable=E1101
self.extra_wpr_args.extend(tmp)
delattr(self, 'extra_wpr_args_as_string')
if self.profile_type == 'default':
self.dont_override_profile = True
if self.profile_dir and self.profile_type != 'clean':
logging.critical(
"It's illegal to specify both --profile-type and --profile-dir.\n"
"For more information see: http://goo.gl/ngdGD5")
sys.exit(1)
if self.profile_dir and not os.path.isdir(self.profile_dir):
logging.critical(
"Directory specified by --profile-dir (%s) doesn't exist "
"or isn't a directory.\n"
"For more information see: http://goo.gl/ngdGD5" % self.profile_dir)
sys.exit(1)
if not self.profile_dir:
self.profile_dir = profile_types.GetProfileDir(self.profile_type)
# This deferred import is necessary because browser_options is imported in
# telemetry/telemetry/__init__.py.
finder_options.browser_options = CreateChromeBrowserOptions(self)
@property
def extra_browser_args(self):
return self._extra_browser_args
def AppendExtraBrowserArgs(self, args):
if isinstance(args, list):
self._extra_browser_args.update(args)
else:
self._extra_browser_args.add(args)
def CreateChromeBrowserOptions(br_options):
browser_type = br_options.browser_type
if (platform.GetHostPlatform().GetOSName() == 'chromeos' or
(browser_type and browser_type.startswith('cros'))):
return CrosBrowserOptions(br_options)
return br_options
class ChromeBrowserOptions(BrowserOptions):
"""Chrome-specific browser options."""
def __init__(self, br_options):
super(ChromeBrowserOptions, self).__init__()
# Copy to self.
self.__dict__.update(br_options.__dict__)
class CrosBrowserOptions(ChromeBrowserOptions):
"""ChromeOS-specific browser options."""
def __init__(self, br_options):
super(CrosBrowserOptions, self).__init__(br_options)
# Create a browser with oobe property.
self.create_browser_with_oobe = False
# Clear enterprise policy before logging in.
self.clear_enterprise_policy = True
# Disable GAIA/enterprise services.
self.disable_gaia_services = True
self.auto_login = True
self.gaia_login = False
self.username = '[email protected]'
self.password = ''
def IsCrosBrowserOptions(self):
return True
| bsd-3-clause | -5,856,942,632,231,719,000 | 36.038741 | 80 | 0.642675 | false |
czcorpus/kontext | lib/corplib/fallback.py | 1 | 3332 | # Copyright (c) 2013 Institute of the Czech National Corpus
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2
# dated June, 1991.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
from manatee import Corpus
class EmptyCorpus:
"""
    EmptyCorpus serves as a kind of fake corpus to keep KonText operational
    in some special cases (i.e. cases where we do not need any corpus to be
    instantiated, a situation the original Bonito code probably never
    counted on).
"""
def __init__(self, corpname: str = ''):
self._corpname = corpname
self._conf = {
'ENCODING': 'UTF-8',
'NAME': self.corpname,
'ATTRLIST': '',
'STRUCTLIST': ''
}
@property
def corpname(self):
return self._corpname
@property
def spath(self):
return None
@property
def subcname(self):
return None
@property
def subchash(self):
return None
@property
def created(self):
return None
@property
def is_published(self):
return False
@property
def orig_spath(self):
return None
@property
def orig_subcname(self):
return None
@property
def author(self):
return None
@property
def author_id(self):
return -1
@property
def description(self):
return None
def get_conf(self, param):
return self._conf.get(param, '')
def get_confpath(self, *args, **kwargs):
return None
def get_conffile(self, *args, **kwargs):
return None
def set_default_attr(self, *args, **kwargs):
pass
@property
def size(self):
return 0
@property
def search_size(self):
return 0
def get_struct(self, *args, **kwargs):
pass
def get_attr(self, *args, **kwargs):
pass
def get_info(self, *args, **kwargs):
pass
def unwrap(self) -> Corpus:
return None
def freq_dist(self, rs, crit, limit, words, freqs, norms):
pass
def filter_query(self, *args, **kwargs):
pass
def is_subcorpus(self):
return False
def save_subc_description(self, desc: str):
pass
def freq_precalc_file(self, attrname: str):
return None
@property
def corp_mtime(self):
return -1
class ErrorCorpus(EmptyCorpus):
"""
This type is used in case we encounter a corpus-initialization error
and yet we still need proper template/etc. variables initialized
(e.g. user visits URL containing non-existing sub-corpus)
"""
def __init__(self, err):
"""
arguments:
        err -- an error which caused the original corpus to fail to initialize
"""
super(ErrorCorpus, self).__init__()
self._error = err
def get_error(self):
"""
returns original error
"""
return self._error
def is_subcorpus(self):
return False
| gpl-2.0 | -8,455,687,116,255,003,000 | 21.066225 | 82 | 0.602941 | false |
phantomii/restalchemy | restalchemy/tests/functional/restapi/ra_based/microservice/routes.py | 1 | 1858 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2016 Eugene Frolov <[email protected]>
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from restalchemy.api import routes
from restalchemy.tests.functional.restapi.ra_based.microservice import (
controllers)
class PortRoute(routes.Route):
__controller__ = controllers.PortController
__allow_methods__ = [routes.CREATE, routes.FILTER, routes.GET,
routes.DELETE]
class VMPowerOnAction(routes.Action):
__controller__ = controllers.VMController
class VMPowerOffAction(routes.Action):
__controller__ = controllers.VMController
class VMRoute(routes.Route):
__controller__ = controllers.VMController
__allow_methods__ = [routes.CREATE, routes.GET, routes.DELETE,
routes.FILTER, routes.UPDATE]
poweron = routes.action(VMPowerOnAction, invoke=True)
poweroff = routes.action(VMPowerOffAction, invoke=True)
ports = routes.route(PortRoute, resource_route=True)
class V1Route(routes.Route):
__controller__ = controllers.V1Controller
__allow_methods__ = [routes.FILTER]
vms = routes.route(VMRoute)
class Root(routes.Route):
__controller__ = controllers.RootController
__allow_methods__ = [routes.FILTER]
v1 = routes.route(V1Route)
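# Descriptive note (added comment, not part of the original module): the
# declarative tree above roughly exposes
#   /v1/vms
#   /v1/vms/<vm-id>/ports
#   /v1/vms/<vm-id>/actions/poweron/invoke   (and .../poweroff/invoke)
# The exact URL layout is an assumption based on restalchemy routing
# conventions; see the referenced controllers for the actual behaviour.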
| apache-2.0 | 4,405,113,161,361,378,300 | 31.034483 | 78 | 0.708827 | false |
Joshuaalbert/IonoTomo | src/ionotomo/tomography/linear_operators.py | 1 | 4255 | import tensorflow as tf
import numpy as np
from ionotomo.settings import TFSettings
from ionotomo.tomography.interpolation import RegularGridInterpolator
from ionotomo.tomography.integrate import simps
class RayOp(object):
r"""Linear operator that performs for any v(x)
h[i1,...,ir] = \int_R[i1,...,ir] ds M(x) v(x)
grid : tuple of ndim Tensors specifying grid coordinates used for interpolation
M : the function over V to integrate, defined on the *grid*
rays : Tensor with *r* ray index dimensions and last dim is size ndim
Defines the ray trajectories over which to integrate.
Shape (i1,...,ir, ndim, N)
transpose : bool
If True then Av represents \sum_R \Delta_R(x) v_R M(x)
"""
def __init__(self,grid,M,rays,dx = None,
weight = None, transpose = False,
dtype=TFSettings.tf_float):
self.dtype = dtype
self.grid = grid
self.rays = tf.cast(rays,TFSettings.tf_float)
if dx is None:
self.dx = tf.sqrt(tf.reduce_sum(tf.square(self.rays[...,1:] - self.rays[...,:-1]),axis=-2))
self.dx = tf.cumsum(tf.concat([tf.zeros_like(self.dx[...,0:1]),self.dx],axis=-1),axis=-1)
else:
nd = tf.size(tf.shape(rays))
dxshape = tf.concat([tf.ones_like(tf.shape(rays)[0:-2]),
tf.shape(rays)[nd-1:nd]],axis=0)
self.dx = tf.reshape(dx,dxshape)
if weight is not None:
self.weight = tf.reshape(tf.cast(weight,self.dtype),self.range_shape())
else:
self.weight = None
self.M = tf.cast(M,self.dtype)
self.transpose = transpose
def domain_shape(self):
return tf.shape(self.M)
def range_shape(self):
return tf.shape(self.rays)[:-2]
def shape(self):
return tf.concat([self.range_shape(),self.domain_shape()],axis=0)
def matmul(self,x,adjoint=False,adjoint_arg=False):
'''Transform [batch] matrix x with left multiplication: x --> Ax.
x: Tensor with compatible shape and same dtype as self.
See class docstring for definition of compatibility.
adjoint: Python bool. If True, left multiply by the adjoint: A^H x.
adjoint_arg: Python bool.
If True, compute A x^H where x^H is the hermitian transpose
(transposition and complex conjugation).
        name: A name for this `Op`.
        Returns:
            A Tensor with the shape of `range_shape()` and the same dtype as self.
'''
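        # (Added note, not in the original source:) the `adjoint` and
        # `adjoint_arg` flags are accepted for operator-API compatibility but
        # are not used by this implementation.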
x = tf.cast(x,self.dtype)
Ax = self.M * x
Ax = RegularGridInterpolator(self.grid,Ax,method='linear')
if self.weight is None:
Ax = Ax(tf.unstack(self.rays,axis=-2))
else:
Ax = self.weight*Ax(tf.unstack(self.rays,axis=-2))
Ax = simps(Ax, self.dx,axis = -1)
return Ax
class TECForwardEquation(RayOp):
def __init__(self,i0, grid,M,rays,dx = None,
weight = None, transpose = False,
dtype=TFSettings.tf_float):
super(TECForwardEquation,self).__init__(grid,M,rays,dx,
weight, transpose, dtype)
self.i0 = tf.cast(i0,TFSettings.tf_int)
def matmul(self,x,adjoint=False,adjoint_arg=False):
'''Transform [batch] matrix x with left multiplication: x --> Ax.
x: Tensor with compatible shape and same dtype as self.
See class docstring for definition of compatibility.
adjoint: Python bool. If True, left multiply by the adjoint: A^H x.
adjoint_arg: Python bool.
If True, compute A x^H where x^H is the hermitian transpose
(transposition and complex conjugation).
        name: A name for this `Op`.
        Returns:
            A Tensor with the shape of `range_shape()` and the same dtype as self.
'''
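        # (Added note, not in the original source:) subtracting the row at
        # index i0 makes the integrals relative to a reference ray, which is
        # presumably how differential TEC is formed here.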
Ax = super(TECForwardEquation,self).matmul(x)
Ax = Ax - Ax[self.i0:self.i0+1, ...]
return Ax
if __name__ == '__main__':
rays = np.sort(np.random.uniform(size=[2,2,3,6]),axis=-1)
M = np.random.normal(size=(100,100,100))
grid = (np.linspace(0,1,100),)*3
op = TECForwardEquation(0,grid, M, rays)
x = np.random.normal(size=(100,100,100))
sess = tf.Session()
print(sess.run(op.matmul(x)))
sess.close()
| apache-2.0 | 3,722,212,576,356,630,000 | 38.398148 | 103 | 0.601175 | false |
keflavich/pyspeckit-obsolete | examples/sn_example.py | 1 | 6163 | """
Example demonstrating how to fit a complex H-alpha profile after subtracting off a satellite line
(in this case, He I 6678.151704)
"""
import pyspeckit
sp = pyspeckit.OpticalSpectrum('sn2009ip_halpha.fits')
# start by plotting a small region around the H-alpha line
sp.plotter(xmin=6100,xmax=7000,ymax=2.23,ymin=0)
# the baseline (continuum) fit will be 2nd order, and excludes "bad"
# parts of the spectrum
# The exclusion zone was selected interactively (i.e., cursor hovering over the spectrum)
sp.baseline(xmin=6100, xmax=7000,
exclude=[6450,6746,6815,6884,7003,7126,7506,7674,8142,8231],
subtract=False, reset_selection=True, highlight_fitregion=True,
order=2)
# Fit a 4-parameter voigt (figured out through a series of guess-and-check fits)
sp.specfit(guesses=[2.4007096541802202, 6563.2307968382256, 3.5653446153950314, 1,
0.53985149324131965, 6564.3460908526877, 19.443226155616617, 1,
0.11957267912208754, 6678.3853431367716, 4.1892742162283181, 1,
0.10506431180136294, 6589.9310414408683, 72.378997529374672, 1,],
fittype='voigt')
# Now overplot the fitted components with an offset so we can see them
# the add_baseline=True bit means that each component will be displayed with the "Continuum" added
# If this was off, the components would be displayed at y=0
# the component_yoffset is the offset to add to the continuum for plotting only (a constant)
sp.specfit.plot_components(add_baseline=True,component_yoffset=-0.2)
# Now overplot the residuals on the same graph by specifying which axis to overplot it on
# clear=False is needed to keep the original fitted plot drawn
# yoffset is the offset from y=zero
sp.specfit.plotresiduals(axis=sp.plotter.axis,clear=False,yoffset=0.20,label=False)
# save the figure
sp.plotter.savefig("SN2009ip_UT121002_Halpha_voigt_zoom.png")
# print the fit results in table form
# This includes getting the equivalent width for each component using sp.specfit.EQW
print " ".join(["%15s %15s" % (s,s+"err") for s in sp.specfit.parinfo.parnames])," ".join(["%15s" % ("EQW"+str(i)) for i,w in enumerate(sp.specfit.EQW(components=True))])
print " ".join(["%15g %15g" % (par.value,par.error) for par in sp.specfit.parinfo])," ".join(["%15g" % w for w in sp.specfit.EQW(components=True)])
# here are some other fitted parameters that can be printed:
print "Fitted EQW:", sp.specfit.EQW()
print "Direct EQW:", sp.specfit.EQW(fitted=False)
print "Approximate FWHM:", sp.specfit.measure_approximate_fwhm()
print "Approximate FWHM (with interpolation):", sp.specfit.measure_approximate_fwhm(interpolate_factor=10)
# zoom in further for a detailed view of the profile fit
sp.plotter.axis.set_xlim(6562-150,6562+150)
sp.plotter.savefig("SN2009ip_UT121002_Halpha_voigt_zoomzoom.png")
# now we'll re-do the fit with the He I line subtracted off
# first, create a copy of the spectrum
just_halpha = sp.copy()
# Second, subtract off the model fit for the He I component
# (identify it by looking at the fitted central wavelengths)
just_halpha.data -= sp.specfit.modelcomponents[2,:]
# re-plot
just_halpha.plotter(xmin=6100,xmax=7000,ymax=2.00,ymin=-0.3)
# this time, subtract off the baseline - we're now confident that the continuum
# fit is good enough
just_halpha.baseline(xmin=6100, xmax=7000,
exclude=[6450,6746,6815,6884,7003,7126,7506,7674,8142,8231],
subtract=True, reset_selection=True, highlight_fitregion=True, order=2)
# Do a 3-component fit now that the Helium line is gone
# I've added some limits here because I know what parameters I expect of my fitted line
just_halpha.specfit(guesses=[2.4007096541802202, 6563.2307968382256, 3.5653446153950314, 1,
0.53985149324131965, 6564.3460908526877, 19.443226155616617, 1,
0.10506431180136294, 6589.9310414408683, 50.378997529374672, 1,],
fittype='voigt',
xmin=6100,xmax=7000,
limitedmax=[False,False,True,True]*3,
limitedmin=[True,False,True,True]*3,
limits=[(0,0),(0,0),(0,100),(0,100)]*3)
# overplot the components and residuals again
just_halpha.specfit.plot_components(add_baseline=False,component_yoffset=-0.1)
just_halpha.specfit.plotresiduals(axis=just_halpha.plotter.axis,clear=False,yoffset=-0.20,label=False)
# The "optimal chi^2" isn't a real statistical concept, it's something I made up
# However, I think it makes sense (but post an issue if you disagree!):
# It uses the fitted model to find all pixels that are above the noise in the spectrum
# then computes chi^2/n using only those pixels
just_halpha.specfit.annotate(chi2='optimal')
# save the figure
just_halpha.plotter.savefig("SN2009ip_UT121002_Halpha_voigt_threecomp.png")
# A new zoom-in figure
import pylab
# now hide the legend
just_halpha.specfit.fitleg.set_visible(False)
# overplot a y=0 line through the residuals (for reference)
pylab.plot([6100,7000],[-0.2,-0.2],'y--')
# zoom vertically
pylab.gca().set_ylim(-0.3,0.3)
# redraw & save
pylab.draw()
just_halpha.plotter.savefig("SN2009ip_UT121002_Halpha_voigt_threecomp_zoom.png")
# Part of the reason for doing the above work is to demonstrate that a
# 3-component fit is better than a 2-component fit
#
# So, now we do the same as above with a 2-component fit
just_halpha.plotter(xmin=6100,xmax=7000,ymax=2.00,ymin=-0.3)
just_halpha.specfit(guesses=[2.4007096541802202, 6563.2307968382256, 3.5653446153950314, 1,
0.53985149324131965, 6564.3460908526877, 19.443226155616617, 1],
fittype='voigt')
just_halpha.specfit.plot_components(add_baseline=False,component_yoffset=-0.1)
just_halpha.specfit.plotresiduals(axis=just_halpha.plotter.axis,clear=False,yoffset=-0.20,label=False)
just_halpha.specfit.annotate(chi2='optimal')
just_halpha.plotter.savefig("SN2009ip_UT121002_Halpha_voigt_twocomp.png")
just_halpha.specfit.fitleg.set_visible(False)
pylab.plot([6100,7000],[-0.2,-0.2],'y--')
pylab.gca().set_ylim(-0.3,0.3)
pylab.draw()
just_halpha.plotter.savefig("SN2009ip_UT121002_Halpha_voigt_twocomp_zoom.png")
| mit | 3,298,661,510,037,429,000 | 46.775194 | 170 | 0.727568 | false |
miha-skalic/ITEKA | qt_design/twosubstr/load_data_ds.py | 1 | 8845 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'load_data_ds.ui'
#
# Created: Fri Jun 12 17:18:39 2015
# by: PyQt4 UI code generator 4.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_LoadDataDs(object):
def setupUi(self, LoadDataDs):
LoadDataDs.setObjectName(_fromUtf8("LoadDataDs"))
LoadDataDs.resize(1076, 520)
self.horizontalLayout = QtGui.QHBoxLayout(LoadDataDs)
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.verticalLayout = QtGui.QVBoxLayout()
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.SubAName = QtGui.QLabel(LoadDataDs)
self.SubAName.setObjectName(_fromUtf8("SubAName"))
self.verticalLayout.addWidget(self.SubAName)
self.SubAText = QtGui.QPlainTextEdit(LoadDataDs)
self.SubAText.setObjectName(_fromUtf8("SubAText"))
self.verticalLayout.addWidget(self.SubAText)
self.horizontalLayout.addLayout(self.verticalLayout)
self.verticalLayout_2 = QtGui.QVBoxLayout()
self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
self.SubBName = QtGui.QLabel(LoadDataDs)
self.SubBName.setObjectName(_fromUtf8("SubBName"))
self.verticalLayout_2.addWidget(self.SubBName)
self.verticalWidget_2 = QtGui.QWidget(LoadDataDs)
self.verticalWidget_2.setObjectName(_fromUtf8("verticalWidget_2"))
self.verticalLayout_6 = QtGui.QVBoxLayout(self.verticalWidget_2)
self.verticalLayout_6.setMargin(0)
self.verticalLayout_6.setObjectName(_fromUtf8("verticalLayout_6"))
self.SubBText = QtGui.QDoubleSpinBox(self.verticalWidget_2)
self.SubBText.setDecimals(5)
self.SubBText.setMaximum(100000.0)
self.SubBText.setProperty("value", 1.0)
self.SubBText.setObjectName(_fromUtf8("SubBText"))
self.verticalLayout_6.addWidget(self.SubBText)
spacerItem = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.verticalLayout_6.addItem(spacerItem)
self.verticalLayout_2.addWidget(self.verticalWidget_2)
self.line_2 = QtGui.QFrame(LoadDataDs)
self.line_2.setFrameShape(QtGui.QFrame.HLine)
self.line_2.setFrameShadow(QtGui.QFrame.Sunken)
self.line_2.setObjectName(_fromUtf8("line_2"))
self.verticalLayout_2.addWidget(self.line_2)
self.verticalWidget = QtGui.QWidget(LoadDataDs)
self.verticalWidget.setObjectName(_fromUtf8("verticalWidget"))
self.verticalLayout_5 = QtGui.QVBoxLayout(self.verticalWidget)
self.verticalLayout_5.setMargin(0)
self.verticalLayout_5.setObjectName(_fromUtf8("verticalLayout_5"))
self.label = QtGui.QLabel(self.verticalWidget)
self.label.setObjectName(_fromUtf8("label"))
self.verticalLayout_5.addWidget(self.label)
self.InitConc = QtGui.QDoubleSpinBox(self.verticalWidget)
self.InitConc.setDecimals(5)
self.InitConc.setMaximum(999999.0)
self.InitConc.setProperty("value", 1.0)
self.InitConc.setObjectName(_fromUtf8("InitConc"))
self.verticalLayout_5.addWidget(self.InitConc)
self.label_2 = QtGui.QLabel(self.verticalWidget)
self.label_2.setObjectName(_fromUtf8("label_2"))
self.verticalLayout_5.addWidget(self.label_2)
self.TimeStep = QtGui.QDoubleSpinBox(self.verticalWidget)
self.TimeStep.setDecimals(5)
self.TimeStep.setMaximum(999999.0)
self.TimeStep.setProperty("value", 1.0)
self.TimeStep.setObjectName(_fromUtf8("TimeStep"))
self.verticalLayout_5.addWidget(self.TimeStep)
spacerItem1 = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.verticalLayout_5.addItem(spacerItem1)
self.verticalLayout_2.addWidget(self.verticalWidget)
self.horizontalLayout.addLayout(self.verticalLayout_2)
self.verticalLayout_3 = QtGui.QVBoxLayout()
self.verticalLayout_3.setObjectName(_fromUtf8("verticalLayout_3"))
self.label_3 = QtGui.QLabel(LoadDataDs)
self.label_3.setObjectName(_fromUtf8("label_3"))
self.verticalLayout_3.addWidget(self.label_3)
self.RateText = QtGui.QPlainTextEdit(LoadDataDs)
self.RateText.setObjectName(_fromUtf8("RateText"))
self.verticalLayout_3.addWidget(self.RateText)
self.horizontalLayout.addLayout(self.verticalLayout_3)
self.verticalLayout_4 = QtGui.QVBoxLayout()
self.verticalLayout_4.setObjectName(_fromUtf8("verticalLayout_4"))
self.AddRep = QtGui.QPushButton(LoadDataDs)
self.AddRep.setObjectName(_fromUtf8("AddRep"))
self.verticalLayout_4.addWidget(self.AddRep)
spacerItem2 = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Minimum)
self.verticalLayout_4.addItem(spacerItem2)
self.SwitchSet = QtGui.QPushButton(LoadDataDs)
self.SwitchSet.setObjectName(_fromUtf8("SwitchSet"))
self.verticalLayout_4.addWidget(self.SwitchSet)
self.SwitchRoles = QtGui.QPushButton(LoadDataDs)
self.SwitchRoles.setObjectName(_fromUtf8("SwitchRoles"))
self.verticalLayout_4.addWidget(self.SwitchRoles)
self.line = QtGui.QFrame(LoadDataDs)
self.line.setFrameShape(QtGui.QFrame.HLine)
self.line.setFrameShadow(QtGui.QFrame.Sunken)
self.line.setObjectName(_fromUtf8("line"))
self.verticalLayout_4.addWidget(self.line)
self.formLayout = QtGui.QFormLayout()
self.formLayout.setFieldGrowthPolicy(QtGui.QFormLayout.AllNonFixedFieldsGrow)
self.formLayout.setObjectName(_fromUtf8("formLayout"))
self.label_7 = QtGui.QLabel(LoadDataDs)
self.label_7.setObjectName(_fromUtf8("label_7"))
self.formLayout.setWidget(0, QtGui.QFormLayout.LabelRole, self.label_7)
self.SetNu = QtGui.QLabel(LoadDataDs)
self.SetNu.setObjectName(_fromUtf8("SetNu"))
self.formLayout.setWidget(0, QtGui.QFormLayout.FieldRole, self.SetNu)
self.label_5 = QtGui.QLabel(LoadDataDs)
self.label_5.setObjectName(_fromUtf8("label_5"))
self.formLayout.setWidget(1, QtGui.QFormLayout.LabelRole, self.label_5)
self.RepNu = QtGui.QLabel(LoadDataDs)
self.RepNu.setObjectName(_fromUtf8("RepNu"))
self.formLayout.setWidget(1, QtGui.QFormLayout.FieldRole, self.RepNu)
self.verticalLayout_4.addLayout(self.formLayout)
spacerItem3 = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Ignored)
self.verticalLayout_4.addItem(spacerItem3)
self.OKButton = QtGui.QPushButton(LoadDataDs)
self.OKButton.setObjectName(_fromUtf8("OKButton"))
self.verticalLayout_4.addWidget(self.OKButton)
self.CancelButton = QtGui.QPushButton(LoadDataDs)
self.CancelButton.setObjectName(_fromUtf8("CancelButton"))
self.verticalLayout_4.addWidget(self.CancelButton)
self.horizontalLayout.addLayout(self.verticalLayout_4)
self.retranslateUi(LoadDataDs)
QtCore.QMetaObject.connectSlotsByName(LoadDataDs)
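    # Illustrative usage sketch (added comment, not part of the generated file):
    #   app = QtGui.QApplication(sys.argv)
    #   dialog = QtGui.QDialog()
    #   ui = Ui_LoadDataDs()
    #   ui.setupUi(dialog)
    #   dialog.show()
    #   sys.exit(app.exec_())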
def retranslateUi(self, LoadDataDs):
LoadDataDs.setWindowTitle(_translate("LoadDataDs", "Dialog", None))
self.SubAName.setText(_translate("LoadDataDs", "SubsA", None))
self.SubBName.setText(_translate("LoadDataDs", "SubsB", None))
self.label.setText(_translate("LoadDataDs", "Concentration", None))
self.label_2.setText(_translate("LoadDataDs", "Injection spacing (time)", None))
self.label_3.setText(_translate("LoadDataDs", "Reaction rate", None))
self.AddRep.setText(_translate("LoadDataDs", "Append replicate to set", None))
self.SwitchSet.setText(_translate("LoadDataDs", "Next set", None))
self.SwitchRoles.setText(_translate("LoadDataDs", "Switch substrate roles", None))
self.label_7.setText(_translate("LoadDataDs", "Adding to set:", None))
self.SetNu.setText(_translate("LoadDataDs", "1", None))
self.label_5.setText(_translate("LoadDataDs", "Replicates present:", None))
self.RepNu.setText(_translate("LoadDataDs", "0", None))
self.OKButton.setText(_translate("LoadDataDs", "Import", None))
self.CancelButton.setText(_translate("LoadDataDs", "Cancel", None))
| gpl-3.0 | 7,131,237,885,029,008,000 | 52.932927 | 103 | 0.705483 | false |
jor-/scipy | scipy/optimize/minpack.py | 1 | 34280 | from __future__ import division, print_function, absolute_import
import threading
import warnings
from . import _minpack
import numpy as np
from numpy import (atleast_1d, dot, take, triu, shape, eye,
transpose, zeros, prod, greater, array,
all, where, isscalar, asarray, inf, abs,
finfo, inexact, issubdtype, dtype)
from scipy.linalg import svd, cholesky, solve_triangular, LinAlgError
from scipy._lib._util import _asarray_validated, _lazywhere
from .optimize import OptimizeResult, _check_unknown_options, OptimizeWarning
from ._lsq import least_squares
from ._lsq.common import make_strictly_feasible
from ._lsq.least_squares import prepare_bounds
error = _minpack.error
__all__ = ['fsolve', 'leastsq', 'fixed_point', 'curve_fit']
def _check_func(checker, argname, thefunc, x0, args, numinputs,
output_shape=None):
res = atleast_1d(thefunc(*((x0[:numinputs],) + args)))
if (output_shape is not None) and (shape(res) != output_shape):
if (output_shape[0] != 1):
if len(output_shape) > 1:
if output_shape[1] == 1:
return shape(res)
msg = "%s: there is a mismatch between the input and output " \
"shape of the '%s' argument" % (checker, argname)
func_name = getattr(thefunc, '__name__', None)
if func_name:
msg += " '%s'." % func_name
else:
msg += "."
        msg += ' Shape should be %s but it is %s.' % (output_shape, shape(res))
raise TypeError(msg)
if issubdtype(res.dtype, inexact):
dt = res.dtype
else:
dt = dtype(float)
return shape(res), dt
def fsolve(func, x0, args=(), fprime=None, full_output=0,
col_deriv=0, xtol=1.49012e-8, maxfev=0, band=None,
epsfcn=None, factor=100, diag=None):
"""
Find the roots of a function.
Return the roots of the (non-linear) equations defined by
``func(x) = 0`` given a starting estimate.
Parameters
----------
func : callable ``f(x, *args)``
A function that takes at least one (possibly vector) argument,
and returns a value of the same length.
x0 : ndarray
The starting estimate for the roots of ``func(x) = 0``.
args : tuple, optional
Any extra arguments to `func`.
fprime : callable ``f(x, *args)``, optional
A function to compute the Jacobian of `func` with derivatives
across the rows. By default, the Jacobian will be estimated.
full_output : bool, optional
If True, return optional outputs.
col_deriv : bool, optional
Specify whether the Jacobian function computes derivatives down
the columns (faster, because there is no transpose operation).
xtol : float, optional
The calculation will terminate if the relative error between two
consecutive iterates is at most `xtol`.
maxfev : int, optional
The maximum number of calls to the function. If zero, then
``100*(N+1)`` is the maximum where N is the number of elements
in `x0`.
band : tuple, optional
If set to a two-sequence containing the number of sub- and
super-diagonals within the band of the Jacobi matrix, the
Jacobi matrix is considered banded (only for ``fprime=None``).
epsfcn : float, optional
A suitable step length for the forward-difference
approximation of the Jacobian (for ``fprime=None``). If
`epsfcn` is less than the machine precision, it is assumed
that the relative errors in the functions are of the order of
the machine precision.
factor : float, optional
A parameter determining the initial step bound
(``factor * || diag * x||``). Should be in the interval
``(0.1, 100)``.
diag : sequence, optional
        N positive entries that serve as scale factors for the
variables.
Returns
-------
x : ndarray
The solution (or the result of the last iteration for
an unsuccessful call).
infodict : dict
A dictionary of optional outputs with the keys:
``nfev``
number of function calls
``njev``
number of Jacobian calls
``fvec``
function evaluated at the output
``fjac``
the orthogonal matrix, q, produced by the QR
factorization of the final approximate Jacobian
matrix, stored column wise
``r``
upper triangular matrix produced by QR factorization
of the same matrix
``qtf``
the vector ``(transpose(q) * fvec)``
ier : int
An integer flag. Set to 1 if a solution was found, otherwise refer
to `mesg` for more information.
mesg : str
If no solution is found, `mesg` details the cause of failure.
See Also
--------
root : Interface to root finding algorithms for multivariate
functions. See the ``method=='hybr'`` in particular.
Notes
-----
``fsolve`` is a wrapper around MINPACK's hybrd and hybrj algorithms.
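    Examples
    --------
    Illustrative sketch added by the editor (not part of the original
    docstring): find a solution of the system ``x0*cos(x1) = 4`` and
    ``x1*x0 - x1 = 5``.
    >>> import numpy as np
    >>> from scipy.optimize import fsolve
    >>> def func(x):
    ...     return [x[0] * np.cos(x[1]) - 4,
    ...             x[1] * x[0] - x[1] - 5]
    >>> root = fsolve(func, [1, 1])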
"""
options = {'col_deriv': col_deriv,
'xtol': xtol,
'maxfev': maxfev,
'band': band,
'eps': epsfcn,
'factor': factor,
'diag': diag}
res = _root_hybr(func, x0, args, jac=fprime, **options)
if full_output:
x = res['x']
info = dict((k, res.get(k))
for k in ('nfev', 'njev', 'fjac', 'r', 'qtf') if k in res)
info['fvec'] = res['fun']
return x, info, res['status'], res['message']
else:
status = res['status']
msg = res['message']
if status == 0:
raise TypeError(msg)
elif status == 1:
pass
elif status in [2, 3, 4, 5]:
warnings.warn(msg, RuntimeWarning)
else:
raise TypeError(msg)
return res['x']
def _root_hybr(func, x0, args=(), jac=None,
col_deriv=0, xtol=1.49012e-08, maxfev=0, band=None, eps=None,
factor=100, diag=None, **unknown_options):
"""
Find the roots of a multivariate function using MINPACK's hybrd and
hybrj routines (modified Powell method).
Options
-------
col_deriv : bool
Specify whether the Jacobian function computes derivatives down
the columns (faster, because there is no transpose operation).
xtol : float
The calculation will terminate if the relative error between two
consecutive iterates is at most `xtol`.
maxfev : int
The maximum number of calls to the function. If zero, then
``100*(N+1)`` is the maximum where N is the number of elements
in `x0`.
band : tuple
If set to a two-sequence containing the number of sub- and
super-diagonals within the band of the Jacobi matrix, the
Jacobi matrix is considered banded (only for ``fprime=None``).
eps : float
A suitable step length for the forward-difference
approximation of the Jacobian (for ``fprime=None``). If
`eps` is less than the machine precision, it is assumed
that the relative errors in the functions are of the order of
the machine precision.
factor : float
A parameter determining the initial step bound
(``factor * || diag * x||``). Should be in the interval
``(0.1, 100)``.
diag : sequence
        N positive entries that serve as scale factors for the
variables.
"""
_check_unknown_options(unknown_options)
epsfcn = eps
x0 = asarray(x0).flatten()
n = len(x0)
if not isinstance(args, tuple):
args = (args,)
shape, dtype = _check_func('fsolve', 'func', func, x0, args, n, (n,))
if epsfcn is None:
epsfcn = finfo(dtype).eps
Dfun = jac
if Dfun is None:
if band is None:
ml, mu = -10, -10
else:
ml, mu = band[:2]
if maxfev == 0:
maxfev = 200 * (n + 1)
retval = _minpack._hybrd(func, x0, args, 1, xtol, maxfev,
ml, mu, epsfcn, factor, diag)
else:
_check_func('fsolve', 'fprime', Dfun, x0, args, n, (n, n))
if (maxfev == 0):
maxfev = 100 * (n + 1)
retval = _minpack._hybrj(func, Dfun, x0, args, 1,
col_deriv, xtol, maxfev, factor, diag)
x, status = retval[0], retval[-1]
errors = {0: "Improper input parameters were entered.",
1: "The solution converged.",
2: "The number of calls to function has "
"reached maxfev = %d." % maxfev,
3: "xtol=%f is too small, no further improvement "
"in the approximate\n solution "
"is possible." % xtol,
4: "The iteration is not making good progress, as measured "
"by the \n improvement from the last five "
"Jacobian evaluations.",
5: "The iteration is not making good progress, "
"as measured by the \n improvement from the last "
"ten iterations.",
'unknown': "An error occurred."}
info = retval[1]
info['fun'] = info.pop('fvec')
sol = OptimizeResult(x=x, success=(status == 1), status=status)
sol.update(info)
try:
sol['message'] = errors[status]
except KeyError:
sol['message'] = errors['unknown']
return sol
LEASTSQ_SUCCESS = [1, 2, 3, 4]
LEASTSQ_FAILURE = [5, 6, 7, 8]
def leastsq(func, x0, args=(), Dfun=None, full_output=0,
col_deriv=0, ftol=1.49012e-8, xtol=1.49012e-8,
gtol=0.0, maxfev=0, epsfcn=None, factor=100, diag=None):
"""
Minimize the sum of squares of a set of equations.
::
x = arg min(sum(func(y)**2,axis=0))
y
Parameters
----------
func : callable
should take at least one (possibly length N vector) argument and
returns M floating point numbers. It must not return NaNs or
fitting might fail.
x0 : ndarray
The starting estimate for the minimization.
args : tuple, optional
Any extra arguments to func are placed in this tuple.
Dfun : callable, optional
A function or method to compute the Jacobian of func with derivatives
across the rows. If this is None, the Jacobian will be estimated.
full_output : bool, optional
non-zero to return all optional outputs.
col_deriv : bool, optional
non-zero to specify that the Jacobian function computes derivatives
down the columns (faster, because there is no transpose operation).
ftol : float, optional
Relative error desired in the sum of squares.
xtol : float, optional
Relative error desired in the approximate solution.
gtol : float, optional
Orthogonality desired between the function vector and the columns of
the Jacobian.
maxfev : int, optional
The maximum number of calls to the function. If `Dfun` is provided
then the default `maxfev` is 100*(N+1) where N is the number of elements
in x0, otherwise the default `maxfev` is 200*(N+1).
epsfcn : float, optional
A variable used in determining a suitable step length for the forward-
difference approximation of the Jacobian (for Dfun=None).
Normally the actual step length will be sqrt(epsfcn)*x
If epsfcn is less than the machine precision, it is assumed that the
relative errors are of the order of the machine precision.
factor : float, optional
A parameter determining the initial step bound
(``factor * || diag * x||``). Should be in interval ``(0.1, 100)``.
diag : sequence, optional
        N positive entries that serve as scale factors for the variables.
Returns
-------
x : ndarray
The solution (or the result of the last iteration for an unsuccessful
call).
cov_x : ndarray
The inverse of the Hessian. `fjac` and `ipvt` are used to construct an
estimate of the Hessian. A value of None indicates a singular matrix,
which means the curvature in parameters `x` is numerically flat. To
obtain the covariance matrix of the parameters `x`, `cov_x` must be
multiplied by the variance of the residuals -- see curve_fit.
infodict : dict
a dictionary of optional outputs with the keys:
``nfev``
The number of function calls
``fvec``
The function evaluated at the output
``fjac``
A permutation of the R matrix of a QR
factorization of the final approximate
Jacobian matrix, stored column wise.
Together with ipvt, the covariance of the
estimate can be approximated.
``ipvt``
An integer array of length N which defines
a permutation matrix, p, such that
fjac*p = q*r, where r is upper triangular
with diagonal elements of nonincreasing
magnitude. Column j of p is column ipvt(j)
of the identity matrix.
``qtf``
The vector (transpose(q) * fvec).
mesg : str
A string message giving information about the cause of failure.
ier : int
An integer flag. If it is equal to 1, 2, 3 or 4, the solution was
found. Otherwise, the solution was not found. In either case, the
optional output variable 'mesg' gives more information.
See Also
--------
least_squares : Newer interface to solve nonlinear least-squares problems
with bounds on the variables. See ``method=='lm'`` in particular.
Notes
-----
"leastsq" is a wrapper around MINPACK's lmdif and lmder algorithms.
cov_x is a Jacobian approximation to the Hessian of the least squares
objective function.
This approximation assumes that the objective function is based on the
difference between some observed target data (ydata) and a (non-linear)
function of the parameters `f(xdata, params)` ::
func(params) = ydata - f(xdata, params)
so that the objective function is ::
min sum((ydata - f(xdata, params))**2, axis=0)
params
The solution, `x`, is always a 1D array, regardless of the shape of `x0`,
or whether `x0` is a scalar.
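    Examples
    --------
    Illustrative sketch added by the editor (not part of the original
    docstring): fit a straight line ``y = a*x + b`` in the least-squares sense.
    >>> from scipy.optimize import leastsq
    >>> def residuals(p, x, y):
    ...     return y - (p[0] * x + p[1])
    >>> x = np.arange(10, dtype=float)
    >>> y = 3.0 * x + 1.0
    >>> p_opt, ier = leastsq(residuals, [1.0, 0.0], args=(x, y))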
"""
x0 = asarray(x0).flatten()
n = len(x0)
if not isinstance(args, tuple):
args = (args,)
shape, dtype = _check_func('leastsq', 'func', func, x0, args, n)
m = shape[0]
if n > m:
raise TypeError('Improper input: N=%s must not exceed M=%s' % (n, m))
if epsfcn is None:
epsfcn = finfo(dtype).eps
if Dfun is None:
if maxfev == 0:
maxfev = 200*(n + 1)
retval = _minpack._lmdif(func, x0, args, full_output, ftol, xtol,
gtol, maxfev, epsfcn, factor, diag)
else:
if col_deriv:
_check_func('leastsq', 'Dfun', Dfun, x0, args, n, (n, m))
else:
_check_func('leastsq', 'Dfun', Dfun, x0, args, n, (m, n))
if maxfev == 0:
maxfev = 100 * (n + 1)
retval = _minpack._lmder(func, Dfun, x0, args, full_output,
col_deriv, ftol, xtol, gtol, maxfev,
factor, diag)
errors = {0: ["Improper input parameters.", TypeError],
1: ["Both actual and predicted relative reductions "
"in the sum of squares\n are at most %f" % ftol, None],
2: ["The relative error between two consecutive "
"iterates is at most %f" % xtol, None],
3: ["Both actual and predicted relative reductions in "
"the sum of squares\n are at most %f and the "
"relative error between two consecutive "
"iterates is at \n most %f" % (ftol, xtol), None],
4: ["The cosine of the angle between func(x) and any "
"column of the\n Jacobian is at most %f in "
"absolute value" % gtol, None],
5: ["Number of calls to function has reached "
"maxfev = %d." % maxfev, ValueError],
6: ["ftol=%f is too small, no further reduction "
"in the sum of squares\n is possible." % ftol,
ValueError],
7: ["xtol=%f is too small, no further improvement in "
"the approximate\n solution is possible." % xtol,
ValueError],
8: ["gtol=%f is too small, func(x) is orthogonal to the "
"columns of\n the Jacobian to machine "
"precision." % gtol, ValueError]}
# The FORTRAN return value (possible return values are >= 0 and <= 8)
info = retval[-1]
if full_output:
cov_x = None
if info in LEASTSQ_SUCCESS:
from numpy.dual import inv
perm = take(eye(n), retval[1]['ipvt'] - 1, 0)
r = triu(transpose(retval[1]['fjac'])[:n, :])
R = dot(r, perm)
try:
cov_x = inv(dot(transpose(R), R))
except (LinAlgError, ValueError):
pass
return (retval[0], cov_x) + retval[1:-1] + (errors[info][0], info)
else:
if info in LEASTSQ_FAILURE:
warnings.warn(errors[info][0], RuntimeWarning)
elif info == 0:
raise errors[info][1](errors[info][0])
return retval[0], info
def _wrap_func(func, xdata, ydata, transform):
if transform is None:
def func_wrapped(params):
return func(xdata, *params) - ydata
elif transform.ndim == 1:
def func_wrapped(params):
return transform * (func(xdata, *params) - ydata)
else:
# Chisq = (y - yd)^T C^{-1} (y-yd)
# transform = L such that C = L L^T
# C^{-1} = L^{-T} L^{-1}
# Chisq = (y - yd)^T L^{-T} L^{-1} (y-yd)
# Define (y-yd)' = L^{-1} (y-yd)
# by solving
# L (y-yd)' = (y-yd)
# and minimize (y-yd)'^T (y-yd)'
def func_wrapped(params):
return solve_triangular(transform, func(xdata, *params) - ydata, lower=True)
return func_wrapped
def _wrap_jac(jac, xdata, transform):
if transform is None:
def jac_wrapped(params):
return jac(xdata, *params)
elif transform.ndim == 1:
def jac_wrapped(params):
return transform[:, np.newaxis] * np.asarray(jac(xdata, *params))
else:
def jac_wrapped(params):
return solve_triangular(transform, np.asarray(jac(xdata, *params)), lower=True)
return jac_wrapped
def _initialize_feasible(lb, ub):
p0 = np.ones_like(lb)
lb_finite = np.isfinite(lb)
ub_finite = np.isfinite(ub)
mask = lb_finite & ub_finite
p0[mask] = 0.5 * (lb[mask] + ub[mask])
mask = lb_finite & ~ub_finite
p0[mask] = lb[mask] + 1
mask = ~lb_finite & ub_finite
p0[mask] = ub[mask] - 1
return p0
def curve_fit(f, xdata, ydata, p0=None, sigma=None, absolute_sigma=False,
check_finite=True, bounds=(-np.inf, np.inf), method=None,
jac=None, **kwargs):
"""
Use non-linear least squares to fit a function, f, to data.
Assumes ``ydata = f(xdata, *params) + eps``
Parameters
----------
f : callable
The model function, f(x, ...). It must take the independent
variable as the first argument and the parameters to fit as
separate remaining arguments.
xdata : array_like or object
The independent variable where the data is measured.
Should usually be an M-length sequence or an (k,M)-shaped array for
functions with k predictors, but can actually be any object.
ydata : array_like
The dependent data, a length M array - nominally ``f(xdata, ...)``.
p0 : array_like, optional
Initial guess for the parameters (length N). If None, then the
initial values will all be 1 (if the number of parameters for the
function can be determined using introspection, otherwise a
ValueError is raised).
sigma : None or M-length sequence or MxM array, optional
Determines the uncertainty in `ydata`. If we define residuals as
``r = ydata - f(xdata, *popt)``, then the interpretation of `sigma`
depends on its number of dimensions:
- A 1-d `sigma` should contain values of standard deviations of
errors in `ydata`. In this case, the optimized function is
``chisq = sum((r / sigma) ** 2)``.
- A 2-d `sigma` should contain the covariance matrix of
errors in `ydata`. In this case, the optimized function is
``chisq = r.T @ inv(sigma) @ r``.
.. versionadded:: 0.19
None (default) is equivalent of 1-d `sigma` filled with ones.
absolute_sigma : bool, optional
If True, `sigma` is used in an absolute sense and the estimated parameter
covariance `pcov` reflects these absolute values.
If False, only the relative magnitudes of the `sigma` values matter.
The returned parameter covariance matrix `pcov` is based on scaling
`sigma` by a constant factor. This constant is set by demanding that the
reduced `chisq` for the optimal parameters `popt` when using the
*scaled* `sigma` equals unity. In other words, `sigma` is scaled to
match the sample variance of the residuals after the fit.
Mathematically,
``pcov(absolute_sigma=False) = pcov(absolute_sigma=True) * chisq(popt)/(M-N)``
check_finite : bool, optional
        If True, check that the input arrays do not contain NaNs or infs,
and raise a ValueError if they do. Setting this parameter to
False may silently produce nonsensical results if the input arrays
        do contain NaNs. Default is True.
bounds : 2-tuple of array_like, optional
Lower and upper bounds on parameters. Defaults to no bounds.
Each element of the tuple must be either an array with the length equal
to the number of parameters, or a scalar (in which case the bound is
taken to be the same for all parameters.) Use ``np.inf`` with an
appropriate sign to disable bounds on all or some parameters.
.. versionadded:: 0.17
method : {'lm', 'trf', 'dogbox'}, optional
Method to use for optimization. See `least_squares` for more details.
Default is 'lm' for unconstrained problems and 'trf' if `bounds` are
provided. The method 'lm' won't work when the number of observations
is less than the number of variables, use 'trf' or 'dogbox' in this
case.
.. versionadded:: 0.17
jac : callable, string or None, optional
Function with signature ``jac(x, ...)`` which computes the Jacobian
matrix of the model function with respect to parameters as a dense
array_like structure. It will be scaled according to provided `sigma`.
If None (default), the Jacobian will be estimated numerically.
String keywords for 'trf' and 'dogbox' methods can be used to select
a finite difference scheme, see `least_squares`.
.. versionadded:: 0.18
kwargs
Keyword arguments passed to `leastsq` for ``method='lm'`` or
`least_squares` otherwise.
Returns
-------
popt : array
Optimal values for the parameters so that the sum of the squared
residuals of ``f(xdata, *popt) - ydata`` is minimized
pcov : 2d array
The estimated covariance of popt. The diagonals provide the variance
of the parameter estimate. To compute one standard deviation errors
on the parameters use ``perr = np.sqrt(np.diag(pcov))``.
How the `sigma` parameter affects the estimated covariance
depends on `absolute_sigma` argument, as described above.
If the Jacobian matrix at the solution doesn't have a full rank, then
'lm' method returns a matrix filled with ``np.inf``, on the other hand
'trf' and 'dogbox' methods use Moore-Penrose pseudoinverse to compute
the covariance matrix.
Raises
------
ValueError
if either `ydata` or `xdata` contain NaNs, or if incompatible options
are used.
RuntimeError
if the least-squares minimization fails.
OptimizeWarning
if covariance of the parameters can not be estimated.
See Also
--------
least_squares : Minimize the sum of squares of nonlinear functions.
scipy.stats.linregress : Calculate a linear least squares regression for
two sets of measurements.
Notes
-----
With ``method='lm'``, the algorithm uses the Levenberg-Marquardt algorithm
through `leastsq`. Note that this algorithm can only deal with
unconstrained problems.
Box constraints can be handled by methods 'trf' and 'dogbox'. Refer to
the docstring of `least_squares` for more information.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.optimize import curve_fit
>>> def func(x, a, b, c):
... return a * np.exp(-b * x) + c
Define the data to be fit with some noise:
>>> xdata = np.linspace(0, 4, 50)
>>> y = func(xdata, 2.5, 1.3, 0.5)
>>> np.random.seed(1729)
>>> y_noise = 0.2 * np.random.normal(size=xdata.size)
>>> ydata = y + y_noise
>>> plt.plot(xdata, ydata, 'b-', label='data')
Fit for the parameters a, b, c of the function `func`:
>>> popt, pcov = curve_fit(func, xdata, ydata)
>>> popt
array([ 2.55423706, 1.35190947, 0.47450618])
>>> plt.plot(xdata, func(xdata, *popt), 'r-',
... label='fit: a=%5.3f, b=%5.3f, c=%5.3f' % tuple(popt))
Constrain the optimization to the region of ``0 <= a <= 3``,
``0 <= b <= 1`` and ``0 <= c <= 0.5``:
>>> popt, pcov = curve_fit(func, xdata, ydata, bounds=(0, [3., 1., 0.5]))
>>> popt
array([ 2.43708906, 1. , 0.35015434])
>>> plt.plot(xdata, func(xdata, *popt), 'g--',
... label='fit: a=%5.3f, b=%5.3f, c=%5.3f' % tuple(popt))
>>> plt.xlabel('x')
>>> plt.ylabel('y')
>>> plt.legend()
>>> plt.show()
"""
if p0 is None:
# determine number of parameters by inspecting the function
from scipy._lib._util import getargspec_no_self as _getargspec
args, varargs, varkw, defaults = _getargspec(f)
if len(args) < 2:
raise ValueError("Unable to determine number of fit parameters.")
n = len(args) - 1
else:
p0 = np.atleast_1d(p0)
n = p0.size
lb, ub = prepare_bounds(bounds, n)
if p0 is None:
p0 = _initialize_feasible(lb, ub)
bounded_problem = np.any((lb > -np.inf) | (ub < np.inf))
if method is None:
if bounded_problem:
method = 'trf'
else:
method = 'lm'
if method == 'lm' and bounded_problem:
raise ValueError("Method 'lm' only works for unconstrained problems. "
"Use 'trf' or 'dogbox' instead.")
# optimization may produce garbage for float32 inputs, cast them to float64
# NaNs can not be handled
if check_finite:
ydata = np.asarray_chkfinite(ydata, float)
else:
ydata = np.asarray(ydata, float)
if isinstance(xdata, (list, tuple, np.ndarray)):
# `xdata` is passed straight to the user-defined `f`, so allow
# non-array_like `xdata`.
if check_finite:
xdata = np.asarray_chkfinite(xdata, float)
else:
xdata = np.asarray(xdata, float)
if ydata.size == 0:
raise ValueError("`ydata` must not be empty!")
# Determine type of sigma
if sigma is not None:
sigma = np.asarray(sigma)
# if 1-d, sigma are errors, define transform = 1/sigma
if sigma.shape == (ydata.size, ):
transform = 1.0 / sigma
# if 2-d, sigma is the covariance matrix,
# define transform = L such that L L^T = C
elif sigma.shape == (ydata.size, ydata.size):
try:
# scipy.linalg.cholesky requires lower=True to return L L^T = A
transform = cholesky(sigma, lower=True)
except LinAlgError:
raise ValueError("`sigma` must be positive definite.")
else:
raise ValueError("`sigma` has incorrect shape.")
else:
transform = None
func = _wrap_func(f, xdata, ydata, transform)
if callable(jac):
jac = _wrap_jac(jac, xdata, transform)
elif jac is None and method != 'lm':
jac = '2-point'
if 'args' in kwargs:
# The specification for the model function `f` does not support
# additional arguments. Refer to the `curve_fit` docstring for
# acceptable call signatures of `f`.
raise ValueError("'args' is not a supported keyword argument.")
if method == 'lm':
# Remove full_output from kwargs, otherwise we're passing it in twice.
return_full = kwargs.pop('full_output', False)
res = leastsq(func, p0, Dfun=jac, full_output=1, **kwargs)
popt, pcov, infodict, errmsg, ier = res
ysize = len(infodict['fvec'])
cost = np.sum(infodict['fvec'] ** 2)
if ier not in [1, 2, 3, 4]:
raise RuntimeError("Optimal parameters not found: " + errmsg)
else:
# Rename maxfev (leastsq) to max_nfev (least_squares), if specified.
if 'max_nfev' not in kwargs:
kwargs['max_nfev'] = kwargs.pop('maxfev', None)
res = least_squares(func, p0, jac=jac, bounds=bounds, method=method,
**kwargs)
if not res.success:
raise RuntimeError("Optimal parameters not found: " + res.message)
ysize = len(res.fun)
cost = 2 * res.cost # res.cost is half sum of squares!
popt = res.x
# Do Moore-Penrose inverse discarding zero singular values.
_, s, VT = svd(res.jac, full_matrices=False)
threshold = np.finfo(float).eps * max(res.jac.shape) * s[0]
s = s[s > threshold]
VT = VT[:s.size]
pcov = np.dot(VT.T / s**2, VT)
return_full = False
warn_cov = False
if pcov is None:
# indeterminate covariance
pcov = zeros((len(popt), len(popt)), dtype=float)
pcov.fill(inf)
warn_cov = True
elif not absolute_sigma:
if ysize > p0.size:
s_sq = cost / (ysize - p0.size)
pcov = pcov * s_sq
else:
pcov.fill(inf)
warn_cov = True
if warn_cov:
warnings.warn('Covariance of the parameters could not be estimated',
category=OptimizeWarning)
if return_full:
return popt, pcov, infodict, errmsg, ier
else:
return popt, pcov
def check_gradient(fcn, Dfcn, x0, args=(), col_deriv=0):
"""Perform a simple check on the gradient for correctness.
"""
x = atleast_1d(x0)
n = len(x)
x = x.reshape((n,))
fvec = atleast_1d(fcn(x, *args))
m = len(fvec)
fvec = fvec.reshape((m,))
ldfjac = m
fjac = atleast_1d(Dfcn(x, *args))
fjac = fjac.reshape((m, n))
if col_deriv == 0:
fjac = transpose(fjac)
xp = zeros((n,), float)
err = zeros((m,), float)
fvecp = None
_minpack._chkder(m, n, x, fvec, fjac, ldfjac, xp, fvecp, 1, err)
fvecp = atleast_1d(fcn(xp, *args))
fvecp = fvecp.reshape((m,))
_minpack._chkder(m, n, x, fvec, fjac, ldfjac, xp, fvecp, 2, err)
good = (prod(greater(err, 0.5), axis=0))
return (good, err)
def _del2(p0, p1, d):
return p0 - np.square(p1 - p0) / d
def _relerr(actual, desired):
return (actual - desired) / desired
def _fixed_point_helper(func, x0, args, xtol, maxiter, use_accel):
p0 = x0
for i in range(maxiter):
p1 = func(p0, *args)
if use_accel:
p2 = func(p1, *args)
d = p2 - 2.0 * p1 + p0
p = _lazywhere(d != 0, (p0, p1, d), f=_del2, fillvalue=p2)
else:
p = p1
relerr = _lazywhere(p0 != 0, (p, p0), f=_relerr, fillvalue=p)
if np.all(np.abs(relerr) < xtol):
return p
p0 = p
msg = "Failed to converge after %d iterations, value is %s" % (maxiter, p)
raise RuntimeError(msg)
def fixed_point(func, x0, args=(), xtol=1e-8, maxiter=500, method='del2'):
"""
Find a fixed point of the function.
Given a function of one or more variables and a starting point, find a
fixed-point of the function: i.e. where ``func(x0) == x0``.
Parameters
----------
func : function
Function to evaluate.
x0 : array_like
Fixed point of function.
args : tuple, optional
Extra arguments to `func`.
xtol : float, optional
Convergence tolerance, defaults to 1e-08.
maxiter : int, optional
Maximum number of iterations, defaults to 500.
method : {"del2", "iteration"}, optional
Method of finding the fixed-point, defaults to "del2"
which uses Steffensen's Method with Aitken's ``Del^2``
convergence acceleration [1]_. The "iteration" method simply iterates
the function until convergence is detected, without attempting to
accelerate the convergence.
References
----------
.. [1] Burden, Faires, "Numerical Analysis", 5th edition, pg. 80
Examples
--------
>>> from scipy import optimize
>>> def func(x, c1, c2):
... return np.sqrt(c1/(x+c2))
>>> c1 = np.array([10,12.])
>>> c2 = np.array([3, 5.])
>>> optimize.fixed_point(func, [1.2, 1.3], args=(c1,c2))
array([ 1.4920333 , 1.37228132])
"""
use_accel = {'del2': True, 'iteration': False}[method]
x0 = _asarray_validated(x0, as_inexact=True)
return _fixed_point_helper(func, x0, args, xtol, maxiter, use_accel)
| bsd-3-clause | 6,488,476,498,677,419,000 | 36.423581 | 91 | 0.587456 | false |
ARUNSOORAJPS/flipkart_gridlock | src/main.py | 1 | 2686 | # -*- coding: utf-8 -*-
# @Author: chandan
# @Date: 2017-07-08 00:32:09
# @Last Modified by: chandan
# @Last Modified time: 2017-07-08 11:13:46
from data_utils import read_file
from config import DATA_DIR, SCORE_COLUMNS
import os
from model import train_model, test_model
import pandas as pd
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
import os.path as osp
ACC_FILE = 'RAW_ACCELEROMETERS.txt'
GPS_FILE = 'RAW_GPS.txt'
VEHDET_FILE = 'PROC_VEHICLE_DETECTION.txt'
SCORE_FILE = 'SEMANTIC_ONLINE.txt'
def main():
# read acc, gps, veh det for multiple drivers, scenes
X_dfs, Y_dfs = [], []
driver_dir = 'D1'
for drive_dir in os.listdir(osp.join(DATA_DIR, driver_dir)):
drive_path = osp.join(DATA_DIR, driver_dir, drive_dir)
print drive_path
acc = read_file(osp.join(drive_path, ACC_FILE))
gps = read_file(osp.join(drive_path, GPS_FILE))
veh = read_file(osp.join(drive_path, VEHDET_FILE))
score = read_file(osp.join(drive_path, SCORE_FILE))
datasets = [acc, gps, veh, score]
n_rows = min(map(len, datasets))
# sample high frequency data to lowest frequency
for i in range(len(datasets)):
# drop time column
datasets[i].drop(0, 1, inplace=True)
if len(datasets[i]) > n_rows:
step = len(datasets[i]) / n_rows
ndx = xrange(0, n_rows * step, step)
datasets[i] = datasets[i].ix[ndx]
datasets[i] = datasets[i].reset_index(drop=True)
score_df = datasets[-1]
datasets = datasets[:-1]
Y_df = score.ix[:, SCORE_COLUMNS]
# create dataset
X_df = pd.concat(datasets, axis=1, ignore_index=True)
X_df.fillna(0, inplace=True)
print "X:", X_df.shape
print "Y:", score_df.shape
X_dfs.append(X_df)
Y_dfs.append(Y_df)
# preprocess
X_df = pd.concat(X_dfs, ignore_index=True)
X = X_df.values.astype('float32')
Y = pd.concat(Y_dfs, ignore_index=True).values
print "X shape:", X.shape
print "Y shape:", Y.shape
scaler = MinMaxScaler(feature_range=(0, 1))
X = scaler.fit_transform(X)
X_tr, X_ts, Y_tr, Y_ts = train_test_split(X, Y, test_size=0.2)
# train
print "X Train shape:", X_tr.shape
print "Y Train shape:", Y_tr.shape
print "X test shape:", X_ts.shape
print "Y test shape:", Y_ts.shape
seq_len = 16
X_tr_seq = X_to_seq(X, seq_len, 1)
Y_tr = Y_tr[seq_len:]
X_ts_seq = X_to_seq(X_ts, seq_len, 1)
Y_ts = Y_ts[seq_len:]
#train_model(X_tr, Y_tr)
loss = test_model(X_ts_seq, Y_ts)
print loss
def X_to_seq(X, seq_len=16, stride=1):
X_seqs = []
for start_ndx in range(0, len(X) - seq_len, stride):
X_seqs.append(X[start_ndx : start_ndx + seq_len])
return np.array(X_seqs)
if __name__ == '__main__':
main() | mit | 2,656,051,572,428,761,000 | 23.87963 | 63 | 0.657856 | false |
Altoidnerd/paradichlorobenzene5 | 17.volume_dependence/creation_of_volume_aletered_pwis/experimental/matrix.dev.py | 1 | 15350 | #!/usr/bin/env python3
#################################################################################
# #
# Copyright (c) 2016 Allen Majewski (altoidnerd) #
# Permission is hereby granted, free of charge, to any person obtaining a #
# copy of this software and associated documentation files (the "Software"), #
# to deal in the Software without restriction, including without limitation #
# the rights to use, copy, modify, merge, publish, distribute, sublicense, #
# and/or sell copies of the Software, and to permit persons to whom the #
# Software is furnished to do so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included #
# in all copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL #
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE #
# THE SOFTWARE. #
# #
#################################################################################
import numpy as np
import os
import sys
import numpy.linalg as la
###################
# basic math tools#
###################
def norm(arr):
sum = 0
for i in arr:
sum += float(i)**2
return sum**.5
def angle3(p1,p2,p3):
"""
Returns the bond angle corresponding
to three atomic positions.
You need to pass it numpy arrays
which is natural if you already
transformed the coordinates with
the lattice vectors
... returns in degrees
"""
v1=p2-p1
v2=p3-p2
dot = v1@v2
costheta = dot/(norm(v1)*norm(v2))
return np.arccos(costheta)*180/np.pi-180
def angle2(r1,r2):
"""
Returns the angle between
two vectors. Pass numpy
arrays.
... returns in RADIANS
"""
dot = r1@r2
costheta = dot/(norm(r1)*norm(r2))
return np.arccos(costheta)
def rotx(theta):
"""
Returns a rotations matrix
that rotates a vector by an
angle theta about the x-axis.
"""
cos = np.cos
sin = np.sin
rotmat = []
r1 = [ 1 , 0 , 0 ]
r2 = [ 0 , cos(theta),-sin(theta)]
r3 = [ 0 , sin(theta), cos(theta)]
rows=[r1,r2,r3]
for row in rows:
rotmat.append(np.array(row))
return rotmat
def roty(theta):
"""
Returns a rotations matrix
that rotates a vector by an
angle theta about the y-axis.
"""
cos = np.cos
sin = np.sin
rotmat = []
r1 = [ cos(theta), 0 , sin(theta)]
r2 = [ 0 , 1 , 0 ]
r3 = [-sin(theta), 0 , cos(theta)]
rows=[r1,r2,r3]
for row in rows:
rotmat.append(np.array(row))
return rotmat
def rotz(theta):
"""
Returns a rotations matrix
that rotates a vector by an
angle theta about the z-axis.
"""
cos = np.cos
sin = np.sin
rotmat = []
r1 = [ cos(theta),-sin(theta), 0 ]
r2 = [ sin(theta), cos(theta), 0 ]
r3 = [ 0 , 0 , 1 ]
rows=[r1,r2,r3]
for row in rows:
rotmat.append(np.array(row))
return rotmat
# for testing
# unit vectors
xhat=np.array([1,0,0])
yhat=np.array([0,1,0])
zhat=np.array([0,0,1])
# common angles
t0 =2*np.pi
t30 = np.pi/6
t60 = np.pi/3
t90 = np.pi/2
t180= np.pi
t270=3*np.pi/2
t360=t0
###################
# scf.in parsiing #
###################
def get_pwi_latvecs(pwi_file=None):
"""
Opens a pw.x input file and returns a np.matrix
of the CELL_PARAMETERS card. Recall
<alat_coordinates> = latvecs @ <crystal coordinates>
"""
if pwi_file is None:
pwi_file = smart_picker('pwi', os.getcwd())
pwi = open(pwi_file, 'r').readlines()
cell_params_start = min(
[ pwi.index(line) for line in pwi
if 'CELL_PARAM' in line ]
)
params = []
c = cell_params_start
return np.array([ line.split() for line in pwi[c+1:c+4] ], float).T
def get_pwi_crystal_coords(pwi_file=None, names=False):
"""
Opens a pw.x input file
and returns a numpy array of coordinates.
WARNING: it is zero indexed unline in PWSCF
and the get_efg_tensors() function
"""
if pwi_file is None:
pwi_file = smart_picker('pwi', os.getcwd())
pwi = open(pwi_file, 'r').readlines()
nat = int(sanitize_ends("".join([line for line in pwi if 'nat=' in line])))
positions_card_startline = min(
[ pwi.index(line) for line in pwi
if 'ATOMIC_POSITIONS' in line]
)
p = positions_card_startline
if not names:
return np.array([[0,0,0]]+ [ line.split()[1:] for line in pwi[ p: p+nat+1 ]][1:], float)
return [ line.split() for line in pwi[ p: p+nat+1 ]]
def get_pwi_atomic_species(pwi_file=None, coordinates=False, tostring=False):
if pwi_file is None:
pwi_file = smart_picker('pwi', os.getcwd())
pwi = open(pwi_file,'r').readlines()
nat = int(sanitize_ends("".join([line for line in pwi if 'nat=' in line])))
positions_card_startline = min(
[ pwi.index(line) for line in pwi
if 'ATOMIC_POSITIONS' in line]
)
p = positions_card_startline
if not coordinates:
return [ line.split()[0] for line in pwi[ p: p+nat+1 ]]
if not tostring:
return [ line.split() for line in pwi[ p: p+nat+1 ]]
return [ line for line in pwi[ p: p+nat+1 ]]
def get_pwi_alat_coords(pwi_file=None, tostring=False):
"""
Retrurns the coordinates in alat units
"""
latvecs = get_pwi_latvecs(pwi_file)
if not tostring:
return np.array([ np.dot(latvecs,vec).tolist() for vec in get_pwi_crystal_coords() ])
else:
return [ ' '.join( list( map( str, np.dot(latvecs, vec)))) for vec in get_pwi_crystal_coords() ]
def get_pwi_pseudos(pwi_file=None):
"""
Returns a list of the pseudopotentials
used in a pwscf input.
"""
if pwi_file is None:
pwi_file = smart_picker('pwi', os.getcwd())
pwi = open(pwi_file, 'r').readlines()
# pseudo-validation - not yet
# pseudos_wordlist=[ "UPF","psl","pz","vwm","pbe","blyp","pw91","tpss","coulomb","ae","mt","bhs","vbc","van""rrkj","rrkjus","kjpaw","bpaw"]
pwi = open(pwi_file, 'r').readlines()
atomic_species_startline = min(
[ pwi.index(line) for line in pwi
if 'SPECIES' in line ]
)
a = atomic_species_startline
ntyp = int(sanitize_ends("".join([line for line in pwi if 'ntyp=' in line])))
species_list = []
n = a + 1
while len(species_list) < ntyp:
if not(set(pwi[n]).issubset({'\n','','\t','\r','!','/'})):
species_list.append(pwi[n])
n += 1
else:
n += 1
if len(species_list) == ntyp:
return [ li.split()[2] for li in species_list ]
###################
# magres parsing #
###################
def get_efg_tensors(magres_file=None):
"""
Arguement is a magres format efg outfile.
Returns a list of EFG matrices (numpy.ndarray),
1-indexed as site number in pwscf
(the zeroth position is empty).
"""
if magres_file is None:
magres_file = [ fil for fil in os.listdir('.') if fil.endswith('magres') ][0]
print('magres_file <YUP ITS MEH> not specified. Openeing: {}'.format(magres_file))
magres = open(magres_file,'r').readlines()
return [ np.array([line.split()[3:6], line.split()[6:9], line.split()[9:12]], float) for line in magres if 'efg' in line ]
def get_raw_efg_eigvecs(magres_file=None):
return np.array( [[]] + [ eigvecs(thing) for thing in get_efg_tensors(magres_file)[1:] ] )
def get_raw_efg_eigvals(magres_file=None):
return np.array( [[]] + [ eigvals(thing) for thing in get_efg_tensors(magres_file)[1:] ] )
#
# We may like to brush the dust off our linear algebra instinct
#
# efgs = get_efg_tensors()
# eigenvals = get_raw_efg_eigvals()
# eigenvecs = get_raw_efg_eigvecs()
# then we have, where is the nuclesr site ndex: 0 <= i <= nat; k is x,y,z so 0 <= k <= 2
# ( efgs[i] @ eigenvecs[i].T[k] ) / eigenvals[i][k] == eigenvecs[i].T[k]
#
# though it will not always evaluate to true due to some floating point errors.
#
def get_eigenparis(magres_file=None):
"""
get_eigenparis()[i][j][k]:
i in { 1..nat }; j in {0,1}; k in {0,1,2}
i: {1..nat} -> atomic specie
j: {0,1} -> {eigenvalues, eigenvectos/axes}
k: {0,1,2} -> {x,y,z}/{xx,yy,zz}
"""
return np.array( [[]] + [ (eigvals(thing), eigvecs(thing)) for thing in get_efg_tensors(magres_file)[1:] ] )
def eigenmachine(magres_file=None):
"""
eigen_machine()[i][k]:
i in {0, 1}-> {VALS, VECS}
k in {0, nat -1} -> specie
NOTE: NOT 1-INDEXED!!! ZERO INDEXED FUNCTION
"""
return la.eigh(get_efg_tensors(magres_file)[1:])
def get_efgs_dict(magres_file=None, nat=24):
"""
get_efgs_dict('infile')
-> dict(k,v) where k is an int
atom index e.g. 1, 2, 3
and v is a dict of
efg tensor parameters
specify option getlist=True
to return a list instead
"""
efgs_dict = dict()
for i in range(1, nat+1):
efgs_dict[i] = dict()
spec_data = [[]] + [ la.eigh(get_efg_tensors(magres_file)[k]) for k in range(1,nat+1) ]
for k in range(1,nat+1):
tmpdict = dict()
data = spec_data[k]
mygenvals = data[0]
lmygenvals = mygenvals.tolist()
sort_genvals = np.sort( np.abs( spec_data[k][0] )).tolist()
vzzpm = sort_genvals.pop()
vyypm = sort_genvals.pop()
vxxpm = sort_genvals.pop()
# print('vzzpm, vyypm, vzzpm', vzzpm, vyypm, vzzpm)
mygenvecs = data[1].T
lmygenvecs = mygenvecs.tolist()
if vzzpm in data[0]:
VZZ = vzzpm
else:
VZZ = -vzzpm
if vyypm in data[0]:
VYY = vyypm
else:
VYY = -vyypm
if vxxpm in data[0]:
VXX = vxxpm
else:
VXX = -vxxpm
efgs_dict[k]['Vzz'] = VZZ
efgs_dict[k]['Vyy'] = VYY
efgs_dict[k]['Vxx'] = VXX
efgs_dict[k]['z-axis'] = lmygenvecs[lmygenvals.index(VZZ)]
efgs_dict[k]['y-axis'] = lmygenvecs[lmygenvals.index(VYY)]
efgs_dict[k]['x-axis'] = lmygenvecs[lmygenvals.index(VXX)]
return efgs_dict
####################
# efg.out parsing #
####################
def get_axes(infile, keyword=None):
"""
get_axes('efg.*.out') -> array-like
argument is an efg output file
kw -> e.g. 'Cl 1'
Returns an array containing the
primed principal axes components.
Override the default keyword
using the kw argument.
get_axes(infile)[0] <- X'
get_axes(infile)[1] <- Y'
get_axes(infile)[2] <- Z'
"""
#Vxx, X =-1.6267, np.array([ -0.310418, -0.435918, 0.844758 ])
#Vyy, Y =-1.9819, np.array([ 0.522549, 0.664099, 0.534711 ])
#Vzz, Z = 3.6086, np.array([ -0.794093, 0.607411, 0.021640 ])
#keyword = 'Cl 1'
if keyword is None:
keywrod = 'Cl 1'
f = open(infile,'r').readlines()
relevant = [ line.strip().replace(')','').replace('(','') for line in f if kw in line and 'axis' in line ]
axes_list = [ line.split()[5:] for line in relevant ]
axes_list = np.array([ list(map(float, axis)) for axis in axes_list ])
# require the same signs as the refernece set of axes
if axes_list[0][0] > 0:
axes_list[0] = -1*axes_list[0]
if axes_list[1][0] < 0:
axes_list[1] = -1*axes_list[1]
if axes_list[2][0] > 0:
axes_list[2] = -1*axes_list[2]
return axes_list
#this_X = get_axes(sys.argv[1])[0]
#this_Y = get_axes(sys.argv[1])[1]
#this_Z = get_axes(sys.argv[1])[2]
#print(this_X,this_Y,this_Z)
#get_axes(sys.argv[1])
def get_Vijs(infile):
f = open(infile,'r').readlines()
relevant = [ line.strip().replace(')','').replace('(','') for line in f if kw in line and 'axis' in line ]
axes_list = [ line.split()[5:] for line in relevant ]
axes_list = np.array([ list(map(float, axis)) for axis in axes_list ])
def get_angles(infile, tensor=None):
"""
get_angles('efg.*.out') -> array-like
argument is an efg output file
Returns an array containing the
euler angles for the given
EFG principal axes relative
to the fixed axes (hard coded).
get_angles(infile)[0] <- theta_X
get_angles(infile)[1] <- theta_Y
"""
if tensor is None:
Vxx, X =-1.6267, np.array([ -0.310418, -0.435918, 0.844758 ])
Vyy, Y =-1.9819, np.array([ 0.522549, 0.664099, 0.534711 ])
Vzz, Z = 3.6086, np.array([ -0.794093, 0.607411, 0.021640 ])
this_X = get_axes(infile)[0]
this_Y = get_axes(infile)[1]
this_Z = get_axes(infile)[2]
theta_X = np.arcsin((this_Z@Y)/np.linalg.norm(Y))
theta_Y = np.arcsin((this_Z@X)/(np.linalg.norm(X)*np.cos(theta_X)))
return np.array( [ theta_X, theta_Y ])
#####################
# pwscf.out parsing #
#####################
def get_pwo_forces(pwo_file=None):
if pwo_file is None:
pwo_file = [ fil for fil in os.listdir('.') if (fil.endswith('out') or fil.endswith('pwo')) and ('scf' in fil or 'relax' in fil or 'md' in fil ) ][0]
print('No input specified: opening {}'.format(pwo_file))
pwo = open(pwo_file,'r').readlines()
force_lines = [ line for line in pwo if 'force =' in line ]
numlines = len(force_lines)
nat = int(numlines/7)
return (force_lines[:nat])
####################
# util/helpers #
####################
def smart_picker(find_type, path='.'):
if find_type == 'pwi':
choice = [ fil for fil in os.listdir('.')
if ( (fil.endswith('in') or fil.endswith('pwi'))
or 'inp' in fil)
and ('scf' in fil
or 'relax' in fil
or 'md' in fil) ][0]
if find_type == 'magres':
choice = [ fil for fil in os.listdir('.')
if fil.endswith('magres') ][0]
if find_type == 'pwo':
choice = [ fil for fil in os.listdir('.')
if (fil.endswith('out') or fil.endswith('pwo'))
and ('scf' in fil or 'relax' in fil or 'md' in fil ) ][0]
print("No input specified. Opening: {}".format(choice))
return choice
def sanitize_ends(s, targets=' \n\tabcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"`}{[]\|/><?~!&^%$#@='):
while s[0] in targets:
s = s[1:]
while s[-1] in targets:
s = s[:-1]
return s
#################################
# #
# crystal CIF parsing #
# #
#################################
def get_cif_cell_params(cif_infile, *args):
"""
Returns a dict of keys in *args
and associated values from the
specified cif infile
"""
f = open(cif_infile,'r').readlines()
d=dict()
for word in args:
#print(word)
good_lines = [ line.strip() for line in f if word in line]
#print(good_lines)
data = []
for line in good_lines:
data.append(line.split()[-1])
if len(data) == 1:
d[word] = data[0]
else:
d[word] = data
for line in good_lines:
d[line.split()[0]] = line.split()[1]
return d
def get_monoclinic_P_latvecs(a,b,c,beta):
"""
Takes as argument a,b,c, beta
and produces an array of lattice
vectors
get_monoclinic_P_latvecs(a, b, c, beta)
-> np.array([ v1, v2, v3 ])
...
From PW_INPUT.html:
-12 Monoclinic P, unique axis b
celldm(2)=b/a
celldm(3)=c/a,
celldm(5)=cos(ac)
v1 = (a,0,0), v2 = (0,b,0), v3 = (c*cos(beta),0,c*sin(beta))
where beta is the angle between axis a and c
"""
v1 = [a,0,0]
v2 = [0,b,0]
v3 = [c*np.cos(beta),0,c*np.sin(beta)]
return np.array([v1,v2,v3])
| gpl-3.0 | -2,822,358,396,394,891,300 | 26.264654 | 153 | 0.587752 | false |
felix021/mixo | mixo.py | 1 | 6871 | #!/usr/bin/python
import sys
import struct
import random
import signal
try:
import gevent
from gevent import socket
from gevent.server import StreamServer
from gevent.socket import create_connection, gethostbyname
except:
print >>sys.stderr, "please install gevent first!"
sys.exit(1)
import config
keys = []
if config.seed:
r = random.Random(config.seed)
keys = [r.randint(0, 255) for i in xrange(0, 1024)]
keys += keys
else:
raise Exception("config.seed not set!")
try:
import ctypes
try:
filename = "./xor.so"
xor = ctypes.CDLL(filename)
except:
import platform
bits, exetype = platform.architecture()
filename = "./xor_%s_%s.so" % (exetype, bits)
xor = ctypes.CDLL(filename)
print >>sys.stderr, "loaded %s, using faster xor" % filename
key_str = ''.join(map(chr, keys))
if xor.set_xor_table(key_str, len(key_str)) == 1:
raise Exception("set xor table failed")
def encrypt(data, pos):
ret = ctypes.create_string_buffer(data)
xor.xor(ret, len(data), pos)
return ret.raw[:-1]
except:
print >>sys.stderr, "can't load xor.so, using python native."
def encrypt(data, pos):
return ''.join(map(lambda x, y: chr(ord(x) ^ y), data, keys[pos:pos+len(data)]))
decrypt = encrypt
def dumps(x):
return ' '.join(map(lambda t: '%x' % struct.unpack('B', t)[0], x))
class XSocket(gevent.socket.socket):
def __init__(self, socket = None, addr = None, secure = False):
if socket is not None:
gevent.socket.socket.__init__(self, _sock = socket)
elif addr is not None:
gevent.socket.socket.__init__(self)
self.connect(addr)
else:
raise Exception("XSocket.init: bad arguments")
self.secure = secure
self.recv_idx = 0
self.send_idx = 0
def unpack(self, fmt, length):
data = self.recv(length)
if len(data) < length:
raise Exception("XSocket.unpack: bad formatted stream")
return struct.unpack(fmt, data)
def pack(self, fmt, *args):
data = struct.pack(fmt, *args)
return self.sendall(data)
def recv(self, length, *args):
data = gevent.socket.socket.recv(self, length, *args)
if config.debug: print 'Received:', dumps(data)
if self.secure:
data = decrypt(data, self.recv_idx)
self.recv_idx = (self.recv_idx + len(data)) % 1024
if config.debug: print 'Decrypted:', dumps(data), '--', data
return data
def sendall(self, data, flags = 0):
if config.debug: print 'Send:', dumps(data), '--', data
if self.secure:
data = encrypt(data, self.send_idx)
self.send_idx = (self.send_idx + len(data)) % 1024
if config.debug: print 'Encrypted:', dumps(data)
return gevent.socket.socket.sendall(self, data, flags)
def forward(self, dest):
try:
while True:
data = self.recv(1024)
if not data:
break
dest.sendall(data)
#except IOError, e: pass
finally:
print 'connection closed'
self.close()
dest.close()
class SocksServer(StreamServer):
def __init__(self, listener, **kwargs):
StreamServer.__init__(self, listener, **kwargs)
def handle(self, sock, addr):
print 'connection from %s:%s' % addr
src = XSocket(socket = sock, secure = True)
#socks5 negotiation step2: specify command and destination
ver, cmd, rsv, atype = src.unpack('BBBB', 4)
if cmd != 0x01:
src.pack('BBBBIH', 0x05, 0x07, 0x00, 0x01, 0, 0)
return
if atype == 0x01: #ipv4
host, port = src.unpack('!IH', 6)
hostip = socket.inet_ntoa(struct.pack('!I', host))
elif atype == 0x03: #domain name
length = src.unpack('B', 1)[0]
hostname, port = src.unpack("!%dsH" % length, length + 2)
hostip = gethostbyname(hostname)
host = struct.unpack("!I", socket.inet_aton(hostip))[0]
elif atype == 0x04: #ipv6: TODO
src.pack('!BBBBIH', 0x05, 0x07, 0x00, 0x01, 0, 0)
return
else:
src.pack('!BBBBIH', 0x05, 0x07, 0x00, 0x01, 0, 0)
return
try:
dest = XSocket(addr = (hostip, port))
except IOError, ex:
print "%s:%d" % addr, "failed to connect to %s:%d" % (hostip, port)
src.pack('!BBBBIH', 0x05, 0x03, 0x00, 0x01, host, port)
return
src.pack('!BBBBIH', 0x05, 0x00, 0x00, 0x01, host, port)
gevent.spawn(src.forward, dest)
gevent.spawn(dest.forward, src)
def close(self):
sys.exit(0)
@staticmethod
def start_server():
server = SocksServer(('0.0.0.0', config.server_port))
gevent.signal(signal.SIGTERM, server.close)
gevent.signal(signal.SIGINT, server.close)
print "Server is listening on 0.0.0.0:%d" % config.server_port
server.serve_forever()
class PortForwarder(StreamServer):
def __init__(self, listener, dest, **kwargs):
StreamServer.__init__(self, listener, **kwargs)
self.destaddr = dest
def handle(self, sock, addr):
src = XSocket(socket = sock)
#socks5 negotiation step1: choose an authentication method
ver, n_method = src.unpack('BB', 2)
if ver != 0x05:
src.pack('BB', 0x05, 0xff)
return
if n_method > 0:
src.recv(n_method)
src.pack('!BB', 0x05, 0x00) #0x00 means no authentication needed
print "Forwarder: connection from %s:%d" % addr
try:
dest = XSocket(addr = self.destaddr, secure = True)
except IOError, ex:
print "%s:%d" % addr, "failed to connect to SocksServer %s:%d" % self.destaddr
print ex
return
gevent.spawn(src.forward, dest)
gevent.spawn(dest.forward, src)
def close(self):
sys.exit(0)
@staticmethod
def start_server():
forward_addr = (config.forward_host, config.forward_port)
server_addr = (config.server_host, config.server_port)
server = PortForwarder(forward_addr, server_addr)
gevent.signal(signal.SIGTERM, server.close)
gevent.signal(signal.SIGINT, server.close)
print "Forwarder is listening on %s:%d for Server %s:%d" % \
(config.forward_host, config.forward_port, \
config.server_host, config.server_port)
server.serve_forever()
if __name__ == '__main__':
import sys
if len(sys.argv) == 1:
PortForwarder.start_server()
else:
SocksServer.start_server()
| unlicense | -23,230,266,414,420,976 | 29.537778 | 90 | 0.569495 | false |
MMunibas/FittingWizard | scripts/esp-fit/src/pun2charmmlpun.py | 1 | 2045 | #!/usr/bin/env python
#
# Copyright 2013 Tristan Bereau and Christian Kramer
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
####################
#
# This script transforms a .pun file into a charmm-readable .lpun file.
# Optimized Parameters can be given
import sys
import mtp_tools
punfile = ''
parmfile = ''
##############
# Read input
for i in range(len(sys.argv)):
if sys.argv[i] == '-pun':
punfile = sys.argv[i+1]
elif sys.argv[i] == '-par':
parmfile = sys.argv[i+1]
elif sys.argv[i] == '-h':
print "Usage: python pun2charmmlpun.py -pun [file] [-par [parfile]] [-h]"
exit(0)
if punfile == '':
print "Usage: python pun2charmmlpun.py -pun [file] [-par [parfile]] [-h]"
exit(0)
#############
# Check that the file does not end in .lpun, otherwise quit.
if punfile[punfile.rindex('.'):] == '.lpun':
print "Error: the script will generate a .lpun file, please rename current file."
exit(1)
# Read prmfile if given
prms = {}
if parmfile != '':
import numpy
f = open(parmfile,'r')
a = f.readlines()
f.close()
for line in a:
b = line.split()
prms[(b[0][2:-2],b[1][1:-2])] = numpy.array([float(b[i+3]) for i in range(len(b)-3)])
mol = mtp_tools.molecule()
mol.readfrompunfile(punfile)
mol.Calc_locMTP()
if parmfile != '':
for atom in mol.atoms:
atom.chrg = prms[(atom.atype,'chrg')]
atom.dloc = prms[(atom.atype,'dloc')]
atom.Qloc = prms[(atom.atype,'Qloc')]
mol.Calc_gloMTP()
mol.adjust_charge()
mol.write_localized_mtp_file(punfile[:punfile.rindex('.')]+'.lpun')
| bsd-3-clause | -4,360,621,419,761,082,000 | 25.558442 | 89 | 0.652323 | false |
franklingu/leetcode-solutions | questions/largest-rectangle-in-histogram/Solution.py | 1 | 1401 | """
Given n non-negative integers representing the histogram's bar height where the width of each bar is 1, find the area of largest rectangle in the histogram.

Above is a histogram where width of each bar is 1, given height = [2,1,5,6,2,3].

The largest rectangle is shown in the shaded area, which has area = 10 unit.
Example:
Input: [2,1,5,6,2,3]
Output: 10
"""
class Solution:
def largestRectangleArea(self, heights: List[int]) -> int:
stack = []
area = 0
for i, num in enumerate(heights):
while stack and num < heights[stack[-1]]:
idx = stack.pop()
h = heights[idx]
if stack:
prev_idx = stack[-1]
else:
prev_idx = -1
curr_area = h * (i - prev_idx - 1)
area = max(area, curr_area)
stack.append(i)
if not stack:
return area
while stack:
idx = stack.pop()
h = heights[idx]
if stack:
prev_idx = stack[-1]
else:
prev_idx = -1
curr_area = h * (len(heights) - prev_idx - 1)
area = max(area, curr_area)
return area
| mit | -5,462,530,115,053,818,000 | 27.591837 | 156 | 0.532477 | false |
Hearen/OnceServer | pool_management/bn-xend-core/xend/NetworkMonitor.py | 1 | 1832 | import logging
log = logging.getLogger("network")
log.setLevel(logging.DEBUG)
file_handle = logging.FileHandler("/var/log/xen/network.log")
log.addHandler(file_handle)
def test_ip(ip):
import os
import subprocess
cmd = 'ping -w 3 %s' % ip
re = subprocess.call(cmd, shell=True)
if re:
return False
else:
return True
def get_running_domains():
import os
output = os.popen("xm list --state=running | tail -n +2 | grep -v Domain-0 | awk '{print $1}'").readlines()
if len(output) > 0:
return [x.strip() for x in output]
else:
return []
def get_gateway():
import os
output = os.popen("route -v | grep default | awk '{print $2}'").readlines()
if len(output) > 0:
gateway = output[0].strip()
return gateway
else:
return None
import threading
import time
import os
class RunNetworkMonitor(threading.Thread):
def run(self):
while True:
try:
time.sleep(3)
gateway = get_gateway()
if not gateway or not test_ip(gateway):
log.debug("gateway is unreachable, closing running vms")
vms = get_running_domains()
log.debug("running vms are: %s" % vms)
for vm in vms:
log.debug("close %s" % vm)
output = os.popen("xm destroy %s" % vm).readlines()
else:
log.debug("gateway is %s now, check for connection" % gateway)
log.debug("gateway is reachable, will check again after 3 seconds")
except BaseException, e:
log.debug(e)
def main():
thread = RunNetworkMonitor()
thread.start()
if __name__ == '__main__':
main()
| mit | -6,437,564,913,411,883,000 | 28.548387 | 111 | 0.540393 | false |
andigena/payloadbf | docs/conf.py | 1 | 1332 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.coverage',
'sphinx.ext.extlinks',
'sphinx.ext.ifconfig',
'sphinx.ext.napoleon',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
]
if os.getenv('SPELLCHECK'):
extensions += 'sphinxcontrib.spelling',
spelling_show_suggestions = True
spelling_lang = 'en_US'
source_suffix = '.rst'
master_doc = 'index'
project = u'payloadbf'
year = u'2017'
author = u'tukan'
copyright = '{0}, {1}'.format(year, author)
version = release = u'0.2.0'
pygments_style = 'trac'
templates_path = ['.']
extlinks = {
'issue': ('https://github.com/andigena/payloadbf/issues/%s', '#'),
'pr': ('https://github.com/andigena/payloadbf/pull/%s', 'PR #'),
}
# on_rtd is whether we are on readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only set the theme if we're building docs locally
html_theme = 'sphinx_rtd_theme'
html_use_smartypants = True
html_last_updated_fmt = '%b %d, %Y'
html_split_index = False
html_sidebars = {
'**': ['searchbox.html', 'globaltoc.html', 'sourcelink.html'],
}
html_short_title = '%s-%s' % (project, version)
napoleon_use_ivar = True
napoleon_use_rtype = False
napoleon_use_param = False
| mit | 2,928,228,183,442,226,000 | 24.615385 | 70 | 0.652402 | false |
anthraxx/diffoscope | tests/comparators/test_tar.py | 1 | 2690 | # -*- coding: utf-8 -*-
#
# diffoscope: in-depth comparison of files, archives, and directories
#
# Copyright © 2015 Jérémy Bobbio <[email protected]>
#
# diffoscope is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# diffoscope is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with diffoscope. If not, see <https://www.gnu.org/licenses/>.
import pytest
from diffoscope.config import Config
from diffoscope.comparators.tar import TarFile
from diffoscope.comparators.missing_file import MissingFile
from utils.data import load_fixture, get_data
from utils.nonexisting import assert_non_existing
tar1 = load_fixture('test1.tar')
tar2 = load_fixture('test2.tar')
def test_identification(tar1):
assert isinstance(tar1, TarFile)
def test_no_differences(tar1):
difference = tar1.compare(tar1)
assert difference is None
@pytest.fixture
def differences(tar1, tar2):
return tar1.compare(tar2).details
def test_listing(differences):
expected_diff = get_data('tar_listing_expected_diff')
assert differences[0].unified_diff == expected_diff
def test_symlinks(differences):
assert differences[2].source1 == 'dir/link'
assert differences[2].source2 == 'dir/link'
assert differences[2].comment == 'symlink'
expected_diff = get_data('symlink_expected_diff')
assert differences[2].unified_diff == expected_diff
def test_text_file(differences):
assert differences[1].source1 == 'dir/text'
assert differences[1].source2 == 'dir/text'
expected_diff = get_data('text_ascii_expected_diff')
assert differences[1].unified_diff == expected_diff
def test_compare_non_existing(monkeypatch, tar1):
assert_non_existing(monkeypatch, tar1)
no_permissions_tar = load_fixture('no-perms.tar')
# Reported as Debian #797164. This is a good way to notice if we unpack directories
# as we won't be able to remove files in one if we don't have write permissions.
def test_no_permissions_dir_in_tarball(monkeypatch, no_permissions_tar):
# We want to make sure OSError is not raised.
# Comparing with non-existing file makes it easy to make sure all files are unpacked
monkeypatch.setattr(Config(), 'new_file', True)
no_permissions_tar.compare(MissingFile('/nonexistent', no_permissions_tar))
| gpl-3.0 | 6,385,787,016,016,936,000 | 36.319444 | 88 | 0.747674 | false |
tertychnyy/rrcki-backup | lib/rrckibackup/BKPSite.py | 1 | 1514 | import json
import sys
from BKPLogger import BKPLogger
def getJSON(filepath):
json_data = open(filepath)
data = json.load(json_data)
json_data.close()
return data
class BKPSite:
def __init__(self, path):
#logger
_logger = BKPLogger().getLogger('BKPSite')
self.name = 'default'
self.server = '127.0.0.1'
self.user = 'root'
self.port = 22
self.key = ''
site = getJSON(path)
if site['name'] is not None:
self.name = site['name']
else:
self._logger.error("Empty site.name")
sys.exit(1)
if site['server'] is not None:
self.server = site['server']
else:
self._logger.error("Empty site.server")
sys.exit(1)
if site['user'] is not None:
self.user = site['user']
else:
self._logger.error("Empty site.server")
sys.exit(1)
if site['port'] is not None:
self.port = site['port']
else:
self._logger.error("Empty site.port")
sys.exit(1)
if site['key'] is not None:
self.key = site['key']
else:
self._logger.error("Empty site.key")
sys.exit(1)
def getSSH(self):
ssh = self.getSSHHead() + " " + self.user + "@" + self.server
return ssh
def getSSHHead(self):
ssh = 'ssh -i ' + self.key + " -p " + str(self.port)
return ssh
| gpl-2.0 | 6,836,492,775,590,052,000 | 25.103448 | 69 | 0.5 | false |
tomasbasham/ratelimit | ratelimit/decorators.py | 1 | 4409 | '''
Rate limit public interface.
This module includes the decorator used to rate limit function invocations.
Additionally this module includes a naive retry strategy to be used in
conjunction with the rate limit decorator.
'''
from functools import wraps
from math import floor
import time
import sys
import threading
from ratelimit.exception import RateLimitException
from ratelimit.utils import now
class RateLimitDecorator(object):
'''
Rate limit decorator class.
'''
def __init__(self, calls=15, period=900, clock=now(), raise_on_limit=True):
'''
Instantiate a RateLimitDecorator with some sensible defaults. By
default the Twitter rate limiting window is respected (15 calls every
15 minutes).
:param int calls: Maximum function invocations allowed within a time period.
:param float period: An upper bound time period (in seconds) before the rate limit resets.
:param function clock: An optional function retuning the current time.
:param bool raise_on_limit: A boolean allowing the caller to avoiding rasing an exception.
'''
self.clamped_calls = max(1, min(sys.maxsize, floor(calls)))
self.period = period
self.clock = clock
self.raise_on_limit = raise_on_limit
# Initialise the decorator state.
self.last_reset = clock()
self.num_calls = 0
# Add thread safety.
self.lock = threading.RLock()
def __call__(self, func):
'''
Return a wrapped function that prevents further function invocations if
previously called within a specified period of time.
:param function func: The function to decorate.
:return: Decorated function.
:rtype: function
'''
@wraps(func)
def wrapper(*args, **kargs):
'''
Extend the behaviour of the decorated function, forwarding function
invocations previously called no sooner than a specified period of
time. The decorator will raise an exception if the function cannot
be called so the caller may implement a retry strategy such as an
exponential backoff.
:param args: non-keyword variable length argument list to the decorated function.
:param kargs: keyworded variable length argument list to the decorated function.
:raises: RateLimitException
'''
with self.lock:
period_remaining = self.__period_remaining()
# If the time window has elapsed then reset.
if period_remaining <= 0:
self.num_calls = 0
self.last_reset = self.clock()
# Increase the number of attempts to call the function.
self.num_calls += 1
# If the number of attempts to call the function exceeds the
# maximum then raise an exception.
if self.num_calls > self.clamped_calls:
if self.raise_on_limit:
raise RateLimitException('too many calls', period_remaining)
return
return func(*args, **kargs)
return wrapper
def __period_remaining(self):
'''
Return the period remaining for the current rate limit window.
:return: The remaing period.
:rtype: float
'''
elapsed = self.clock() - self.last_reset
return self.period - elapsed
def sleep_and_retry(func):
'''
Return a wrapped function that rescues rate limit exceptions, sleeping the
current thread until rate limit resets.
:param function func: The function to decorate.
:return: Decorated function.
:rtype: function
'''
@wraps(func)
def wrapper(*args, **kargs):
'''
Call the rate limited function. If the function raises a rate limit
exception sleep for the remaing time period and retry the function.
:param args: non-keyword variable length argument list to the decorated function.
:param kargs: keyworded variable length argument list to the decorated function.
'''
while True:
try:
return func(*args, **kargs)
except RateLimitException as exception:
time.sleep(exception.period_remaining)
return wrapper
| mit | -1,125,524,861,554,458,100 | 35.438017 | 98 | 0.629621 | false |
hightower8083/chimeraCL | chimeraCL/frame.py | 1 | 2133 | import numpy as np
class Frame():
def __init__(self, configs_in, comm=None):
self._process_configs(configs_in)
def _process_configs(self, configs_in):
self.Args = configs_in
if 'Steps' not in self.Args:
self.Args['Steps'] = 1.
if 'Velocity' not in self.Args:
self.Args['Velocity'] = 0.
if 'dt' not in self.Args:
self.Args['dt'] = 1
if 'DensityProfiles' not in self.Args:
self.Args['DensityProfiles'] = None
def shift_grids(self, grids=[], steps=None):
if steps is None:
steps = self.Args['Steps']
x_shift = steps * self.Args['dt'] * self.Args['Velocity']
for grid in grids:
for store in [grid.Args, grid.DataDev]:
for arg in ['Xmax','Xmin','Xgrid']:
store[arg] += x_shift
def inject_plasma(self, species, grid, steps=None):
if steps is None:
steps = self.Args['Steps']
x_shift = steps * self.Args['dt'] * self.Args['Velocity']
for specie in species:
if specie.Args['Np'] == 0:
specie.Args['right_lim'] = grid.Args['Xmax'] - x_shift
inject_domain = {}
inject_domain['Xmin'] = specie.Args['right_lim']
inject_domain['Xmax'] = inject_domain['Xmin'] + x_shift
inject_domain['Rmin'] = grid.Args['Rmin']*(grid.Args['Rmin']>0)
inject_domain['Rmax'] = grid.Args['Rmax']
specie.make_new_domain(inject_domain,
density_profiles=self.Args['DensityProfiles'])
if 'InjectorSource' in specie.Args.keys():
specie.add_new_particles(specie.Args['InjectorSource'])
else:
specie.add_new_particles()
for specie in species:
specie.free_added()
specie.sort_parts(grid=grid)
specie.align_parts()
Num_ppc = np.int32(np.prod(specie.Args['Nppc'])+1)
x_max = specie.DataDev['x'][-Num_ppc:].get().max()
specie.Args['right_lim'] = x_max + 0.5*specie.Args['ddx']
| gpl-3.0 | 7,726,507,622,837,051,000 | 32.328125 | 75 | 0.532583 | false |
mice-software/maus | bin/analyze_data_online.py | 1 | 14624 | #!/usr/bin/env python
# This file is part of MAUS: http://micewww.pp.rl.ac.uk:8080/projects/maus
#
# MAUS is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# MAUS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with MAUS. If not, see <http://www.gnu.org/licenses/>.
"""
Online run script to get maus to run online.
Contains subprocess commands to
- set up celery nodes
- set up web app
- set up input_transform
- set up merger_outputs
- logging
Also some reasonably complicated process handling.
Each of the subprocesses is handled by the subprocess module. Cleanup of child
processes is done automatically on exit.
Only one instance of analyze_data_online is allowed to run at any time. This is
controlled by a lock file. Additionally the lock file contains a list of child
process ids. If this script fails to clean up at the end of the run, for example
if it is killed by an external signal, then next time it is run we can
kill all child processes.
Any command line arguments are passed to the MAUS input-transform and all MAUS
merge-output processes
"""
# would be great to have some tests for this
import sys
import os
import signal
import subprocess
import time
import string # pylint: disable=W0402
import pymongo
MONGODB = 'maus-new' # no '.' character
LOCKFILE = os.path.join(os.environ['MAUS_ROOT_DIR'], 'tmp', '.maus_lockfile')
PROCESSES = []
# maximum % of total memory usage before a process is restarted
MAX_MEM_USAGE = 15.
# time between polls in seconds
POLL_TIME = 10
# list of reducers to be used in the online job
REDUCER_LIST = [
'reconstruct_daq_scalars_reducer.py',
'reconstruct_daq_tof_reducer.py',
'reconstruct_daq_tofcalib_reducer.py',
'reconstruct_daq_ckov_reducer.py',
'reconstruct_daq_kl_reducer.py',
'reconstruct_monitor_reducer.py',
'reconstruct_daq_sci_fi_reducer.py',
]
class OnlineProcess:
"""
Wrapper for a subprocess POpen object
Wraps a subprocess with additional functionality to check memory usage and
kill the subprocess and restart it in the case of a leak. See #1328.
"""
def __init__(self, subprocess_arg_list, log_file):
"""
Set up the log file and start the subprocess
"""
self.arg_list = subprocess_arg_list
self.log_name = log_file
self.log = open(log_file, "w")
self.subproc = None
self._start_process()
def poll(self):
"""
Returns None if the process is running or the returncode if it finished
Checks memory footprint for the subprocess and restarts the process if
it exceeeds MAX_MEM_USAGE
"""
proc = self.subproc
poll_out = proc.poll()
if poll_out == None:
mem_usage = self.memory_usage()
if mem_usage > MAX_MEM_USAGE:
print mem_usage, "% of memory used for process", proc.pid, \
"which exceeds the maximum", MAX_MEM_USAGE, \
"% - restarting the process"
cleanup([self])
self._start_process()
print str(proc.pid).rjust(6), str(mem_usage).ljust(6),
else:
print '\nProcess', proc.pid, 'failed'
return poll_out
def memory_usage(self):
"""
Return the memory usage (%) of the associated subprocess
"""
ps_out = subprocess.check_output(['ps', '-p', str(self.subproc.pid),
'h', '-o%mem'])
return float(ps_out.strip(' \n'))
def _start_process(self):
"""
Start the subprocess
"""
self.subproc = subprocess.Popen(self.arg_list, \
stdout=self.log, stderr=subprocess.STDOUT)
print 'Started process with pid', self.subproc.pid, 'and log file', \
self.log_name
def poll_processes(proc_list):
"""
Poll processes in process list. Return True if processes are all running,
false if any have finished.
"""
all_ok = True
for proc in proc_list:
all_ok = all_ok and proc.poll() == None
print
return all_ok
def celeryd_process(celeryd_log_file_name):
"""
Open the celery demon process - sets up workers for MAUS to reconstruct on
"""
print 'Starting celery... ',
proc = OnlineProcess(['celeryd', '-c8', '-lINFO', '--purge'],
celeryd_log_file_name)
return proc
def maus_web_app_process(maus_web_log_file_name):
"""
Open the maus web app process - dynamically generates web pages for MAUS
output display
"""
print 'Starting maus web app...',
maus_web = os.path.join(os.environ['MAUS_WEB_DIR'], 'src/mausweb/manage.py')
proc = OnlineProcess(['python', maus_web, 'runserver', 'localhost:9000'],
maus_web_log_file_name)
return proc
def maus_input_transform_process(maus_input_log, _extra_args):
"""
Open the input transform process - runs against data and performs
reconstruction, leaving reconstructed data in a database somewhere.
"""
print 'Starting input-transform...',
maus_inp = \
os.path.join(os.environ['MAUS_ROOT_DIR'],
'bin/online/analyze_data_online_input_transform.py')
proc = OnlineProcess(['python', maus_inp,
'-mongodb_database_name='+MONGODB,
'-type_of_dataflow=multi_process_input_transform',
'-verbose_level=0',
'-DAQ_hostname=miceraid5']+_extra_args,
maus_input_log)
return proc
def maus_merge_output_process(maus_output_log, reducer_name, output_name,
_extra_args):
"""
Open the merge output process - runs against reconstructed data and collects
into a bunch of histograms.
"""
print 'Starting reducer...',
maus_red = os.path.join(os.environ['MAUS_ROOT_DIR'], 'bin/online',
reducer_name)
# strip trailing .py or whatever
root_name = string.join(reducer_name.split('.')[0:-1], '.')
if root_name == '':
root_name = reducer_name
root_name += '.root'
proc = OnlineProcess(['python', maus_red,
'-mongodb_database_name='+MONGODB,
'-type_of_dataflow=multi_process_merge_output',
'-output_json_file_name='+output_name,
'-reduce_plot_refresh_rate=60',
'-output_root_file_name='+root_name,
'-output_root_file_mode=end_of_run_file_per_run']+\
_extra_args,
maus_output_log)
return proc
def monitor_mongodb(url, database_name, file_handle):
"""
Summarise the database.
@param url URL.
@param database_name Database name or "ALL" for all.
"""
mongo = pymongo.Connection(url)
database_names = mongo.database_names()
if (database_name != "ALL"):
if (database_name not in database_names):
print >> file_handle, "Database %s not found" % database_name
return
else:
database_names = [database_name]
for database_name in database_names:
print >> file_handle, "Database: %s" % database_name,
mongodb = mongo[database_name]
collection_names = mongodb.collection_names()
if ("system.indexes" in collection_names):
collection_names.remove("system.indexes")
if (len(collection_names) == 0):
print >> file_handle, " No collections"
continue
for collection_name in collection_names:
collection = mongodb[collection_name]
validate = mongodb.validate_collection(collection_name)
if "datasize" in validate.keys():
space = validate["datasize"]
space_kb = space / 1024
space_mb = space_kb / 1024
print >> file_handle, \
" Collection: %s : %d documents (%d bytes %d Kb %d Mb)" \
% (collection_name, collection.count(), space, \
space_kb, space_mb)
file_handle.flush()
def force_kill_celeryd():
"""
celeryd likes to leave lurking subprocesses. This function searches the
process table for celeryd child process and kills it.
"""
ps_out = subprocess.check_output(['ps', '-e', '-F'])
pids = []
for line in ps_out.split('\n')[1:]:
if line.find('celeryd') > -1:
words = line.split()
pids.append(int(words[1]))
print "Found lurking celeryd process", pids[-1]
for a_pid in pids:
os.kill(a_pid, signal.SIGKILL)
print "Killed", a_pid
def force_kill_maus_web_app():
"""
maus web app spawns a child process that is pretty hard to kill. This
function searches the process table for mausweb child process and kills it.
"""
hack_stdout = os.path.join(os.environ['MAUS_ROOT_DIR'], 'tmp', 'grep.out')
fout = open(hack_stdout, 'w')
ps_proc = subprocess.Popen(['ps', '-e', '-F'], stdout=fout, \
stderr=subprocess.STDOUT)
ps_proc.wait() # pylint: disable = E1101
fout.close()
fin = open(hack_stdout)
pid = None
for line in fin.readlines():
if line.find('src/mausweb/manage.py') > -1:
words = line.split()
pid = int(words[1])
print "Found lurking maus-web-app process"
if pid != None:
os.kill(pid, signal.SIGKILL)
print "Killed", pid
def remove_lockfile():
"""
Delete the lockfile
"""
if os.path.exists(LOCKFILE):
os.remove(LOCKFILE)
print 'Cleared lockfile'
else:
print 'Strange, I lost the lockfile...'
def clear_lockfile():
"""
Clear an existing lockfile
If the script fails to exit gracefully, we leave a lock file and can leave
associated child processes running. In this case, this function kills all
child processes.
"""
if os.path.exists(LOCKFILE):
print """
Found lockfile - this may mean you have an existing session running elsewhere.
Kill existing session? (y/N)"""
sys.stdout.flush()
user_input = raw_input()
if len(user_input) == 0 or user_input[0].lower() != 'y':
# note this doesnt go through cleanup function - just exits
os.abort()
print 'Lockfile', LOCKFILE, 'found - killing processes'
fin = open(LOCKFILE)
for line in fin.readlines():
pid = int(line.rstrip('\n'))
try:
os.kill(pid, signal.SIGKILL)
except OSError:
pass # maybe the pid didn't exist
print 'Killed', pid
# maus web app spawns a child that needs special handling
force_kill_maus_web_app()
# celeryd must die
force_kill_celeryd()
time.sleep(3)
def make_lockfile(_procs):
"""
Make a lock file listing pid of this process and all children
"""
print 'Making lockfile '+LOCKFILE
fout = open(LOCKFILE, 'w')
print >> fout, os.getpid()
for proc in _procs :
print >> fout, proc.subproc.pid
fout.close()
def cleanup(_procs):
"""
Kill any subprocesses in _procs list of OnlineProcesses
"""
returncode = 0
for online_process in _procs:
process = online_process.subproc
if process.poll() == None:
print 'Attempting to kill process', str(process.pid)
process.send_signal(signal.SIGINT)
while len(_procs) > 0:
_proc_alive = []
for online_process in _procs:
process = online_process.subproc
print 'Polling process', process.pid,
if process.poll() == None:
print '... process did not die - it is still working '+\
'(check the log file)'
_proc_alive.append(online_process)
else:
print '... process', str(process.pid), \
'is dead with return code', str(process.returncode)
returncode = process.returncode
sys.stdout.flush()
_procs = _proc_alive
time.sleep(10)
return returncode
def main():
"""
Make a lockfile; spawn child processes; and poll subprocesses until user
hits ctrl-c
If the subprocesses fail, have a go at setting up rabbitmcq and mongo
Pass any command line arguments to all MAUS processes
"""
extra_args = sys.argv[1:]
returncode = 0
try:
force_kill_maus_web_app()
force_kill_celeryd()
clear_lockfile()
log_dir = os.environ['MAUS_WEB_MEDIA_RAW']
celery_log = os.path.join(log_dir, 'celeryd.log')
maus_web_log = os.path.join(log_dir, 'maus-web.log')
input_log = os.path.join(log_dir, 'maus-input-transform.log')
debug_json = os.path.join(log_dir, 'reconstruct_monitor_reducer.json')
PROCESSES.append(celeryd_process(celery_log))
PROCESSES.append(maus_web_app_process(maus_web_log))
PROCESSES.append(maus_input_transform_process(input_log, extra_args))
for reducer in REDUCER_LIST:
reduce_log = os.path.join(log_dir, reducer[0:-3]+'.log')
PROCESSES.append(maus_merge_output_process(reduce_log,
reducer, debug_json, extra_args))
make_lockfile(PROCESSES)
print '\nCTRL-C to quit\n'
mongo_log = open(os.path.join(log_dir, 'mongodb.log'), 'w')
while poll_processes(PROCESSES):
monitor_mongodb("localhost:27017", MONGODB, mongo_log)
sys.stdout.flush()
sys.stderr.flush()
time.sleep(POLL_TIME)
except KeyboardInterrupt:
print "Closing"
except Exception:
sys.excepthook(*sys.exc_info())
returncode = 1
finally:
returncode = cleanup(PROCESSES)+returncode
remove_lockfile()
sys.exit(returncode)
if __name__ == "__main__":
main()
| gpl-3.0 | 3,474,413,742,253,010,000 | 35.019704 | 80 | 0.594434 | false |
verilylifesciences/purplequery | purplequery/statement_grammar.py | 1 | 1486 | # Copyright 2019 Verily Life Sciences LLC
#
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
"""Grammar for BigQuery statements."""
from .grammar import expression, query_expression
from .query_helper import separated_sequence
from .statements import CreateTable, CreateView
from .terminals import grammar_literal, identifier
from .type_grammar import bigquery_type
statement = [
(CreateTable,
[grammar_literal('CREATE', 'TABLE', 'IF', 'NOT', 'EXISTS'),
grammar_literal('CREATE', 'TABLE'),
grammar_literal('CREATE', 'OR', 'REPLACE', 'TABLE')],
separated_sequence(identifier, '.'),
[('(', separated_sequence((identifier, bigquery_type), ','), ')'),
None],
# PARTITION BY not implemented
# CLUSTER BY not implemented
[('OPTIONS', '(',
[separated_sequence((identifier, '=', expression), ','), None],
')'),
None],
[('AS', '(', query_expression, ')'),
None],
),
(CreateView,
[grammar_literal('CREATE', 'VIEW', 'IF', 'NOT', 'EXISTS'),
grammar_literal('CREATE', 'VIEW'),
grammar_literal('CREATE', 'OR', 'REPLACE', 'VIEW')],
separated_sequence(identifier, '.'),
[('OPTIONS', '(',
[separated_sequence((identifier, '=', expression), ','), None],
')'),
None],
'AS',
'(', query_expression, ')'
),
]
bigquery_statement = ([statement, query_expression],
[';', None])
| bsd-3-clause | -110,670,517,762,715,200 | 31.304348 | 71 | 0.595559 | false |
palankai/dddcrud | app/handlers.py | 1 | 1175 | from .events import ContactCreated, ContactUpdated, ContactDeleted
from .policy import policy, PhoneNumberLongEnough
@policy(PhoneNumberLongEnough())
class CreateContactHandler:
def __init__(self, repo, bus):
self.repo = repo
self.bus = bus
def __call__(self, cmd):
self.repo.create(cmd.name, cmd.phone)
self.bus.publish(ContactCreated(name=cmd.name, phone=cmd.phone))
class UpdateContactHandler:
def __init__(self, repo, bus):
self.repo = repo
self.bus = bus
def __call__(self, cmd):
self.repo.update(cmd.name, cmd.phone)
self.bus.publish(ContactUpdated(name=cmd.name, phone=cmd.phone))
class DeleteContactHandler:
def __init__(self, repo, bus):
self.repo = repo
self.bus = bus
def __call__(self, cmd):
self.repo.delete(cmd.name)
self.bus.publish(ContactDeleted(name=cmd.name))
class ReadContactHandler:
def __init__(self, repo, bus):
self.repo = repo
self.bus = bus
def __call__(self, cmd):
return self.repo.read(cmd.name)
class ContactEventsHandler:
def __call__(self, event):
print(event)
| mit | -8,243,245,142,680,655,000 | 22.039216 | 72 | 0.629787 | false |
servetus/pycofi | pycofi/queries.py | 1 | 1056 | from numpy import matrix, asarray, apply_along_axis
from numpy.linalg import norm
import simplejson
def get_top_items(user_index, items, Theta, X, item_mean=None, R_filter=None):
if R_filter == None:
R_filter = set([])
scores = X[user_index,:] * Theta
if(item_mean != None):
scores = matrix(item_mean).T + scores
item_scores = zip(items, asarray(scores)[0] )
item_scores = filter( lambda x: x[0]['id'] not in R_filter, item_scores)
item_scores = sorted( item_scores, key= lambda x: x[1], reverse=True )
return item_scores
def get_similar_items(item_index, items, Theta, R_filter=None):
if R_filter == None:
R_filter = set([])
target_item_params = Theta[:,item_index]
diff = Theta - target_item_params
similarity = apply_along_axis( norm, 0,diff)
item_scores = zip(items, asarray(similarity) )
item_scores = filter( lambda x: x[0]['id'] not in R_filter, item_scores)
item_scores = sorted( item_scores, key= lambda x: x[1] )
return item_scores
| mit | 5,900,448,722,124,836,000 | 24.756098 | 78 | 0.632576 | false |
igurrutxaga/tvalacarta | python/main-classic/platformcode/xbmctools.py | 1 | 44101 | # -*- coding: utf-8 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# XBMC Tools
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import urllib, urllib2
import xbmc
import xbmcgui
import xbmcplugin
import sys
import os
from servers import servertools
from core import config
from core import logger
from core.item import Item
from core import suscription
# This allows the plugin to run in emulated mode
try:
pluginhandle = int( sys.argv[ 1 ] )
except:
pluginhandle = ""
DEBUG = False
def add_new_folder( item , totalItems=0 ):
if item.fulltitle=="":
item.fulltitle=item.title
contextCommands = []
ok = False
try:
item.context = urllib.unquote_plus(item.context)
except:
item.context=""
if "|" in item.context:
item.context = item.context.split("|")
if DEBUG:
try:
            logger.info('[xbmctools.py] addnewfolder( "'+item.channel+'" , "'+item.action+'" , "'+item.category+'" , "'+item.date+'", "'+item.title+'" , "' + item.url + '" , "'+item.thumbnail+'" , "'+item.plot+'")" , "'+item.show+'")"')
except:
logger.info('[xbmctools.py] addnewfolder(<unicode>)')
listitem = xbmcgui.ListItem( item.title, iconImage="DefaultFolder.png", thumbnailImage=item.thumbnail )
listitem.setInfo( "video", { "Title" : item.title, "Plot" : item.plot, "Studio" : item.channel, "Genre" : item.category, "Year" : item.date[0:4] } )
if item.fanart!="":
listitem.setProperty('fanart_image',item.fanart)
xbmcplugin.setPluginFanart(pluginhandle, item.fanart)
    #Apply a simple quote to avoid problems with unicode titles
# title = title.replace("&","%26").replace("+","%2B").replace("%","%25")
try:
        item.title = item.title.encode ("utf-8") #This only applies to unicode strings. The rest stay as they are.
except:
pass
itemurl = '%s?channel=%s&action=%s&category=%s&title=%s&fulltitle=%s&url=%s&thumbnail=%s&plot=%s&extradata=%s&show=%s' % ( sys.argv[ 0 ] , item.channel , item.action , urllib.quote_plus( item.category ) , urllib.quote_plus(item.title) , urllib.quote_plus(item.fulltitle) , urllib.quote_plus( item.url ) , urllib.quote_plus( item.thumbnail ) , urllib.quote_plus( item.plot ) , urllib.quote_plus( item.extra ) , urllib.quote_plus( item.show ))
logger.info("itemurl="+itemurl)
if item.context=="program":
if item.is_favorite=="true":
query = '%s?channel=api_programas&action=remove_from_favorites&url=%s' % ( sys.argv[ 0 ] , item.id)
command = "XBMC.RunPlugin("+query+")"
contextCommands.append( ("Quitar programa de favoritos",command) )
else:
query = '%s?channel=api_programas&action=add_to_favorites&url=%s' % ( sys.argv[ 0 ] , item.id)
command = "XBMC.RunPlugin("+query+")"
contextCommands.append( ("Añadir programa a favoritos",command) )
#query = '%s?channel=api_programas&action=add_to_hidden&url=%s' % ( sys.argv[ 0 ] , item.id)
#command = "XBMC.RunPlugin("+query+")"
#contextCommands.append( ("Ocultar este programa",command) )
if not suscription.already_suscribed(item):
query = '%s?channel=%s&action=subscribe_to_program&title=%s&url=%s&thumbnail=%s&plot=%s&extradata=%s&show=%s' % ( sys.argv[ 0 ] , item.channel , urllib.quote_plus(item.title) , urllib.quote_plus( item.url ) , urllib.quote_plus( item.thumbnail ) , urllib.quote_plus( item.plot ) , urllib.quote_plus( item.extra ) , urllib.quote_plus( item.title ))
command = "XBMC.RunPlugin("+query+")"
contextCommands.append( ("Suscribirme a este programa" , command) )
else:
query = '%s?channel=%s&action=unsubscribe_to_program&title=%s&url=%s&thumbnail=%s&plot=%s&extradata=%s&show=%s' % ( sys.argv[ 0 ] , item.channel , urllib.quote_plus(item.title) , urllib.quote_plus( item.url ) , urllib.quote_plus( item.thumbnail ) , urllib.quote_plus( item.plot ) , urllib.quote_plus( item.extra ) , urllib.quote_plus( item.title ))
command = "XBMC.RunPlugin("+query+")"
contextCommands.append( ("Quitar suscripción a este programa" , command) )
query = '%s?channel=%s&action=download_all_videos&title=%s&url=%s&thumbnail=%s&plot=%s&extradata=%s&show=%s' % ( sys.argv[ 0 ] , item.channel , urllib.quote_plus(item.title) , urllib.quote_plus( item.url ) , urllib.quote_plus( item.thumbnail ) , urllib.quote_plus( item.plot ) , urllib.quote_plus( item.extra ) , urllib.quote_plus( item.title ))
command = "XBMC.RunPlugin("+query+")"
contextCommands.append( ("Descargar todos los vídeos" , command) )
#elif item.context=="hidden_program":
# query = '%s?channel=api_programas&action=remove_from_hidden&url=%s' % ( sys.argv[ 0 ] , item.id)
# command = "XBMC.RunPlugin("+query+")"
# contextCommands.append( ("No ocultar este programa",command) )
if config.get_platform()=="boxee":
#logger.info("Modo boxee")
ok = xbmcplugin.addDirectoryItem( handle = pluginhandle, url = itemurl , listitem=listitem, isFolder=True)
else:
#logger.info("Modo xbmc")
if len(contextCommands) > 0:
listitem.addContextMenuItems ( contextCommands, replaceItems=True)
if totalItems == 0:
ok = xbmcplugin.addDirectoryItem( handle = pluginhandle, url = itemurl , listitem=listitem, isFolder=True)
else:
ok = xbmcplugin.addDirectoryItem( handle = pluginhandle, url = itemurl , listitem=listitem, isFolder=True, totalItems=totalItems)
return ok
def addnewvideo( canal , accion , category , server , title , url , thumbnail, plot ,Serie="",duration="",fanart="",IsPlayable='false',context = "", subtitle="", viewmode="", totalItems = 0, show="", password="", extra="",fulltitle="", size=""):
contextCommands = []
ok = False
try:
context = urllib.unquote_plus(context)
except:
context=""
if "|" in context:
context = context.split("|")
if DEBUG:
try:
logger.info('[xbmctools.py] addnewvideo( "'+canal+'" , "'+accion+'" , "'+category+'" , "'+server+'" , "'+title+'" , "' + url + '" , "'+thumbnail+'" , "'+plot+'")" , "'+Serie+'")"')
except:
logger.info('[xbmctools.py] addnewvideo(<unicode>)')
listitem = xbmcgui.ListItem( title, iconImage="DefaultVideo.png", thumbnailImage=thumbnail )
listitem.setInfo( "video", { "Title" : title, "Plot" : plot, "Duration" : duration, "Studio" : canal, "Genre" : category , "Size": size} )
if fanart!="":
logger.info("fanart :%s" %fanart)
listitem.setProperty('fanart_image',fanart)
xbmcplugin.setPluginFanart(pluginhandle, fanart)
    if IsPlayable == 'true': #This option makes it possible to use xbmcplugin.setResolvedUrl()
listitem.setProperty('IsPlayable', 'true')
#listitem.setProperty('fanart_image',os.path.join(IMAGES_PATH, "cinetube.png"))
if "1" in context: #El uno añade al menu contextual la opcion de guardar en megalive un canal a favoritos
addItemCommand = "XBMC.RunPlugin(%s?channel=%s&action=%s&category=%s&title=%s&fulltitle=%s&url=%s&thumbnail=%s&plot=%s&server=%s&Serie=%s&show=%s&password=%s&extradata=%s)" % ( sys.argv[ 0 ] , canal , "saveChannelFavorites" , urllib.quote_plus( category ) , urllib.quote_plus( title ) , urllib.quote_plus( fulltitle ) , urllib.quote_plus( url ) , urllib.quote_plus( thumbnail ) , urllib.quote_plus( plot ) , server , Serie, urllib.quote_plus(show), urllib.quote_plus( password) , urllib.quote_plus(extra) )
contextCommands.append((config.get_localized_string(30301),addItemCommand))
if "2" in context:#El dos añade al menu contextual la opciones de eliminar y/o renombrar un canal en favoritos
addItemCommand = "XBMC.RunPlugin(%s?channel=%s&action=%s&category=%s&title=%s&url=%s&thumbnail=%s&plot=%s&server=%s&Serie=%s&show=%s&password=%s&extradata=%s)" % ( sys.argv[ 0 ] , canal , "deleteSavedChannel" , urllib.quote_plus( category ) , urllib.quote_plus( title ) , urllib.quote_plus( fulltitle ) , urllib.quote_plus( url ) , urllib.quote_plus( thumbnail ) , urllib.quote_plus( plot ) , server , Serie, urllib.quote_plus( show), urllib.quote_plus( password) , urllib.quote_plus(extra) )
contextCommands.append((config.get_localized_string(30302),addItemCommand))
addItemCommand = "XBMC.RunPlugin(%s?channel=%s&action=%s&category=%s&title=%s&url=%s&thumbnail=%s&plot=%s&server=%s&Serie=%s&show=%s&password=%s&extradata=%s)" % ( sys.argv[ 0 ] , canal , "renameChannelTitle" , urllib.quote_plus( category ) , urllib.quote_plus( title ) , urllib.quote_plus( fulltitle ) , urllib.quote_plus( url ) , urllib.quote_plus( thumbnail ) , urllib.quote_plus( plot ) , server , Serie, urllib.quote_plus( show),urllib.quote_plus( password) , urllib.quote_plus(extra) )
contextCommands.append((config.get_localized_string(30303),addItemCommand))
if "6" in context:# Ver canal en vivo en justintv
justinCommand = "XBMC.PlayMedia(%s?channel=%s&action=%s&category=%s&title=%s&url=%s&thumbnail=%s&plot=%s)" % ( sys.argv[ 0 ] , "justintv" , "playVideo" , urllib.quote_plus( category ) , urllib.quote_plus( title ) , urllib.quote_plus( url ) , urllib.quote_plus( thumbnail ) , urllib.quote_plus( plot ) )
contextCommands.append((config.get_localized_string(30410),justinCommand))
if "7" in context:# Listar videos archivados en justintv
justinCommand = "XBMC.Container.Update(%s?channel=%s&action=%s&category=%s&title=%s&url=%s&thumbnail=%s&plot=%s)" % ( sys.argv[ 0 ] , "justintv" , "listarchives" , urllib.quote_plus( category ) , urllib.quote_plus( title ) , urllib.quote_plus( url ) , urllib.quote_plus( thumbnail ) , urllib.quote_plus( "" ) )
contextCommands.append((config.get_localized_string(30409),justinCommand))
if "8" in context:# Añadir canal a favoritos justintv
justinCommand = "XBMC.RunPlugin(%s?channel=%s&action=%s&category=%s&title=%s&url=%s&thumbnail=%s&plot=%s)" % ( sys.argv[ 0 ] , "justintv" , "addToFavorites" , urllib.quote_plus( category ) , urllib.quote_plus( title ) , urllib.quote_plus( url ) , urllib.quote_plus( thumbnail ) , urllib.quote_plus( "" ) )
contextCommands.append((config.get_localized_string(30406),justinCommand))
if "9" in context:# Remover canal de favoritos justintv
justinCommand = "XBMC.Container.Update(%s?channel=%s&action=%s&category=%s&title=%s&url=%s&thumbnail=%s&plot=%s)" % ( sys.argv[ 0 ] , "justintv" , "removeFromFavorites" , urllib.quote_plus( category ) , urllib.quote_plus( title ) , urllib.quote_plus( url ) , urllib.quote_plus( thumbnail ) , urllib.quote_plus( "" ) )
contextCommands.append((config.get_localized_string(30407),justinCommand))
if len (contextCommands) > 0:
listitem.addContextMenuItems ( contextCommands, replaceItems=False)
try:
        title = title.encode ("utf-8") #This only applies to unicode strings. The rest stay as they are.
plot = plot.encode ("utf-8")
except:
pass
itemurl = '%s?channel=%s&action=%s&category=%s&title=%s&fulltitle=%s&url=%s&thumbnail=%s&plot=%s&server=%s&Serie=%s&subtitle=%s&show=%s&viewmode=%s&extradata=%s' % ( sys.argv[ 0 ] , canal , accion , urllib.quote_plus( category ) , urllib.quote_plus( title ) , urllib.quote_plus( fulltitle ) , urllib.quote_plus( url ) , urllib.quote_plus( thumbnail ) , urllib.quote_plus( plot ) , server , Serie , urllib.quote_plus(subtitle), urllib.quote_plus( show ) ,urllib.quote_plus(viewmode), urllib.quote_plus(extra) )
#logger.info("[xbmctools.py] itemurl=%s" % itemurl)
if totalItems == 0:
ok = xbmcplugin.addDirectoryItem( handle = pluginhandle, url=itemurl, listitem=listitem, isFolder=False)
else:
ok = xbmcplugin.addDirectoryItem( handle = pluginhandle, url=itemurl, listitem=listitem, isFolder=False, totalItems=totalItems)
return ok
def addthumbnailfolder( canal , scrapedtitle , scrapedurl , scrapedthumbnail , accion ):
logger.info('[xbmctools.py] addthumbnailfolder( "'+scrapedtitle+'" , "' + scrapedurl + '" , "'+scrapedthumbnail+'" , "'+accion+'")"')
listitem = xbmcgui.ListItem( scrapedtitle, iconImage="DefaultFolder.png", thumbnailImage=scrapedthumbnail )
itemurl = '%s?channel=%s&action=%s&category=%s&url=%s&title=%s&thumbnail=%s' % ( sys.argv[ 0 ] , canal , accion , urllib.quote_plus( scrapedtitle ) , urllib.quote_plus( scrapedurl ) , urllib.quote_plus( scrapedtitle ) , urllib.quote_plus( scrapedthumbnail ) )
xbmcplugin.addDirectoryItem( handle = pluginhandle, url = itemurl , listitem=listitem, isFolder=True)
def addfolder( canal , nombre , url , accion ):
logger.info('[xbmctools.py] addfolder( "'+nombre+'" , "' + url + '" , "'+accion+'")"')
listitem = xbmcgui.ListItem( nombre , iconImage="DefaultFolder.png")
itemurl = '%s?channel=%s&action=%s&category=%s&url=%s' % ( sys.argv[ 0 ] , canal , accion , urllib.quote_plus(nombre) , urllib.quote_plus(url) )
xbmcplugin.addDirectoryItem( handle = pluginhandle, url = itemurl , listitem=listitem, isFolder=True)
def addvideo( canal , nombre , url , category , server , Serie=""):
logger.info('[xbmctools.py] addvideo( "'+nombre+'" , "' + url + '" , "'+server+ '" , "'+Serie+'")"')
listitem = xbmcgui.ListItem( nombre, iconImage="DefaultVideo.png" )
listitem.setInfo( "video", { "Title" : nombre, "Plot" : nombre } )
itemurl = '%s?channel=%s&action=play&category=%s&url=%s&server=%s&title=%s&Serie=%s' % ( sys.argv[ 0 ] , canal , category , urllib.quote_plus(url) , server , urllib.quote_plus( nombre ) , Serie)
xbmcplugin.addDirectoryItem( handle=pluginhandle, url=itemurl, listitem=listitem, isFolder=False)
# FIXME: Why not pass the item instead of all these parameters?
def play_video(channel="",server="",url="",category="",title="", thumbnail="",plot="",extra="",desdefavoritos=False,desdedescargados=False,desderrordescargas=False,strmfile=False,Serie="",subtitle="", video_password="",fulltitle=""):
from servers import servertools
import sys
import xbmcgui
if url.startswith("http://"):
url = url.replace(" ","%20")
try:
logger.info("[xbmctools.py] play_video(channel=%s, server=%s, url=%s, category=%s, title=%s, thumbnail=%s, plot=%s, desdefavoritos=%s, desdedescargados=%s, desderrordescargas=%s, strmfile=%s, Serie=%s, subtitle=%s" % (channel,server,url,category,title,thumbnail,plot,desdefavoritos,desdedescargados,desderrordescargas,strmfile,Serie,subtitle))
except:
pass
try:
server = server.lower()
except:
server = ""
if server=="":
server="directo"
try:
from core import descargas
download_enable=True
except:
download_enable=False
view = False
    # Open the selection dialog
    opciones = []
    default_action = config.get_setting("default_action")
    logger.info("default_action="+default_action)
    # If not in normal mode, do not show the dialog because it hangs XBMC
    muestra_dialogo = (config.get_setting("player_mode")=="0" and not strmfile)
    # Extract the video URLs; if the video cannot be played, the reason is reported
    video_urls,puedes,motivo = servertools.resolve_video_urls_for_playing(server,url,video_password,muestra_dialogo)
    # If the video can be played, present the options
if puedes:
for video_url in video_urls:
opciones.append(config.get_localized_string(30151) + " " + video_url[0])
if server=="local":
opciones.append(config.get_localized_string(30164))
else:
if download_enable:
opcion = config.get_localized_string(30153)
opciones.append(opcion) # "Descargar"
if channel=="favoritos":
opciones.append(config.get_localized_string(30154)) # "Quitar de favoritos"
else:
opciones.append(config.get_localized_string(30155)) # "Añadir a favoritos"
#if not strmfile:
# opciones.append(config.get_localized_string(30161)) # "Añadir a Biblioteca"
if download_enable:
if channel!="descargas":
opciones.append(config.get_localized_string(30157)) # "Añadir a lista de descargas"
else:
if category=="errores":
opciones.append(config.get_localized_string(30159)) # "Borrar descarga definitivamente"
opciones.append(config.get_localized_string(30160)) # "Pasar de nuevo a lista de descargas"
else:
opciones.append(config.get_localized_string(30156)) # "Quitar de lista de descargas"
#opciones.append(config.get_localized_string(30158)) # "Enviar a JDownloader"
if default_action=="3":
seleccion = len(opciones)-1
    # Trailer search on YouTube
#if not channel in ["Trailer","ecarteleratrailers"]:
# opciones.append(config.get_localized_string(30162)) # "Buscar Trailer"
    # If the video cannot be played, inform the user
else:
        import xbmcgui
        advertencia = xbmcgui.Dialog()
        if server!="":
if "<br/>" in motivo:
resultado = advertencia.ok( "No puedes ver ese vídeo porque...",motivo.split("<br/>")[0],motivo.split("<br/>")[1],url)
else:
resultado = advertencia.ok( "No puedes ver ese vídeo porque...",motivo,url)
else:
resultado = advertencia.ok( "No puedes ver ese vídeo porque...","El servidor donde está alojado no está","soportado en pelisalacarta todavía",url)
if channel=="favoritos":
opciones.append(config.get_localized_string(30154)) # "Quitar de favoritos"
if channel=="descargas":
if category=="errores":
opciones.append(config.get_localized_string(30159)) # "Borrar descarga definitivamente"
else:
opciones.append(config.get_localized_string(30156)) # "Quitar de lista de descargas"
if len(opciones)==0:
return
    # If the default action is "Ask", ask
if default_action=="0":
import xbmcgui
dia = xbmcgui.Dialog()
seleccion = dia.select(config.get_localized_string(30163), opciones) # "Elige una opción"
#dia.close()
elif default_action=="1":
seleccion = 0
elif default_action=="2":
seleccion = len(video_urls)-1
elif default_action=="3":
seleccion = seleccion
else:
seleccion=0
logger.info("seleccion=%d" % seleccion)
logger.info("seleccion=%s" % opciones[seleccion])
    # Nothing was chosen, most likely because the user pressed ESC
    if seleccion==-1:
        #Avoids the "One or more items failed" error when cancelling the selection from a strm file
listitem = xbmcgui.ListItem( title, iconImage="DefaultVideo.png", thumbnailImage=thumbnail)
import sys
xbmcplugin.setResolvedUrl(int(sys.argv[ 1 ]),False,listitem) # JUR Added
#if config.get_setting("subtitulo") == "true":
# config.set_setting("subtitulo", "false")
return
if opciones[seleccion]==config.get_localized_string(30158): # "Enviar a JDownloader"
#d = {"web": url}urllib.urlencode(d)
from core import scrapertools
data = scrapertools.cachePage(config.get_setting("jdownloader")+"/action/add/links/grabber0/start1/web="+url+ " " +thumbnail)
return
    elif opciones[seleccion]==config.get_localized_string(30164): # Delete file in downloads
        # "extra" holds the file name in favourites
import os
os.remove( url )
if os.path.exists(url[:-4]+".tbn"):
os.remove( url[:-4]+".tbn" )
if os.path.exists(url[:-4]+".nfo"):
os.remove( url[:-4]+".nfo" )
xbmc.executebuiltin( "Container.Refresh" )
return
    # One of the videos was chosen
elif seleccion < len(video_urls):
mediaurl = video_urls[seleccion][1]
if len(video_urls[seleccion])>2:
wait_time = video_urls[seleccion][2]
else:
wait_time = 0
if len(video_urls[seleccion])>3:
use_download_and_play = (video_urls[seleccion][3]=="download_and_play")
else:
use_download_and_play = False
view = True
    # Download
elif opciones[seleccion]==config.get_localized_string(30153): # "Descargar"
        # The highest-quality video is the last one
mediaurl = video_urls[len(video_urls)-1][1]
        # If filenium should not be used for downloads, take the previous one
if config.get_setting("filenium_for_download")=="false" and video_urls[len(video_urls)-1][0]=="[filenium]":
mediaurl = video_urls[len(video_urls)-2][1]
from core import downloadtools
keyboard = xbmc.Keyboard(fulltitle)
keyboard.doModal()
if (keyboard.isConfirmed()):
title = keyboard.getText()
downloadtools.downloadtitle(mediaurl,title)
return
elif opciones[seleccion]==config.get_localized_string(30154): #"Quitar de favoritos"
from core import favoritos
# En "extra" está el nombre del fichero en favoritos
favoritos.deletebookmark(urllib.unquote_plus( extra ))
advertencia = xbmcgui.Dialog()
resultado = advertencia.ok(config.get_localized_string(30102) , title , config.get_localized_string(30105)) # 'Se ha quitado de favoritos'
xbmc.executebuiltin( "Container.Refresh" )
return
elif opciones[seleccion]==config.get_localized_string(30159): #"Borrar descarga definitivamente"
from core import descargas
descargas.delete_error_bookmark(urllib.unquote_plus( extra ))
advertencia = xbmcgui.Dialog()
resultado = advertencia.ok(config.get_localized_string(30101) , title , config.get_localized_string(30106)) # 'Se ha quitado de la lista'
xbmc.executebuiltin( "Container.Refresh" )
return
elif opciones[seleccion]==config.get_localized_string(30160): #"Pasar de nuevo a lista de descargas":
from core import descargas
descargas.mover_descarga_error_a_pendiente(urllib.unquote_plus( extra ))
advertencia = xbmcgui.Dialog()
resultado = advertencia.ok(config.get_localized_string(30101) , title , config.get_localized_string(30107)) # 'Ha pasado de nuevo a la lista de descargas'
return
elif opciones[seleccion]==config.get_localized_string(30155): #"Añadir a favoritos":
from core import favoritos
from core import downloadtools
keyboard = xbmc.Keyboard(downloadtools.limpia_nombre_excepto_1(fulltitle)+" ["+channel+"]")
keyboard.doModal()
if keyboard.isConfirmed():
title = keyboard.getText()
favoritos.savebookmark(titulo=title,url=url,thumbnail=thumbnail,server=server,plot=plot,fulltitle=title)
advertencia = xbmcgui.Dialog()
resultado = advertencia.ok(config.get_localized_string(30102) , title , config.get_localized_string(30108)) # 'se ha añadido a favoritos'
return
elif opciones[seleccion]==config.get_localized_string(30156): #"Quitar de lista de descargas":
from core import descargas
        # The category is the file name in the download list
descargas.deletebookmark((urllib.unquote_plus( extra )))
advertencia = xbmcgui.Dialog()
resultado = advertencia.ok(config.get_localized_string(30101) , title , config.get_localized_string(30106)) # 'Se ha quitado de lista de descargas'
xbmc.executebuiltin( "Container.Refresh" )
return
elif opciones[seleccion]==config.get_localized_string(30157): #"Añadir a lista de descargas":
from core import descargas
from core import downloadtools
keyboard = xbmc.Keyboard(downloadtools.limpia_nombre_excepto_1(fulltitle))
keyboard.doModal()
if keyboard.isConfirmed():
title = keyboard.getText()
descargas.savebookmark(titulo=title,url=url,thumbnail=thumbnail,server=server,plot=plot,fulltitle=title)
advertencia = xbmcgui.Dialog()
resultado = advertencia.ok(config.get_localized_string(30101) , title , config.get_localized_string(30109)) # 'se ha añadido a la lista de descargas'
return
elif opciones[seleccion]==config.get_localized_string(30161): #"Añadir a Biblioteca": # Library
from platformcode.xbmc import library
titulo = fulltitle
if fulltitle=="":
titulo = title
library.savelibrary(titulo,url,thumbnail,server,plot,canal=channel,category=category,Serie=Serie)
advertencia = xbmcgui.Dialog()
resultado = advertencia.ok(config.get_localized_string(30101) , fulltitle , config.get_localized_string(30135)) # 'se ha añadido a la lista de descargas'
return
elif opciones[seleccion]==config.get_localized_string(30162): #"Buscar Trailer":
config.set_setting("subtitulo", "false")
import sys
xbmc.executebuiltin("Container.Update(%s?channel=%s&action=%s&category=%s&title=%s&url=%s&thumbnail=%s&plot=%s&server=%s)" % ( sys.argv[ 0 ] , "trailertools" , "buscartrailer" , urllib.quote_plus( category ) , urllib.quote_plus( fulltitle ) , urllib.quote_plus( url ) , urllib.quote_plus( thumbnail ) , urllib.quote_plus( "" ) , server ))
return
    # If there is no mediaurl, the video is gone :)
logger.info("[xbmctools.py] mediaurl="+mediaurl)
if mediaurl=="":
logger.info("b1")
if server == "unknown":
alertUnsopportedServer()
else:
alertnodisponibleserver(server)
return
    # If there is a wait time (as with megaupload), enforce it now
if wait_time>0:
logger.info("b2")
continuar = handle_wait(wait_time,server,"Cargando vídeo...")
if not continuar:
return
    # Fetch Library data (only strm files that are in the library)
import xbmcgui
if strmfile:
logger.info("b3")
xlistitem = getLibraryInfo(mediaurl)
else:
logger.info("b4")
try:
xlistitem = xbmcgui.ListItem( title, iconImage="DefaultVideo.png", thumbnailImage=thumbnail, path=mediaurl)
except:
xlistitem = xbmcgui.ListItem( title, iconImage="DefaultVideo.png", thumbnailImage=thumbnail)
xlistitem.setInfo( "video", { "Title": title, "Plot" : plot , "Studio" : channel , "Genre" : category } )
    # Download the subtitle
if channel=="cuevana" and subtitle!="" and (opciones[seleccion].startswith("Ver") or opciones[seleccion].startswith("Watch")):
logger.info("b5")
try:
import os
ficherosubtitulo = os.path.join( config.get_data_path(), 'subtitulo.srt' )
if os.path.exists(ficherosubtitulo):
try:
os.remove(ficherosubtitulo)
except IOError:
logger.info("Error al eliminar el archivo subtitulo.srt "+ficherosubtitulo)
raise
from core import scrapertools
data = scrapertools.cache_page(subtitle)
#print data
fichero = open(ficherosubtitulo,"w")
fichero.write(data)
fichero.close()
#from core import downloadtools
#downloadtools.downloadfile(subtitle, ficherosubtitulo )
except:
logger.info("Error al descargar el subtítulo")
    # Launch the player
    if strmfile: #For a strm file no explicit play call is needed
logger.info("b6")
import sys
xbmcplugin.setResolvedUrl(int(sys.argv[ 1 ]),True,xlistitem)
#if subtitle!="" and (opciones[seleccion].startswith("Ver") or opciones[seleccion].startswith("Watch")):
# logger.info("[xbmctools.py] Con subtitulos")
# setSubtitles()
else:
logger.info("b7")
if use_download_and_play or config.get_setting("player_mode")=="3":
logger.info("b11")
import download_and_play
            # The channel requires download_and_play, but the user did not choose it -> remove the dialogs
if use_download_and_play and config.get_setting("player_mode")!="3":
download_and_play.download_and_play( mediaurl , "download_and_play.tmp" , config.get_setting("downloadpath") , show_dialog=False )
else:
download_and_play.download_and_play( mediaurl , "download_and_play.tmp" , config.get_setting("downloadpath") )
return
elif config.get_setting("player_mode")=="0":
logger.info("b8")
            # Add the listitem to a playlist
playlist = xbmc.PlayList( xbmc.PLAYLIST_VIDEO )
playlist.clear()
playlist.add( mediaurl, xlistitem )
            # Play
playersettings = config.get_setting('player_type')
logger.info("[xbmctools.py] playersettings="+playersettings)
player_type = xbmc.PLAYER_CORE_AUTO
if playersettings == "0":
player_type = xbmc.PLAYER_CORE_AUTO
logger.info("[xbmctools.py] PLAYER_CORE_AUTO")
elif playersettings == "1":
player_type = xbmc.PLAYER_CORE_MPLAYER
logger.info("[xbmctools.py] PLAYER_CORE_MPLAYER")
elif playersettings == "2":
player_type = xbmc.PLAYER_CORE_DVDPLAYER
logger.info("[xbmctools.py] PLAYER_CORE_DVDPLAYER")
xbmcPlayer = xbmc.Player( player_type )
xbmcPlayer.play(playlist)
if channel=="cuevana" and subtitle!="":
logger.info("subtitulo="+subtitle)
if subtitle!="" and (opciones[seleccion].startswith("Ver") or opciones[seleccion].startswith("Watch")):
logger.info("[xbmctools.py] Con subtitulos")
setSubtitles()
elif config.get_setting("player_mode")=="1":
logger.info("b9")
#xlistitem.setProperty('IsPlayable', 'true')
#xlistitem.setProperty('path', mediaurl)
xbmcplugin.setResolvedUrl(int(sys.argv[1]), True, xbmcgui.ListItem(path=mediaurl))
elif config.get_setting("player_mode")=="2":
logger.info("b10")
xbmc.executebuiltin( "PlayMedia("+mediaurl+")" )
if (config.get_setting("subtitulo") == "true") and view:
logger.info("b11")
from core import subtitletools
wait2second()
subtitletools.set_Subtitle()
if subtitle!="":
xbmc.Player().setSubtitles(subtitle)
def handle_wait(time_to_wait,title,text):
logger.info ("[xbmctools.py] handle_wait(time_to_wait=%d)" % time_to_wait)
import xbmc,xbmcgui
espera = xbmcgui.DialogProgress()
ret = espera.create(' '+title)
secs=0
percent=0
increment = int(100 / time_to_wait)
cancelled = False
while secs < time_to_wait:
secs = secs + 1
percent = increment*secs
secs_left = str((time_to_wait - secs))
remaining_display = ' Espera '+secs_left+' segundos para que comience el vídeo...'
espera.update(percent,' '+text,remaining_display)
xbmc.sleep(1000)
if (espera.iscanceled()):
cancelled = True
break
if cancelled == True:
logger.info ('Espera cancelada')
return False
else:
logger.info ('Espera finalizada')
return True
def getLibraryInfo (mediaurl):
    '''Gets Library information if available (strm files) or falls back to the parameters
    '''
if DEBUG:
logger.info('[xbmctools.py] playlist OBTENCIÓN DE DATOS DE BIBLIOTECA')
    # Basic information
label = xbmc.getInfoLabel( 'listitem.label' )
label2 = xbmc.getInfoLabel( 'listitem.label2' )
iconImage = xbmc.getInfoImage( 'listitem.icon' )
thumbnailImage = xbmc.getInfoImage( 'listitem.Thumb' ) #xbmc.getInfoLabel( 'listitem.thumbnailImage' )
if DEBUG:
logger.info ("[xbmctools.py]getMediaInfo: label = " + label)
logger.info ("[xbmctools.py]getMediaInfo: label2 = " + label2)
logger.info ("[xbmctools.py]getMediaInfo: iconImage = " + iconImage)
logger.info ("[xbmctools.py]getMediaInfo: thumbnailImage = " + thumbnailImage)
    # Create the listitem
listitem = xbmcgui.ListItem(label, label2, iconImage, thumbnailImage, mediaurl)
    # Additional information
lista = [
('listitem.genre', 's'), #(Comedy)
('listitem.year', 'i'), #(2009)
('listitem.episode', 'i'), #(4)
('listitem.season', 'i'), #(1)
('listitem.top250', 'i'), #(192)
('listitem.tracknumber', 'i'), #(3)
('listitem.rating', 'f'), #(6.4) - range is 0..10
# ('listitem.watched', 'd'), # depreciated. use playcount instead
('listitem.playcount', 'i'), #(2) - number of times this item has been played
# ('listitem.overlay', 'i'), #(2) - range is 0..8. See GUIListItem.h for values
        ('listitem.overlay', 's'), #JUR - listitem returns a string, but addinfo expects an int. See the conversion further down
('listitem.cast', 's'), # (Michal C. Hall) - List concatenated into a string
('listitem.castandrole', 's'), #(Michael C. Hall|Dexter) - List concatenated into a string
('listitem.director', 's'), #(Dagur Kari)
('listitem.mpaa', 's'), #(PG-13)
('listitem.plot', 's'), #(Long Description)
('listitem.plotoutline', 's'), #(Short Description)
('listitem.title', 's'), #(Big Fan)
('listitem.duration', 's'), #(3)
('listitem.studio', 's'), #(Warner Bros.)
('listitem.tagline', 's'), #(An awesome movie) - short description of movie
('listitem.writer', 's'), #(Robert D. Siegel)
('listitem.tvshowtitle', 's'), #(Heroes)
('listitem.premiered', 's'), #(2005-03-04)
('listitem.status', 's'), #(Continuing) - status of a TVshow
('listitem.code', 's'), #(tt0110293) - IMDb code
('listitem.aired', 's'), #(2008-12-07)
('listitem.credits', 's'), #(Andy Kaufman) - writing credits
('listitem.lastplayed', 's'), #(%Y-%m-%d %h
('listitem.album', 's'), #(The Joshua Tree)
('listitem.votes', 's'), #(12345 votes)
('listitem.trailer', 's'), #(/home/user/trailer.avi)
]
    # Gather all the available info and put it into a dictionary
    # for the setInfo function.
infodict = dict()
for label,tipo in lista:
key = label.split('.',1)[1]
value = xbmc.getInfoLabel( label )
if value != "":
if DEBUG:
logger.info ("[xbmctools.py]getMediaInfo: "+key+" = " + value) #infoimage=infolabel
if tipo == 's':
infodict[key]=value
elif tipo == 'i':
infodict[key]=int(value)
elif tipo == 'f':
infodict[key]=float(value)
    #Convert the overlay value from string to int.
if infodict.has_key('overlay'):
value = infodict['overlay'].lower()
if value.find('rar') > -1:
infodict['overlay'] = 1
elif value.find('zip')> -1:
infodict['overlay'] = 2
elif value.find('trained')> -1:
infodict['overlay'] = 3
elif value.find('hastrainer')> -1:
infodict['overlay'] = 4
elif value.find('locked')> -1:
infodict['overlay'] = 5
elif value.find('unwatched')> -1:
infodict['overlay'] = 6
elif value.find('watched')> -1:
infodict['overlay'] = 7
elif value.find('hd')> -1:
infodict['overlay'] = 8
else:
infodict.pop('overlay')
if len (infodict) > 0:
listitem.setInfo( "video", infodict )
return listitem
def alertnodisponible():
advertencia = xbmcgui.Dialog()
#'Vídeo no disponible'
#'No se han podido localizar videos en la página del canal'
resultado = advertencia.ok(config.get_localized_string(30055) , config.get_localized_string(30056))
def alertnodisponibleserver(server):
advertencia = xbmcgui.Dialog()
# 'El vídeo ya no está en %s' , 'Prueba en otro servidor o en otro canal'
resultado = advertencia.ok( config.get_localized_string(30055),(config.get_localized_string(30057)%server),config.get_localized_string(30058))
def alertUnsopportedServer():
advertencia = xbmcgui.Dialog()
# 'Servidor no soportado o desconocido' , 'Prueba en otro servidor o en otro canal'
resultado = advertencia.ok( config.get_localized_string(30065),config.get_localized_string(30058))
def alerterrorpagina():
advertencia = xbmcgui.Dialog()
#'Error en el sitio web'
#'No se puede acceder por un error en el sitio web'
resultado = advertencia.ok(config.get_localized_string(30059) , config.get_localized_string(30060))
def alertanomegauploadlow(server):
advertencia = xbmcgui.Dialog()
#'La calidad elegida no esta disponible', 'o el video ha sido borrado',
#'Prueba a reproducir en otra calidad'
resultado = advertencia.ok( config.get_localized_string(30055) , config.get_localized_string(30061) , config.get_localized_string(30062))
# ADDED BY JUR. STRM FILE SUPPORT
def playstrm(params,url,category):
    '''Play for videos in strm files
    '''
logger.info("[xbmctools.py] playstrm url="+url)
title = unicode( xbmc.getInfoLabel( "ListItem.Title" ), "utf-8" )
thumbnail = urllib.unquote_plus( params.get("thumbnail") )
plot = unicode( xbmc.getInfoLabel( "ListItem.Plot" ), "utf-8" )
server = params["server"]
if (params.has_key("Serie")):
serie = params.get("Serie")
else:
serie = ""
if (params.has_key("subtitle")):
subtitle = params.get("subtitle")
else:
subtitle = ""
from core.item import Item
from core.subtitletools import saveSubtitleName
item = Item(title=title,show=serie)
saveSubtitleName(item)
play_video("Biblioteca pelisalacarta",server,url,category,title,thumbnail,plot,strmfile=True,Serie=serie,subtitle=subtitle)
def renderItems(itemlist, params, url, category,isPlayable='false'):
viewmode = "list"
if itemlist <> None:
for item in itemlist:
if item.category == "":
item.category = category
if item.fulltitle=="":
item.fulltitle=item.title
if item.fanart=="":
channel_fanart = os.path.join( config.get_runtime_path(), 'resources', 'images', 'fanart', item.channel+'.jpg')
if os.path.exists(channel_fanart):
item.fanart = channel_fanart
else:
item.fanart = os.path.join(config.get_runtime_path(),"fanart.jpg")
if item.folder :
add_new_folder( item , totalItems=len(itemlist) )
else:
if config.get_setting("player_mode")=="1": # SetResolvedUrl debe ser siempre "isPlayable = true"
isPlayable = "true"
if item.duration:
addnewvideo( item.channel , item.action , item.category , item.server, item.title , item.url , item.thumbnail , item.plot , "" , duration = item.duration , fanart = item.fanart, IsPlayable=isPlayable,context = item.context , subtitle=item.subtitle, totalItems = len(itemlist), show=item.show, password = item.password, extra = item.extra, fulltitle=item.fulltitle, size=item.size )
else:
addnewvideo( item.channel , item.action , item.category , item.server, item.title , item.url , item.thumbnail , item.plot, fanart = item.fanart, IsPlayable=isPlayable , context = item.context , subtitle = item.subtitle , totalItems = len(itemlist), show=item.show , password = item.password , extra=item.extra, fulltitle=item.fulltitle, size=item.size )
if item.viewmode!="list":
viewmode = item.viewmode
    # Close the directory
xbmcplugin.setPluginCategory( handle=pluginhandle, category=category )
xbmcplugin.addSortMethod( handle=pluginhandle, sortMethod=xbmcplugin.SORT_METHOD_NONE )
if config.get_setting("forceview")=="true":
if viewmode=="list":
xbmcplugin.setContent(pluginhandle,"Movies")
xbmc.executebuiltin("Container.SetViewMode(50)")
elif viewmode=="movie_with_plot":
xbmcplugin.setContent(pluginhandle,"Movies")
xbmc.executebuiltin("Container.SetViewMode(504)")
elif viewmode=="movie":
xbmcplugin.setContent(pluginhandle,"Movies")
xbmc.executebuiltin("Container.SetViewMode(500)")
elif viewmode=="series":
xbmcplugin.setContent(pluginhandle,"tvshows")
xbmc.executebuiltin("Container.SetViewMode(504)")
elif viewmode=="episodes":
xbmcplugin.setContent(pluginhandle,"episodes")
xbmc.executebuiltin("Container.SetViewMode(504)")
xbmcplugin.endOfDirectory( handle=pluginhandle, succeeded=True )
def wait2second():
logger.info("[xbmctools.py] wait2second")
import time
contador = 0
while xbmc.Player().isPlayingVideo()==False:
logger.info("[xbmctools.py] setSubtitles: Waiting 2 seconds for video to start before setting subtitles")
time.sleep(2)
contador = contador + 1
if contador>10:
break
def setSubtitles():
logger.info("[xbmctools.py] setSubtitles")
import time
contador = 0
while xbmc.Player().isPlayingVideo()==False:
logger.info("[xbmctools.py] setSubtitles: Waiting 2 seconds for video to start before setting subtitles")
time.sleep(2)
contador = contador + 1
if contador>10:
break
subtitlefile = os.path.join( config.get_data_path(), 'subtitulo.srt' )
logger.info("[xbmctools.py] setting subtitle file %s" % subtitlefile)
xbmc.Player().setSubtitles(subtitlefile)
def trailer(item):
logger.info("[xbmctools.py] trailer")
config.set_setting("subtitulo", "false")
import sys
xbmc.executebuiltin("XBMC.RunPlugin(%s?channel=%s&action=%s&category=%s&title=%s&url=%s&thumbnail=%s&plot=%s&server=%s)" % ( sys.argv[ 0 ] , "trailertools" , "buscartrailer" , urllib.quote_plus( item.category ) , urllib.quote_plus( item.fulltitle ) , urllib.quote_plus( item.url ) , urllib.quote_plus( item.thumbnail ) , urllib.quote_plus( "" ) ))
return
def alert_no_puedes_ver_video(server,url,motivo):
    import xbmcgui
    advertencia = xbmcgui.Dialog()
    if server!="":
if "<br/>" in motivo:
resultado = advertencia.ok( "No puedes ver ese vídeo porque...",motivo.split("<br/>")[0],motivo.split("<br/>")[1],url)
else:
resultado = advertencia.ok( "No puedes ver ese vídeo porque...",motivo,url)
else:
resultado = advertencia.ok( "No puedes ver ese vídeo porque...","El servidor donde está alojado no está","soportado en pelisalacarta todavía",url)
| gpl-3.0 | 4,295,399,911,927,702,000 | 49.429553 | 514 | 0.624713 | false |
boisde/Greed_Island | openapi_doc/lazy_cat_open_api_pick_up.py | 1 | 5348 | # coding=utf-8
def auth():
"""
@apiDefine AuthHeader
@apiHeaderExample Auth-Header-Example
{
"Authorization": "token 5b42e18555c11dbf2c31403ea6b706a6"
}
    @apiHeader {string} Authorization Identity check; the format is "token <token>". Note that a single space is required after "token". Contact us to obtain a test token.
"""
pass
def auth_first():
"""
    @api {method} During API integration, contact us for a test token, then pass the token in the request header of every call, as shown in the example. Obtain a test/production token
@apiVersion 0.0.1
@apiName auth_first
    @apiDescription Before you start, you must contact us to obtain a service access token for the MrWind platform. The token will be used by us as your identity credential.
@apiGroup Auth
@apiUse AuthHeader
"""
pass
def api_create_pick_up_order():
"""
    @api {post} /open_api/pick_up_order Create pick-up order
@apiVersion 0.0.1
    @apiDescription Creates a pick-up order, used to dispatch a MrWind courier for pick-up.
@apiName api_create_pick_up_order
@apiGroup PickUp
@apiUse AuthHeader
    @apiParam (Request Payload) {object} origin Source platform information
    @apiParam (Request Payload) {string} origin.order_id Source order id
    @apiParam (Request Payload) {string} [origin.create_time] Source order creation time, UTC+8 (Beijing time), formatted as `"%Y-%m-%d %H:%M:%S"`
    @apiParam (Request Payload) {object} sender Sender (customer) information
    @apiParam (Request Payload) {string(12)} sender.name Sender name
    @apiParam (Request Payload) {string(11)} sender.tel Sender phone number
    @apiParam (Request Payload) {string(8)} sender.city City of the pick-up address
    @apiParam (Request Payload) {string(8)} sender.district District of the pick-up address
    @apiParam (Request Payload) {string(64)} sender.address Detailed pick-up address
    @apiParam (Request Payload) {object} receiver Delivery information
    @apiParam (Request Payload) {string(12)} receiver.name Receiver name
    @apiParam (Request Payload) {string(11)} receiver.tel Receiver phone number
    @apiParam (Request Payload) {string(8)} receiver.city City of the delivery address
    @apiParam (Request Payload) {string(8)} receiver.district District of the delivery address
    @apiParam (Request Payload) {string(64)} receiver.address Detailed delivery address
    @apiParam (Request Payload) {string(64)} remark Delivery remark
    @apiParamExample {json} Example request URL/body:
Request URL: http://123.57.40.134:5556/open_api/pick_up_order/create
Request Method: POST
Request Payload:
{
"origin_order_id": "904dcb84-81a0-4f59-bdb8-dab50baba7d2",
"sender": {
"name": "刘先生",
"tel": "13012345678",
"city": "杭州市",
"district": "滨江区",
"address": "江陵路2028号星耀城一幢301"
},
"receiver": {
"name": "杨小姐",
"tel": "812345678",
"city": "杭州市",
"district": "滨江区",
"address": "滨盛路1509号天恒大厦204"
},
"remark": "咖啡别撒了,沙拉盒不要翻。告诉杨小姐:健康养生的沙拉对身体好哦,么么哒"
}
    @apiSuccessExample {json} Example success response:
HTTP/1.1 200 OK
{
"number": "000000050023"
}
    @apiSuccess {string} number MrWind waybill number
    @apiErrorExample {json} Example error response:
HTTP/1.1 400 ValueError
{
"message": "order_id[904dcb84-81a0-4f59-bdb8-dab50baba7d2] duplicated."
}
    @apiError (Error codes) 401 Invalid or missing token
    @apiError (Error codes) 400 Duplicate order from the source platform, or another logic error.
"""
pass
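# Client-side sketch (added illustration, not part of the original apidoc): one
# way a partner might call the endpoint documented above using the `requests`
# library.  The URL and token are copied from the examples in the docstring;
# the placeholder payload values are assumptions.
def _example_create_pick_up_order():
    import requests
    url = "http://123.57.40.134:5556/open_api/pick_up_order/create"
    headers = {"Authorization": "token 5b42e18555c11dbf2c31403ea6b706a6"}
    payload = {
        "origin_order_id": "904dcb84-81a0-4f59-bdb8-dab50baba7d2",
        "sender": {"name": "...", "tel": "13012345678", "city": "...",
                   "district": "...", "address": "..."},
        "receiver": {"name": "...", "tel": "812345678", "city": "...",
                     "district": "...", "address": "..."},
        "remark": "",
    }
    resp = requests.post(url, json=payload, headers=headers)
    # A 200 response carries the MrWind waybill number, e.g. {"number": "000000050023"}
    return resp.json()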
def api_pick_up_order_callback():
"""
    @api {post} <callback> Pick-up order callback
@apiVersion 0.0.1
    @apiDescription When the order status changes, the preconfigured callback URL is requested to notify the partner of the change. If the source platform returns a failure, retrying is currently not supported.
@apiName api_pick_up_order_callback
@apiGroup PickUp
    @apiParam {string(12)} number MrWind waybill number
    @apiParam {string} status MrWind waybill status
    @apiParam {string} msg Remark describing the status change
    @apiParam {string} actual_time Time of the status change, UTC+8 (Beijing time)
    @apiParam {object} info Additional information
    @apiParam {string(32)} origin_order_id Original order id on the source platform
    @apiParamExample {json} Example request URL/body:
Request URL: http://callback.your_company.com:8888/update_order?from=mrwind
Request Method: POST
Request Payload:
{
"number": "000000050023",
"status": "CREATED/ASSIGNED/PICKED_UP/FINISHED/ERROR",
"msg": "已创建/已联系配送员/配送员已取货/签收/异常",
"update_time": "2016-01-04 11:22:14",
"origin_order_id": "904dcb84-81a0-4f59-bdb8-dab50baba7d2",
}
"""
pass
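# Receiver sketch (added illustration, not part of the original apidoc): a
# minimal Flask view showing the shape of the callback payload described above.
# The /update_order route comes from the documented example URL; the Flask
# choice and the response body are assumptions.
def _example_callback_receiver():
    from flask import Flask, request, jsonify
    app = Flask(__name__)

    @app.route("/update_order", methods=["POST"])
    def update_order():
        payload = request.get_json(force=True)
        number = payload["number"]              # MrWind waybill number
        status = payload["status"]              # CREATED/ASSIGNED/PICKED_UP/FINISHED/ERROR
        origin_order_id = payload["origin_order_id"]
        # ... update the local order identified by origin_order_id ...
        return jsonify({"ok": True})

    return app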
| mit | 2,032,496,956,239,848,700 | 34.435484 | 114 | 0.569413 | false |
robiame/AndroidGeodata | pil/ImageColor.py | 1 | 7665 | #
# The Python Imaging Library
# $Id$
#
# map CSS3-style colour description strings to RGB
#
# History:
# 2002-10-24 fl Added support for CSS-style color strings
# 2002-12-15 fl Added RGBA support
# 2004-03-27 fl Fixed remaining int() problems for Python 1.5.2
# 2004-07-19 fl Fixed gray/grey spelling issues
# 2009-03-05 fl Fixed rounding error in grayscale calculation
#
# Copyright (c) 2002-2004 by Secret Labs AB
# Copyright (c) 2002-2004 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
import Image
import re, string
try:
x = int("a", 16)
except TypeError:
# python 1.5.2 doesn't support int(x,b)
str2int = string.atoi
else:
str2int = int
##
# Convert color string to RGB tuple.
#
# @param color A CSS3-style colour string.
# @return An RGB-tuple.
# @exception ValueError If the color string could not be interpreted
# as an RGB value.
def getrgb(color):
# FIXME: add RGBA support
try:
rgb = colormap[color]
except KeyError:
try:
# fall back on case-insensitive lookup
rgb = colormap[string.lower(color)]
except KeyError:
rgb = None
# found color in cache
if rgb:
if isinstance(rgb, type(())):
return rgb
colormap[color] = rgb = getrgb(rgb)
return rgb
# check for known string formats
m = re.match("#\w\w\w$", color)
if m:
return (
str2int(color[1]*2, 16),
str2int(color[2]*2, 16),
str2int(color[3]*2, 16)
)
m = re.match("#\w\w\w\w\w\w$", color)
if m:
return (
str2int(color[1:3], 16),
str2int(color[3:5], 16),
str2int(color[5:7], 16)
)
m = re.match("rgb\(\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*\)$", color)
if m:
return (
str2int(m.group(1)),
str2int(m.group(2)),
str2int(m.group(3))
)
m = re.match("rgb\(\s*(\d+)%\s*,\s*(\d+)%\s*,\s*(\d+)%\s*\)$", color)
if m:
return (
int((str2int(m.group(1)) * 255) / 100.0 + 0.5),
int((str2int(m.group(2)) * 255) / 100.0 + 0.5),
int((str2int(m.group(3)) * 255) / 100.0 + 0.5)
)
m = re.match("hsl\(\s*(\d+)\s*,\s*(\d+)%\s*,\s*(\d+)%\s*\)$", color)
if m:
from colorsys import hls_to_rgb
rgb = hls_to_rgb(
float(m.group(1)) / 360.0,
float(m.group(3)) / 100.0,
float(m.group(2)) / 100.0,
)
return (
int(rgb[0] * 255 + 0.5),
int(rgb[1] * 255 + 0.5),
int(rgb[2] * 255 + 0.5)
)
raise ValueError("unknown color specifier: %r" % color)
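# Usage sketch (an added illustration, not part of the original module): the
# three accepted spellings and the tuples getrgb() returns for them, checked
# against the parsing rules and the colormap defined in this file.
def _getrgb_examples():
    assert getrgb("#0f0") == (0, 255, 0)               # 3-digit hex
    assert getrgb("gold") == (255, 215, 0)             # named colour from colormap
    assert getrgb("rgb(100%, 0%, 0%)") == (255, 0, 0)  # CSS functional notation
    return "ok"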
def getcolor(color, mode):
# same as getrgb, but converts the result to the given mode
color = getrgb(color)
if mode == "RGB":
return color
if mode == "RGBA":
r, g, b = color
return r, g, b, 255
if Image.getmodebase(mode) == "L":
r, g, b = color
return (r*299 + g*587 + b*114)/1000
return color
colormap = {
# X11 colour table (from "CSS3 module: Color working draft"), with
# gray/grey spelling issues fixed. This is a superset of HTML 4.0
# colour names used in CSS 1.
"aliceblue": "#f0f8ff",
"antiquewhite": "#faebd7",
"aqua": "#00ffff",
"aquamarine": "#7fffd4",
"azure": "#f0ffff",
"beige": "#f5f5dc",
"bisque": "#ffe4c4",
"black": "#000000",
"blanchedalmond": "#ffebcd",
"blue": "#0000ff",
"blueviolet": "#8a2be2",
"brown": "#a52a2a",
"burlywood": "#deb887",
"cadetblue": "#5f9ea0",
"chartreuse": "#7fff00",
"chocolate": "#d2691e",
"coral": "#ff7f50",
"cornflowerblue": "#6495ed",
"cornsilk": "#fff8dc",
"crimson": "#dc143c",
"cyan": "#00ffff",
"darkblue": "#00008b",
"darkcyan": "#008b8b",
"darkgoldenrod": "#b8860b",
"darkgray": "#a9a9a9",
"darkgrey": "#a9a9a9",
"darkgreen": "#006400",
"darkkhaki": "#bdb76b",
"darkmagenta": "#8b008b",
"darkolivegreen": "#556b2f",
"darkorange": "#ff8c00",
"darkorchid": "#9932cc",
"darkred": "#8b0000",
"darksalmon": "#e9967a",
"darkseagreen": "#8fbc8f",
"darkslateblue": "#483d8b",
"darkslategray": "#2f4f4f",
"darkslategrey": "#2f4f4f",
"darkturquoise": "#00ced1",
"darkviolet": "#9400d3",
"deeppink": "#ff1493",
"deepskyblue": "#00bfff",
"dimgray": "#696969",
"dimgrey": "#696969",
"dodgerblue": "#1e90ff",
"firebrick": "#b22222",
"floralwhite": "#fffaf0",
"forestgreen": "#228b22",
"fuchsia": "#ff00ff",
"gainsboro": "#dcdcdc",
"ghostwhite": "#f8f8ff",
"gold": "#ffd700",
"goldenrod": "#daa520",
"gray": "#808080",
"grey": "#808080",
"green": "#008000",
"greenyellow": "#adff2f",
"honeydew": "#f0fff0",
"hotpink": "#ff69b4",
"indianred": "#cd5c5c",
"indigo": "#4b0082",
"ivory": "#fffff0",
"khaki": "#f0e68c",
"lavender": "#e6e6fa",
"lavenderblush": "#fff0f5",
"lawngreen": "#7cfc00",
"lemonchiffon": "#fffacd",
"lightblue": "#add8e6",
"lightcoral": "#f08080",
"lightcyan": "#e0ffff",
"lightgoldenrodyellow": "#fafad2",
"lightgreen": "#90ee90",
"lightgray": "#d3d3d3",
"lightgrey": "#d3d3d3",
"lightpink": "#ffb6c1",
"lightsalmon": "#ffa07a",
"lightseagreen": "#20b2aa",
"lightskyblue": "#87cefa",
"lightslategray": "#778899",
"lightslategrey": "#778899",
"lightsteelblue": "#b0c4de",
"lightyellow": "#ffffe0",
"lime": "#00ff00",
"limegreen": "#32cd32",
"linen": "#faf0e6",
"magenta": "#ff00ff",
"maroon": "#800000",
"mediumaquamarine": "#66cdaa",
"mediumblue": "#0000cd",
"mediumorchid": "#ba55d3",
"mediumpurple": "#9370db",
"mediumseagreen": "#3cb371",
"mediumslateblue": "#7b68ee",
"mediumspringgreen": "#00fa9a",
"mediumturquoise": "#48d1cc",
"mediumvioletred": "#c71585",
"midnightblue": "#191970",
"mintcream": "#f5fffa",
"mistyrose": "#ffe4e1",
"moccasin": "#ffe4b5",
"navajowhite": "#ffdead",
"navy": "#000080",
"oldlace": "#fdf5e6",
"olive": "#808000",
"olivedrab": "#6b8e23",
"orange": "#ffa500",
"orangered": "#ff4500",
"orchid": "#da70d6",
"palegoldenrod": "#eee8aa",
"palegreen": "#98fb98",
"paleturquoise": "#afeeee",
"palevioletred": "#db7093",
"papayawhip": "#ffefd5",
"peachpuff": "#ffdab9",
"peru": "#cd853f",
"pink": "#ffc0cb",
"plum": "#dda0dd",
"powderblue": "#b0e0e6",
"purple": "#800080",
"red": "#ff0000",
"rosybrown": "#bc8f8f",
"royalblue": "#4169e1",
"saddlebrown": "#8b4513",
"salmon": "#fa8072",
"sandybrown": "#f4a460",
"seagreen": "#2e8b57",
"seashell": "#fff5ee",
"sienna": "#a0522d",
"silver": "#c0c0c0",
"skyblue": "#87ceeb",
"slateblue": "#6a5acd",
"slategray": "#708090",
"slategrey": "#708090",
"snow": "#fffafa",
"springgreen": "#00ff7f",
"steelblue": "#4682b4",
"tan": "#d2b48c",
"teal": "#008080",
"thistle": "#d8bfd8",
"tomato": "#ff6347",
"turquoise": "#40e0d0",
"violet": "#ee82ee",
"wheat": "#f5deb3",
"white": "#ffffff",
"whitesmoke": "#f5f5f5",
"yellow": "#ffff00",
"yellowgreen": "#9acd32",
}
| mit | -670,456,465,135,821,300 | 27.144487 | 73 | 0.515982 | false |
INSEBRE/lanparty.iesebre.com | test/fcgi/fcgi.py | 1 | 15432 | #!/usr/bin/python
#------------------------------------------------------------------------
# Copyright (c) 1998 by Total Control Software
# All Rights Reserved
#------------------------------------------------------------------------
#
# Module Name: fcgi.py
#
# Description: Handles communication with the FastCGI module of the
# web server without using the FastCGI developers kit, but
# will also work in a non-FastCGI environment, (straight CGI.)
# This module was originally fetched from someplace on the
# Net (I don't remember where and I can't find it now...) and
# has been significantly modified to fix several bugs, be more
# readable, more robust at handling large CGI data and return
# document sizes, and also to fit the model that we had previously
# used for FastCGI.
#
# WARNING: If you don't know what you are doing, don't tinker with this
# module!
#
# Creation Date: 1/30/98 2:59:04PM
#
# License: This is free software. You may use this software for any
# purpose including modification/redistribution, so long as
# this header remains intact and that you do not claim any
# rights of ownership or authorship of this software. This
# software has been tested, but no warranty is expressed or
# implied.
#
#------------------------------------------------------------------------
import os, sys, string, socket, errno
from cStringIO import StringIO
import cgi
#---------------------------------------------------------------------------
# Set various FastCGI constants
# Maximum number of requests that can be handled
FCGI_MAX_REQS=1
FCGI_MAX_CONNS = 1
# Supported version of the FastCGI protocol
FCGI_VERSION_1 = 1
# Boolean: can this application multiplex connections?
FCGI_MPXS_CONNS=0
# Record types
FCGI_BEGIN_REQUEST = 1 ; FCGI_ABORT_REQUEST = 2 ; FCGI_END_REQUEST = 3
FCGI_PARAMS = 4 ; FCGI_STDIN = 5 ; FCGI_STDOUT = 6
FCGI_STDERR = 7 ; FCGI_DATA = 8 ; FCGI_GET_VALUES = 9
FCGI_GET_VALUES_RESULT = 10
FCGI_UNKNOWN_TYPE = 11
FCGI_MAXTYPE = FCGI_UNKNOWN_TYPE
# Types of management records
ManagementTypes = [FCGI_GET_VALUES]
FCGI_NULL_REQUEST_ID=0
# Masks for flags component of FCGI_BEGIN_REQUEST
FCGI_KEEP_CONN = 1
# Values for role component of FCGI_BEGIN_REQUEST
FCGI_RESPONDER = 1 ; FCGI_AUTHORIZER = 2 ; FCGI_FILTER = 3
# Values for protocolStatus component of FCGI_END_REQUEST
FCGI_REQUEST_COMPLETE = 0 # Request completed nicely
FCGI_CANT_MPX_CONN = 1 # This app can't multiplex
FCGI_OVERLOADED = 2 # New request rejected; too busy
FCGI_UNKNOWN_ROLE = 3 # Role value not known
error = 'fcgi.error'
#---------------------------------------------------------------------------
# The following function is only used during debugging; it isn't called
# anywhere at the moment.  Named debug_log so that it does not shadow the
# module-level `error` value defined above, which is used by raise statements below.
def debug_log(msg):
    "Append a string to /tmp/err"
    errf=open('/tmp/err', 'a+')
    errf.write(msg+'\n')
    errf.close()
#---------------------------------------------------------------------------
class record:
"Class representing FastCGI records"
def __init__(self):
self.version = FCGI_VERSION_1
self.recType = FCGI_UNKNOWN_TYPE
self.reqId = FCGI_NULL_REQUEST_ID
self.content = ""
#----------------------------------------
def readRecord(self, sock):
s = map(ord, sock.recv(8))
self.version, self.recType, paddingLength = s[0], s[1], s[6]
self.reqId, contentLength = (s[2]<<8)+s[3], (s[4]<<8)+s[5]
self.content = ""
while len(self.content) < contentLength:
data = sock.recv(contentLength - len(self.content))
self.content = self.content + data
if paddingLength != 0:
padding = sock.recv(paddingLength)
# Parse the content information
c = self.content
if self.recType == FCGI_BEGIN_REQUEST:
self.role = (ord(c[0])<<8) + ord(c[1])
self.flags = ord(c[2])
elif self.recType == FCGI_UNKNOWN_TYPE:
self.unknownType = ord(c[0])
elif self.recType == FCGI_GET_VALUES or self.recType == FCGI_PARAMS:
self.values={}
pos=0
while pos < len(c):
name, value, pos = readPair(c, pos)
self.values[name] = value
elif self.recType == FCGI_END_REQUEST:
b = map(ord, c[0:4])
self.appStatus = (b[0]<<24) + (b[1]<<16) + (b[2]<<8) + b[3]
self.protocolStatus = ord(c[4])
#----------------------------------------
def writeRecord(self, sock):
content = self.content
if self.recType == FCGI_BEGIN_REQUEST:
content = chr(self.role>>8) + chr(self.role & 255) + chr(self.flags) + 5*'\000'
elif self.recType == FCGI_UNKNOWN_TYPE:
content = chr(self.unknownType) + 7*'\000'
elif self.recType==FCGI_GET_VALUES or self.recType==FCGI_PARAMS:
content = ""
for i in self.values.keys():
content = content + writePair(i, self.values[i])
elif self.recType==FCGI_END_REQUEST:
v = self.appStatus
content = chr((v>>24)&255) + chr((v>>16)&255) + chr((v>>8)&255) + chr(v&255)
content = content + chr(self.protocolStatus) + 3*'\000'
cLen = len(content)
eLen = (cLen + 7) & (0xFFFF - 7) # align to an 8-byte boundary
padLen = eLen - cLen
hdr = [ self.version,
self.recType,
self.reqId >> 8,
self.reqId & 255,
cLen >> 8,
cLen & 255,
padLen,
0]
hdr = string.joinfields(map(chr, hdr), '')
sock.send(hdr + content + padLen*'\000')
#---------------------------------------------------------------------------
def readPair(s, pos):
nameLen=ord(s[pos]) ; pos=pos+1
if nameLen & 128:
b=map(ord, s[pos:pos+3]) ; pos=pos+3
nameLen=((nameLen&127)<<24) + (b[0]<<16) + (b[1]<<8) + b[2]
valueLen=ord(s[pos]) ; pos=pos+1
if valueLen & 128:
b=map(ord, s[pos:pos+3]) ; pos=pos+3
valueLen=((valueLen&127)<<24) + (b[0]<<16) + (b[1]<<8) + b[2]
return ( s[pos:pos+nameLen], s[pos+nameLen:pos+nameLen+valueLen],
pos+nameLen+valueLen )
#---------------------------------------------------------------------------
def writePair(name, value):
l=len(name)
if l<128: s=chr(l)
else:
s=chr(128|(l>>24)&255) + chr((l>>16)&255) + chr((l>>8)&255) + chr(l&255)
l=len(value)
if l<128: s=s+chr(l)
else:
s=s+chr(128|(l>>24)&255) + chr((l>>16)&255) + chr((l>>8)&255) + chr(l&255)
return s + name + value
#---------------------------------------------------------------------------
def HandleManTypes(r, conn):
if r.recType == FCGI_GET_VALUES:
r.recType = FCGI_GET_VALUES_RESULT
v={}
vars={'FCGI_MAX_CONNS' : FCGI_MAX_CONNS,
'FCGI_MAX_REQS' : FCGI_MAX_REQS,
'FCGI_MPXS_CONNS': FCGI_MPXS_CONNS}
for i in r.values.keys():
if vars.has_key(i): v[i]=vars[i]
r.values=vars
r.writeRecord(conn)
#---------------------------------------------------------------------------
#---------------------------------------------------------------------------
_isFCGI = 1 # assume it is until we find out for sure
def isFCGI():
global _isFCGI
return _isFCGI
#---------------------------------------------------------------------------
_init = None
_sock = None
class FCGI:
def __init__(self):
self.haveFinished = 0
if _init == None:
_startup()
if not isFCGI():
self.haveFinished = 1
self.inp, self.out, self.err, self.env = \
sys.stdin, sys.stdout, sys.stderr, os.environ
return
if os.environ.has_key('FCGI_WEB_SERVER_ADDRS'):
good_addrs=string.split(os.environ['FCGI_WEB_SERVER_ADDRS'], ',')
            good_addrs=map(string.strip, good_addrs) # Remove whitespace
else:
good_addrs=None
self.conn, addr=_sock.accept()
stdin, data="", ""
self.env = {}
self.requestId=0
remaining=1
# Check if the connection is from a legal address
if good_addrs!=None and addr not in good_addrs:
raise error, 'Connection from invalid server!'
while remaining:
r=record(); r.readRecord(self.conn)
if r.recType in ManagementTypes:
HandleManTypes(r, self.conn)
elif r.reqId==0:
# Oh, poopy. It's a management record of an unknown
# type. Signal the error.
r2=record()
r2.recType=FCGI_UNKNOWN_TYPE ; r2.unknownType=r.recType
r2.writeRecord(self.conn)
continue # Charge onwards
# Ignore requests that aren't active
elif r.reqId != self.requestId and r.recType != FCGI_BEGIN_REQUEST:
continue
# If we're already doing a request, ignore further BEGIN_REQUESTs
elif r.recType == FCGI_BEGIN_REQUEST and self.requestId != 0:
continue
# Begin a new request
if r.recType == FCGI_BEGIN_REQUEST:
self.requestId = r.reqId
if r.role == FCGI_AUTHORIZER: remaining=1
elif r.role == FCGI_RESPONDER: remaining=2
elif r.role == FCGI_FILTER: remaining=3
elif r.recType == FCGI_PARAMS:
if r.content == "":
remaining=remaining-1
else:
for i in r.values.keys():
self.env[i] = r.values[i]
elif r.recType == FCGI_STDIN:
if r.content == "":
remaining=remaining-1
else:
stdin=stdin+r.content
elif r.recType==FCGI_DATA:
if r.content == "":
remaining=remaining-1
else:
data=data+r.content
# end of while remaining:
self.inp = sys.stdin = StringIO(stdin)
self.err = sys.stderr = StringIO()
self.out = sys.stdout = StringIO()
self.data = StringIO(data)
def __del__(self):
self.Finish()
def Finish(self, status=0):
if not self.haveFinished:
self.haveFinished = 1
self.err.seek(0,0)
self.out.seek(0,0)
r=record()
r.recType = FCGI_STDERR
r.reqId = self.requestId
data = self.err.read()
if data:
while data:
chunk, data = self.getNextChunk(data)
r.content = chunk
r.writeRecord(self.conn)
r.content="" ; r.writeRecord(self.conn) # Terminate stream
r.recType = FCGI_STDOUT
data = self.out.read()
while data:
chunk, data = self.getNextChunk(data)
r.content = chunk
r.writeRecord(self.conn)
r.content="" ; r.writeRecord(self.conn) # Terminate stream
r=record()
r.recType=FCGI_END_REQUEST
r.reqId=self.requestId
r.appStatus=status
r.protocolStatus=FCGI_REQUEST_COMPLETE
r.writeRecord(self.conn)
self.conn.close()
def getFieldStorage(self):
method = 'GET'
if self.env.has_key('REQUEST_METHOD'):
method = string.upper(self.env['REQUEST_METHOD'])
if method == 'GET':
return cgi.FieldStorage(environ=self.env, keep_blank_values=1)
else:
return cgi.FieldStorage(fp=self.inp, environ=self.env, keep_blank_values=1)
def getNextChunk(self, data):
chunk = data[:8192]
data = data[8192:]
return chunk, data
Accept = FCGI # alias for backwards compatibility
#---------------------------------------------------------------------------
def _startup():
global _init
_init = 1
try:
s=socket.fromfd(sys.stdin.fileno(), socket.AF_INET,
socket.SOCK_STREAM)
s.getpeername()
except socket.error, (err, errmsg):
if err!=errno.ENOTCONN: # must be a non-fastCGI environment
global _isFCGI
_isFCGI = 0
return
global _sock
_sock = s
#---------------------------------------------------------------------------
def _test():
counter=0
try:
while isFCGI():
req = Accept()
counter=counter+1
try:
fs = req.getFieldStorage()
size = string.atoi(fs['size'].value)
doc = ['*' * size]
except:
doc = ['<HTML><HEAD><TITLE>FCGI TestApp</TITLE></HEAD>\n<BODY>\n']
doc.append('<H2>FCGI TestApp</H2><P>')
doc.append('<b>request count</b> = %d<br>' % counter)
# doc.append('<b>pid</b> = %s<br>' % os.getpid())
# if req.env.has_key('CONTENT_LENGTH'):
# cl = string.atoi(req.env['CONTENT_LENGTH'])
# doc.append('<br><b>POST data (%s):</b><br><pre>' % cl)
# keys = fs.keys()
# keys.sort()
# for k in keys:
# val = fs[k]
# if type(val) == type([]):
# doc.append(' <b>%-15s :</b> %s\n' % (k, val))
# else:
# doc.append(' <b>%-15s :</b> %s\n' % (k, val.value))
# doc.append('</pre>')
#
#
# doc.append('<P><HR><P><pre>')
# keys = req.env.keys()
# keys.sort()
# for k in keys:
# doc.append('<b>%-20s :</b> %s\n' % (k, req.env[k]))
# doc.append('\n</pre><P><HR>\n')
doc.append('</BODY></HTML>\n')
doc = string.join(doc, '')
req.out.write('Content-length: %s\r\n'
'Content-type: text/html\r\n'
'Cache-Control: no-cache\r\n'
'\r\n'
% len(doc))
req.out.write(doc)
req.Finish()
except:
import traceback
f = open('traceback', 'w')
traceback.print_exc( file = f )
# f.write('%s' % doc)
if __name__=='__main__':
#import pdb
#pdb.run('_test()')
_test()
| gpl-2.0 | 788,579,133,595,370,900 | 33.072727 | 91 | 0.463971 | false |
W4ngatang/DocumentSummarizer | postprocess/gen_preds.py | 1 | 3663 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Create the data for the LSTM.
"""
import os
import sys
import argparse
import numpy as np
import h5py
import itertools
import pdb
import re
from collections import defaultdict
def gen_preds(args, gold=0):
if not gold:
try:
k = args.k
srctxt = open(args.srctxt, 'r') # text from source
docs = srctxt.read().split('\n')[:-1]
predfile = h5py.File(args.predfile, 'r') # predictions
preds = np.array(predfile['preds'])
srcfile = h5py.File(args.srcfile, 'r') # hdf5 fed into lua
order = np.array(srcfile['source_order'])
lengths = np.array(srcfile['target_l_all'])
sorted_docs = [] # need to get pruned version of docs
for idx in order:
sorted_docs.append(docs[idx])
path = args.outfile + '/tmp_SYSTEM/'+args.system + '/'
if not os.path.exists(path):
os.makedirs(path)
for i, (doc,pred) in enumerate(zip(sorted_docs,preds)):
idxs = (-pred[1:lengths[i]-1]).argsort()[:k] # get the k-highest scoring indices; NEED TO NOT COUNT BOS/EOS
idxs.sort() # sort them so they follow order of the article
sents = doc.split("</s>") # get corresponding sentences
summary = [sents[idx] for idx in idxs]
with open(path+"news"+str(i)+"." + args.system + ".system", "w+") as fh:
for s in summary:
fh.write(s+'\n')
except Exception as e:
pdb.set_trace()
else: # lazy coding
try:
srcfile = h5py.File(args.srcfile, 'r')
order = np.array(srcfile['source_order'])
goldtxt = open(args.goldfile, 'r')
docs = goldtxt.read().split('\n')[:-1]
sorted_docs = [] # need to get pruned version of docs
for idx in order:
sorted_docs.append(docs[idx])
path = args.outfile + '/tmp_GOLD/'
if not os.path.exists(path):
os.makedirs(path)
for i, summary in enumerate(sorted_docs):
task = "news"+str(i)
if not os.path.exists(path+task):
os.makedirs(path+task)
with open(path+task+"/"+task+".1.gold", "w+") as fh:
for s in summary.split(" </s> "):
fh.write(s+'\n')
except Exception as e:
pdb.set_trace()
def main(arguments):
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--k', help="Number of highest scoring sentences to take", type=int, default=3)
parser.add_argument('--srcfile', help="Path to the source hdf5 file to get sentence order from. ", type=str)
parser.add_argument('--srctxt', help="Path to the source text. ", type=str)
parser.add_argument('--predfile', help="Path to the predictions. ", type=str)
parser.add_argument('--goldfile', help="Path to the gold standard summaries. ", type=str)
parser.add_argument('--outfile', help="Path to the folder that will contain the files. ", type=str)
parser.add_argument('--system', help="Name of system; \'gold\' for gold", type=str, default='ne')
parser.add_argument('--rougedir', help="Name of directory to ROUGE system + reference files", type=str)
args = parser.parse_args(arguments)
gen_preds(args) # generate predictions
gen_preds(args, 1) # generate gold
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| mit | -3,497,275,557,048,698,400 | 41.103448 | 123 | 0.568387 | false |
letouriste001/SmartForest_2.0 | python3.4Smartforest/lib/python3.4/site-packages/django/utils/encoding.py | 1 | 10090 | # -*- encoding: utf-8 -*-
from __future__ import unicode_literals
import codecs
import datetime
import locale
from decimal import Decimal
from django.utils import six
from django.utils.functional import Promise
from django.utils.six.moves.urllib.parse import quote, unquote
if six.PY3:
from urllib.parse import unquote_to_bytes
class DjangoUnicodeDecodeError(UnicodeDecodeError):
def __init__(self, obj, *args):
self.obj = obj
UnicodeDecodeError.__init__(self, *args)
def __str__(self):
original = UnicodeDecodeError.__str__(self)
return '%s. You passed in %r (%s)' % (original, self.obj,
type(self.obj))
# For backwards compatibility. (originally in Django, then added to six 1.9)
python_2_unicode_compatible = six.python_2_unicode_compatible
def smart_text(s, encoding='utf-8', strings_only=False, errors='strict'):
"""
Returns a text object representing 's' -- unicode on Python 2 and str on
Python 3. Treats bytestrings using the 'encoding' codec.
If strings_only is True, don't convert (some) non-string-like objects.
"""
if isinstance(s, Promise):
# The input is the result of a gettext_lazy() call.
return s
return force_text(s, encoding, strings_only, errors)
_PROTECTED_TYPES = six.integer_types + (type(None), float, Decimal,
datetime.datetime, datetime.date, datetime.time)
def is_protected_type(obj):
"""Determine if the object instance is of a protected type.
Objects of protected types are preserved as-is when passed to
force_text(strings_only=True).
"""
return isinstance(obj, _PROTECTED_TYPES)
def force_text(s, encoding='utf-8', strings_only=False, errors='strict'):
"""
Similar to smart_text, except that lazy instances are resolved to
strings, rather than kept as lazy objects.
If strings_only is True, don't convert (some) non-string-like objects.
"""
# Handle the common case first for performance reasons.
if issubclass(type(s), six.text_type):
return s
if strings_only and is_protected_type(s):
return s
try:
if not issubclass(type(s), six.string_types):
if six.PY3:
if isinstance(s, bytes):
s = six.text_type(s, encoding, errors)
else:
s = six.text_type(s)
elif hasattr(s, '__unicode__'):
s = six.text_type(s)
else:
s = six.text_type(bytes(s), encoding, errors)
else:
# Note: We use .decode() here, instead of six.text_type(s, encoding,
# errors), so that if s is a SafeBytes, it ends up being a
# SafeText at the end.
s = s.decode(encoding, errors)
except UnicodeDecodeError as e:
if not isinstance(s, Exception):
raise DjangoUnicodeDecodeError(s, *e.args)
else:
# If we get to here, the caller has passed in an Exception
# subclass populated with non-ASCII bytestring data without a
# working unicode method. Try to handle this without raising a
# further exception by individually forcing the exception args
# to unicode.
s = ' '.join(force_text(arg, encoding, strings_only, errors)
for arg in s)
return s
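# Illustrative note (added here, not part of the upstream module): the practical
# difference between smart_text() and force_text() is how lazy translation
# promises are treated. A hedged sketch using Django's lazy gettext helper:
#   from django.utils.translation import ugettext_lazy
#   lazy_msg = ugettext_lazy('Hello')
#   smart_text(lazy_msg)   # returns the lazy Promise object unchanged
#   force_text(lazy_msg)   # evaluates the promise and returns real text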
def smart_bytes(s, encoding='utf-8', strings_only=False, errors='strict'):
"""
Returns a bytestring version of 's', encoded as specified in 'encoding'.
If strings_only is True, don't convert (some) non-string-like objects.
"""
if isinstance(s, Promise):
# The input is the result of a gettext_lazy() call.
return s
return force_bytes(s, encoding, strings_only, errors)
def force_bytes(s, encoding='utf-8', strings_only=False, errors='strict'):
"""
Similar to smart_bytes, except that lazy instances are resolved to
strings, rather than kept as lazy objects.
If strings_only is True, don't convert (some) non-string-like objects.
"""
# Handle the common case first for performance reasons.
if isinstance(s, bytes):
if encoding == 'utf-8':
return s
else:
return s.decode('utf-8', errors).encode(encoding, errors)
if strings_only and is_protected_type(s):
return s
if isinstance(s, six.memoryview):
return bytes(s)
if isinstance(s, Promise):
return six.text_type(s).encode(encoding, errors)
if not isinstance(s, six.string_types):
try:
if six.PY3:
return six.text_type(s).encode(encoding)
else:
return bytes(s)
except UnicodeEncodeError:
if isinstance(s, Exception):
# An Exception subclass containing non-ASCII data that doesn't
# know how to print itself properly. We shouldn't raise a
# further exception.
return b' '.join(force_bytes(arg, encoding, strings_only, errors)
for arg in s)
return six.text_type(s).encode(encoding, errors)
else:
return s.encode(encoding, errors)
if six.PY3:
smart_str = smart_text
force_str = force_text
else:
smart_str = smart_bytes
force_str = force_bytes
# backwards compatibility for Python 2
smart_unicode = smart_text
force_unicode = force_text
smart_str.__doc__ = """
Apply smart_text in Python 3 and smart_bytes in Python 2.
This is suitable for writing to sys.stdout (for instance).
"""
force_str.__doc__ = """
Apply force_text in Python 3 and force_bytes in Python 2.
"""
def iri_to_uri(iri):
"""
Convert an Internationalized Resource Identifier (IRI) portion to a URI
portion that is suitable for inclusion in a URL.
This is the algorithm from section 3.1 of RFC 3987. However, since we are
assuming input is either UTF-8 or unicode already, we can simplify things a
little from the full method.
Takes an IRI in UTF-8 bytes (e.g. '/I \xe2\x99\xa5 Django/') or unicode
(e.g. '/I ♥ Django/') and returns ASCII bytes containing the encoded result
(e.g. '/I%20%E2%99%A5%20Django/').
"""
# The list of safe characters here is constructed from the "reserved" and
# "unreserved" characters specified in sections 2.2 and 2.3 of RFC 3986:
# reserved = gen-delims / sub-delims
# gen-delims = ":" / "/" / "?" / "#" / "[" / "]" / "@"
# sub-delims = "!" / "$" / "&" / "'" / "(" / ")"
# / "*" / "+" / "," / ";" / "="
# unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~"
# Of the unreserved characters, urllib.quote already considers all but
# the ~ safe.
# The % character is also added to the list of safe characters here, as the
# end of section 3.1 of RFC 3987 specifically mentions that % must not be
# converted.
if iri is None:
return iri
return quote(force_bytes(iri), safe=b"/#%[]=:;$&()+,!?*@'~")
def uri_to_iri(uri):
"""
Converts a Uniform Resource Identifier(URI) into an Internationalized
Resource Identifier(IRI).
This is the algorithm from section 3.2 of RFC 3987.
Takes an URI in ASCII bytes (e.g. '/I%20%E2%99%A5%20Django/') and returns
unicode containing the encoded result (e.g. '/I \xe2\x99\xa5 Django/').
"""
if uri is None:
return uri
uri = force_bytes(uri)
iri = unquote_to_bytes(uri) if six.PY3 else unquote(uri)
return repercent_broken_unicode(iri).decode('utf-8')
def escape_uri_path(path):
"""
Escape the unsafe characters from the path portion of a Uniform Resource
Identifier (URI).
"""
# These are the "reserved" and "unreserved" characters specified in
# sections 2.2 and 2.3 of RFC 2396:
# reserved = ";" | "/" | "?" | ":" | "@" | "&" | "=" | "+" | "$" | ","
# unreserved = alphanum | mark
# mark = "-" | "_" | "." | "!" | "~" | "*" | "'" | "(" | ")"
# The list of safe characters here is constructed subtracting ";", "=",
# and "?" according to section 3.3 of RFC 2396.
# The reason for not subtracting and escaping "/" is that we are escaping
# the entire path, not a path segment.
return quote(force_bytes(path), safe=b"/:@&+$,-_.!~*'()")
def repercent_broken_unicode(path):
"""
As per section 3.2 of RFC 3987, step three of converting a URI into an IRI,
we need to re-percent-encode any octet produced that is not part of a
strictly legal UTF-8 octet sequence.
"""
try:
path.decode('utf-8')
except UnicodeDecodeError as e:
repercent = quote(path[e.start:e.end], safe=b"/#%[]=:;$&()+,!?*@'~")
path = repercent_broken_unicode(
path[:e.start] + force_bytes(repercent) + path[e.end:])
return path
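# Hedged example (added for illustration, not in the upstream module): a path
# containing a byte that is not legal UTF-8 gets that octet re-percent-encoded,
# e.g. repercent_broken_unicode(b'/fran\xe7ois/') is expected to yield
# b'/fran%E7ois/', while already-valid UTF-8 input is returned unchanged.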
def filepath_to_uri(path):
"""Convert a file system path to a URI portion that is suitable for
inclusion in a URL.
We are assuming input is either UTF-8 or unicode already.
This method will encode certain chars that would normally be recognized as
special chars for URIs. Note that this method does not encode the '
character, as it is a valid character within URIs. See
encodeURIComponent() JavaScript function for more details.
Returns an ASCII string containing the encoded result.
"""
if path is None:
return path
# I know about `os.sep` and `os.altsep` but I want to leave
# some flexibility for hardcoding separators.
return quote(force_bytes(path).replace(b"\\", b"/"), safe=b"/~!*()'")
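# Hedged example (illustration only): filepath_to_uri(u'portfolio\\caf\xe9.png')
# is expected to return 'portfolio/caf%C3%A9.png' -- backslashes are normalized
# to forward slashes and non-ASCII characters are percent-encoded as UTF-8.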
def get_system_encoding():
"""
The encoding of the default system locale but falls back to the given
fallback encoding if the encoding is unsupported by python or could
not be determined. See tickets #10335 and #5846
"""
try:
encoding = locale.getdefaultlocale()[1] or 'ascii'
codecs.lookup(encoding)
except Exception:
encoding = 'ascii'
return encoding
DEFAULT_LOCALE_ENCODING = get_system_encoding()
| mit | 3,089,730,319,248,334,000 | 34.900356 | 81 | 0.619151 | false |
DavidHHShao/slack | tests/unit/files/test_list.py | 2 | 1462 | # Copyright (c) 2014 Katsuya Noguchi
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from mock import patch
import unittest
import slack
import slack.files
import slack.http_client
slack.api_token = 'my_token'
class TestFilesList(unittest.TestCase):
@patch.object(slack.http_client, 'get')
def test_info(self, http_get_mock):
slack.files.list()
http_get_mock.assert_called_with('files.list', {
'token': 'my_token',
})
| mit | 7,748,754,576,475,866,000 | 37.473684 | 74 | 0.743502 | false |
eirannejad/pyRevit | pyrevitlib/pyrevit/output/__init__.py | 1 | 23733 | """Provide access to output window and its functionality.
This module provides access to the output window for the currently running
pyRevit command. The proper way to access this wrapper object is through
the :func:`get_output` of :mod:`pyrevit.script` module. This method, in return
uses the `pyrevit.output` module to get access to the output wrapper.
Example:
>>> from pyrevit import script
>>> output = script.get_output()
Here is the source of :func:`pyrevit.script.get_output`. As you can see this
functions calls the :func:`pyrevit.output.get_output` to receive the
output wrapper.
.. literalinclude:: ../../pyrevitlib/pyrevit/script.py
:pyobject: get_output
"""
from __future__ import print_function
import os.path as op
import itertools
from pyrevit import HOST_APP, EXEC_PARAMS
from pyrevit import framework
from pyrevit import coreutils
from pyrevit.coreutils import logger
from pyrevit.coreutils import markdown, charts
from pyrevit.coreutils import envvars
from pyrevit.runtime.types import ScriptConsoleManager
from pyrevit.output import linkmaker
from pyrevit.userconfig import user_config
from pyrevit import DB
#pylint: disable=W0703,C0302,C0103
mlogger = logger.get_logger(__name__)
DEFAULT_STYLESHEET_NAME = 'outputstyles.css'
def docclosing_eventhandler(sender, args): #pylint: disable=W0613
"""Close all output window on document closing."""
ScriptConsoleManager.CloseActiveOutputWindows()
def setup_output_closer():
"""Setup document closing event listener."""
HOST_APP.app.DocumentClosing += \
framework.EventHandler[DB.Events.DocumentClosingEventArgs](
docclosing_eventhandler
)
def set_stylesheet(stylesheet):
"""Set active css stylesheet used by output window.
Args:
stylesheet (str): full path to stylesheet file
"""
if op.isfile(stylesheet):
envvars.set_pyrevit_env_var(envvars.OUTPUT_STYLESHEET_ENVVAR,
stylesheet)
# do not store this setting forcefully
# each repo should default to its own stylesheet
# user_config.output_stylesheet = stylesheet
def get_stylesheet():
"""Return active css stylesheet used by output window."""
return envvars.get_pyrevit_env_var(envvars.OUTPUT_STYLESHEET_ENVVAR)
def get_default_stylesheet():
"""Return default css stylesheet used by output window."""
return op.join(op.dirname(__file__), DEFAULT_STYLESHEET_NAME)
def reset_stylesheet():
"""Reset active stylesheet to default."""
envvars.set_pyrevit_env_var(envvars.OUTPUT_STYLESHEET_ENVVAR,
get_default_stylesheet())
# setup output window stylesheet
if not EXEC_PARAMS.doc_mode:
active_stylesheet = \
user_config.output_stylesheet or get_default_stylesheet()
set_stylesheet(active_stylesheet)
class PyRevitOutputWindow(object):
"""Wrapper to interact with the output window."""
@property
def window(self):
"""``PyRevitLabs.PyRevit.Runtime.ScriptConsole``: Return output window object."""
return EXEC_PARAMS.window_handle
@property
def renderer(self):
"""Return html renderer inside output window.
Returns:
``System.Windows.Forms.WebBrowser`` (In current implementation)
"""
if self.window:
return self.window.renderer
@property
def output_id(self):
"""str: Return id of the output window.
In current implementation, Id of output window is equal to the
unique id of the pyRevit command it belongs to. This means that all
output windows belonging to the same pyRevit command, will have
identical output_id values.
"""
if self.window:
return self.window.OutputId
@property
def output_uniqueid(self):
"""str: Return unique id of the output window.
In current implementation, unique id of output window is a GUID string
generated when the output window is opened. This id is unique to the
instance of output window.
"""
if self.window:
return self.window.OutputUniqueId
@property
def is_closed_by_user(self):
return self.window.ClosedByUser
@property
def last_line(self):
return self.window.GetLastLine()
@property
def debug_mode(self):
"""Set debug mode on output window and stream.
This will cause the output window to print information about the
buffer stream and other aspects of the output window mechanism.
"""
return EXEC_PARAMS.output_stream.PrintDebugInfo
@debug_mode.setter
def debug_mode(self, value):
EXEC_PARAMS.output_stream.PrintDebugInfo = value
def _get_head_element(self):
return self.renderer.Document.GetElementsByTagName('head')[0]
def _get_body_element(self):
return self.renderer.Document.GetElementsByTagName('body')[0]
def self_destruct(self, seconds):
"""Set self-destruct (close window) timer.
Args:
seconds (int): number of seconds after which window is closed.
"""
if self.window:
self.window.SelfDestructTimer(seconds)
def inject_to_head(self, element_tag, element_contents, attribs=None):
"""Inject html element to current html head of the output window.
Args:
element_tag (str): html tag of the element e.g. 'div'
element_contents (str): html code of the element contents
attribs (:obj:`dict`): dictionary of attribute names and value
Example:
>>> output = pyrevit.output.get_output()
>>> output.inject_to_head('script',
'', # no script since it's a link
{'src': js_script_file_path})
"""
html_element = self.renderer.Document.CreateElement(element_tag)
if element_contents:
html_element.InnerHtml = element_contents
if attribs:
for attribute, value in attribs.items():
html_element.SetAttribute(attribute, value)
# inject the script into head
head_el = self._get_head_element()
head_el.AppendChild(html_element)
if self.window:
self.window.WaitReadyBrowser()
def inject_to_body(self, element_tag, element_contents, attribs=None):
"""Inject html element to current html body of the output window.
Args:
element_tag (str): html tag of the element e.g. 'div'
element_contents (str): html code of the element contents
attribs (:obj:`dict`): dictionary of attribute names and value
Example:
>>> output = pyrevit.output.get_output()
>>> output.inject_to_body('script',
'', # no script since it's a link
{'src': js_script_file_path})
"""
html_element = self.renderer.Document.CreateElement(element_tag)
if element_contents:
html_element.InnerHtml = element_contents
if attribs:
for attribute, value in attribs.items():
html_element.SetAttribute(attribute, value)
# inject the script into body
body_el = self._get_body_element()
body_el.AppendChild(html_element)
if self.window:
self.window.WaitReadyBrowser()
def inject_script(self, script_code, attribs=None, body=False):
"""Inject script tag into current head (or body) of the output window.
Args:
script_code (str): javascript code
attribs (:obj:`dict`): dictionary of attribute names and value
body (bool, optional): injects script into body instead of head
Example:
>>> output = pyrevit.output.get_output()
>>> output.inject_script('', # no script since it's a link
{'src': js_script_file_path})
"""
if body:
self.inject_to_body('script', script_code, attribs=attribs)
else:
self.inject_to_head('script', script_code, attribs=attribs)
def add_style(self, style_code, attribs=None):
"""Inject style tag into current html head of the output window.
Args:
style_code (str): css styling code
attribs (:obj:`dict`): dictionary of attribute names and value
Example:
>>> output = pyrevit.output.get_output()
>>> output.add_style('body { color: blue; }')
"""
self.inject_to_head('style', style_code, attribs=attribs)
def get_head_html(self):
"""str: Return inner code of html head element."""
return self._get_head_element().InnerHtml
def set_title(self, new_title):
"""Set window title to the new title."""
if self.window:
self.window.Title = new_title
def set_width(self, width):
"""Set window width to the new width."""
if self.window:
self.window.Width = width
def set_height(self, height):
"""Set window height to the new height."""
if self.window:
self.window.Height = height
def set_font(self, font_family, font_size):
"""Set window font family to the new font family and size.
Args:
font_family (str): font family name e.g. 'Courier New'
font_size (int): font size e.g. 16
"""
self.renderer.Font = \
framework.Drawing.Font(font_family,
font_size,
framework.Drawing.FontStyle.Regular,
framework.Drawing.GraphicsUnit.Point)
def resize(self, width, height):
"""Resize window to the new width and height."""
self.set_width(width)
self.set_height(height)
def center(self):
"""Center the output window on the screen"""
screen_area = HOST_APP.proc_screen_workarea
left = \
(abs(screen_area.Right - screen_area.Left) / 2) \
- (self.get_width() / 2)
top = \
(abs(screen_area.Top - screen_area.Bottom) / 2) \
- (self.get_height() / 2)
self.window.Left = left
self.window.Top = top
def get_title(self):
"""str: Return current window title."""
if self.window:
return self.window.Text
def get_width(self):
"""int: Return current window width."""
if self.window:
return self.window.Width
def get_height(self):
"""int: Return current window height."""
if self.window:
return self.window.Height
def close(self):
"""Close the window."""
if self.window:
self.window.Close()
def close_others(self, all_open_outputs=False):
"""Close all other windows that belong to the current command.
Args:
all_open_outputs (bool): Close all any other windows if True
"""
if all_open_outputs:
ScriptConsoleManager.CloseActiveOutputWindows(self.window)
else:
ScriptConsoleManager.CloseActiveOutputWindows(self.window,
self.output_id)
def hide(self):
"""Hide the window."""
if self.window:
self.window.Hide()
def show(self):
"""Show the window."""
if self.window:
self.window.Show()
def lock_size(self):
"""Lock window size."""
if self.window:
self.window.LockSize()
def unlock_size(self):
"""Unock window size."""
if self.window:
self.window.UnlockSize()
def freeze(self):
"""Freeze output content update."""
if self.window:
self.window.Freeze()
def unfreeze(self):
"""Unfreeze output content update."""
if self.window:
self.window.Unfreeze()
def save_contents(self, dest_file):
"""Save html code of the window.
Args:
dest_file (str): full path of the destination html file
"""
if self.renderer:
html = \
self.renderer.Document.Body.OuterHtml.encode('ascii', 'ignore')
doc_txt = self.renderer.DocumentText
full_html = doc_txt.lower().replace('<body></body>', html)
with open(dest_file, 'w') as output_file:
output_file.write(full_html)
def open_url(self, dest_url):
"""Open url page in output window.
Args:
dest_url (str): web url of the target page
"""
if self.renderer:
self.renderer.Navigate(dest_url, False)
def open_page(self, dest_file):
"""Open html page in output window.
Args:
dest_file (str): full path of the target html file
"""
self.show()
self.open_url('file:///' + dest_file)
def update_progress(self, cur_value, max_value):
"""Activate and update the output window progress bar.
Args:
cur_value (float): current progress value e.g. 50
max_value (float): total value e.g. 100
Example:
>>> output = pyrevit.output.get_output()
>>> for i in range(100):
>>> output.update_progress(i, 100)
"""
if self.window:
self.window.UpdateActivityBar(cur_value, max_value)
def reset_progress(self):
"""Reset output window progress bar to zero."""
if self.window:
self.window.UpdateActivityBar(0, 1)
def hide_progress(self):
"""Hide output window progress bar."""
if self.window:
self.window.SetActivityBarVisibility(False)
def unhide_progress(self):
"""Unhide output window progress bar."""
if self.window:
self.window.SetActivityBarVisibility(True)
def indeterminate_progress(self, state):
"""Show or hide indeterminate progress bar. """
if self.window:
self.window.UpdateActivityBar(state)
def show_logpanel(self):
"""Show output window logging panel."""
if self.window:
self.window.SetActivityBarVisibility(True)
def hide_logpanel(self):
"""Hide output window logging panel."""
if self.window:
self.show_logpanel()
self.window.SetActivityBarVisibility(False)
def log_debug(self, message):
"""Report DEBUG message into output logging panel."""
if self.window:
self.show_logpanel()
self.window.activityBar.ConsoleLog(message)
def log_success(self, message):
"""Report SUCCESS message into output logging panel."""
if self.window:
self.show_logpanel()
self.window.activityBar.ConsoleLogOK(message)
def log_info(self, message):
"""Report INFO message into output logging panel."""
if self.window:
self.show_logpanel()
self.window.activityBar.ConsoleLogInfo(message)
def log_warning(self, message):
"""Report WARNING message into output logging panel."""
if self.window:
self.show_logpanel()
self.window.activityBar.ConsoleLogWarning(message)
def log_error(self, message):
"""Report ERROR message into output logging panel."""
if self.window:
self.show_logpanel()
self.window.activityBar.ConsoleLogError(message)
def set_icon(self, iconpath):
"""Sets icon on the output window."""
if self.window:
self.window.SetIcon(iconpath)
def reset_icon(self):
"""Sets icon on the output window."""
if self.window:
self.window.ResetIcon()
@staticmethod
def print_html(html_str):
"""Add the html code to the output window.
Example:
>>> output = pyrevit.output.get_output()
>>> output.print_html('<strong>Title</strong>')
"""
print(coreutils.prepare_html_str(html_str),
end="")
@staticmethod
def print_code(code_str):
"""Print code to the output window with special formatting.
Example:
>>> output = pyrevit.output.get_output()
>>> output.print_code('value = 12')
"""
code_div = '<div class="code">{}</div>'
print(
coreutils.prepare_html_str(
code_div.format(
code_str.replace(' ', ' '*4)
)
),
end=""
)
@staticmethod
def print_md(md_str):
"""Process markdown code and print to output window.
Example:
>>> output = pyrevit.output.get_output()
>>> output.print_md('### Title')
"""
tables_ext = 'pyrevit.coreutils.markdown.extensions.tables'
markdown_html = markdown.markdown(md_str, extensions=[tables_ext])
markdown_html = markdown_html.replace('\n', '').replace('\r', '')
html_code = coreutils.prepare_html_str(markdown_html)
print(html_code, end="")
def print_table(self, table_data, columns=None, formats=None,
title='', last_line_style=''):
"""Print provided data in a table in output window.
Args:
table_data (:obj:`list` of iterables): 2D array of data
title (str): table title
columns (:obj:`list` str): list of column names
formats (:obj:`list` str): column data formats
last_line_style (str): css style of last row
Example:
>>> data = [
... ['row1', 'data', 'data', 80 ],
... ['row2', 'data', 'data', 45 ],
... ]
>>> output.print_table(
... table_data=data,
... title="Example Table",
... columns=["Row Name", "Column 1", "Column 2", "Percentage"],
... formats=['', '', '', '{}%'],
... last_line_style='color:red;'
... )
"""
if not columns:
columns = []
if not formats:
formats = []
if last_line_style:
self.add_style('tr:last-child {{ {style} }}'
.format(style=last_line_style))
zipper = itertools.izip_longest #pylint: disable=E1101
adjust_base_col = '|'
adjust_extra_col = ':---|'
base_col = '|'
extra_col = '{data}|'
# find max column count
max_col = max([len(x) for x in table_data])
header = ''
if columns:
header = base_col
for idx, col_name in zipper(range(max_col), columns, fillvalue=''): #pylint: disable=W0612
header += extra_col.format(data=col_name)
header += '\n'
justifier = adjust_base_col
for idx in range(max_col):
justifier += adjust_extra_col
justifier += '\n'
rows = ''
for entry in table_data:
row = base_col
for idx, attrib, attr_format \
in zipper(range(max_col), entry, formats, fillvalue=''):
if attr_format:
value = attr_format.format(attrib)
else:
value = attrib
row += extra_col.format(data=value)
rows += row + '\n'
table = header + justifier + rows
self.print_md('### {title}'.format(title=title))
self.print_md(table)
def print_image(self, image_path):
r"""Prints given image to the output.
Example:
>>> output = pyrevit.output.get_output()
>>> output.print_image(r'C:\image.gif')
"""
self.print_html(
"<span><img src=\"file:///{0}\"></span>".format(
image_path
)
)
def insert_divider(self, level=''):
"""Add horizontal rule to the output window."""
self.print_md('%s\n-----' % level)
def next_page(self):
"""Add hidden next page tag to the output window.
This is helpful to silently separate the output to multiple pages
for better printing.
"""
self.print_html('<div class="nextpage"></div><div> </div>')
@staticmethod
def linkify(element_ids, title=None):
"""Create clickable link for the provided element ids.
This method, creates the link but does not print it directly.
Args:
element_ids (`ElementId`) or
element_ids (:obj:`list` of `ElementId`): single or multiple ids
title (str): tile of the link. defaults to list of element ids
Example:
>>> output = pyrevit.output.get_output()
>>> for idx, elid in enumerate(element_ids):
>>> print('{}: {}'.format(idx+1, output.linkify(elid)))
"""
return coreutils.prepare_html_str(
linkmaker.make_link(element_ids, contents=title)
)
def make_chart(self, version=None):
""":obj:`PyRevitOutputChart`: Return chart object."""
return charts.PyRevitOutputChart(self, version=version)
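    # A hedged usage sketch (added for illustration, not part of the original
    # module), assuming the PyRevitOutputChart API documented for pyRevit
    # (data.labels, data.new_dataset, draw):
    #   chart = output.make_line_chart()
    #   chart.data.labels = ['A', 'B', 'C']
    #   dataset = chart.data.new_dataset('values')
    #   dataset.data = [10, 20, 30]
    #   chart.draw()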
def make_line_chart(self, version=None):
""":obj:`PyRevitOutputChart`: Return line chart object."""
return charts.PyRevitOutputChart(
self,
chart_type=charts.LINE_CHART,
version=version
)
def make_stacked_chart(self, version=None):
""":obj:`PyRevitOutputChart`: Return stacked chart object."""
chart = charts.PyRevitOutputChart(
self,
chart_type=charts.LINE_CHART,
version=version
)
chart.options.scales = {'yAxes': [{'stacked': True, }]}
return chart
def make_bar_chart(self, version=None):
""":obj:`PyRevitOutputChart`: Return bar chart object."""
return charts.PyRevitOutputChart(
self,
chart_type=charts.BAR_CHART,
version=version
)
def make_radar_chart(self, version=None):
""":obj:`PyRevitOutputChart`: Return radar chart object."""
return charts.PyRevitOutputChart(
self,
chart_type=charts.RADAR_CHART,
version=version
)
def make_polar_chart(self, version=None):
""":obj:`PyRevitOutputChart`: Return polar chart object."""
return charts.PyRevitOutputChart(
self,
chart_type=charts.POLAR_CHART,
version=version
)
def make_pie_chart(self, version=None):
""":obj:`PyRevitOutputChart`: Return pie chart object."""
return charts.PyRevitOutputChart(
self,
chart_type=charts.PIE_CHART,
version=version
)
def make_doughnut_chart(self, version=None):
""":obj:`PyRevitOutputChart`: Return dougnut chart object."""
return charts.PyRevitOutputChart(
self,
chart_type=charts.DOUGHNUT_CHART,
version=version
)
def make_bubble_chart(self, version=None):
""":obj:`PyRevitOutputChart`: Return bubble chart object."""
return charts.PyRevitOutputChart(
self,
chart_type=charts.BUBBLE_CHART,
version=version
)
def get_output():
""":obj:`pyrevit.output.PyRevitOutputWindow` : Return output window."""
return PyRevitOutputWindow()
| gpl-3.0 | 1,745,673,272,930,747,100 | 32.008345 | 102 | 0.577845 | false |
KirarinSnow/Google-Code-Jam | World Finals 2010/D.py | 1 | 1649 | #!/usr/bin/env python
#
# Problem: Travel Plan
# Language: Python
# Author: KirarinSnow
# Usage: python thisfile.py <input.in >output.out
for case in range(int(raw_input())):
n = int(raw_input())
x = map(int, raw_input().split())
f = int(raw_input())
x.sort()
if 2*(x[n-1]-x[0]) > f:
ans = "NO SOLUTION"
else:
n1 = n/2
n2 = n-n1
x1 = x[:n1]
x2 = x[n1:][::-1]
ans = -1
for v in range(2, 2*n1+2, 2):
s = []
a = []
for p in range(2):
ni = (n1, n2)[p]
y = (x1, x2)[p]
s.append(set())
k = [(0, 2)]
for i in range(1, ni):
kk = []
for t, w in k:
if abs(w-v) <= 2*(ni-i):
tt = t+w*abs(y[i]-y[i-1])
for ww in range(max(2, w-2), w+4, 2):
kk.append((tt, ww))
k = kk
for t, w in k:
s[-1].add(t)
a.append(sorted(list(s[-1])))
h = v*(x2[n2-1]-x1[n1-1])
for r in a[0]:
l = 0
u = len(a[1])
while l < u-1:
m = (l+u)/2
z = r+a[1][m]+h
if z > f:
u = m
else:
l = m
c = r+a[1][l]+h
if c <= f:
ans = max(ans, c)
print "Case #%d: %s" % (case+1, ans)
| gpl-3.0 | -9,187,201,303,024,424,000 | 25.596774 | 65 | 0.299576 | false |
smogol66/radiopy | settingsjson.py | 1 | 2432 | import json
settings_json = json.dumps([
{'type': 'title',
'title': 'Base title'},
{'type': 'numeric',
'title': 'Startup volume',
'desc': 'Default volume at boot',
'section': 'Base',
'key': 'startupvolume'},
{'type': 'path',
'title': 'Media path',
'desc': 'Path to the music directory',
'section': 'Base',
'key': 'mediapath'},
{'type': 'bool',
'title': 'Scan sub-folders',
     'desc': 'Scan sub-folders to load media files',
'section': 'Base',
'key': 'boolsub_folders'},
{'type': 'scrolloptions',
'title': 'Equalizer preset',
     'desc': 'Select audio equalizer preset',
'section': 'Base',
'key': 'equalizer',
'options': ['Flat', 'Classical', 'Club', 'Dance', 'Full bass', 'Full bass and treble', 'Full treble',
'Headphones', 'Large Hall', 'Live', 'Party', 'Pop', 'Reggae', 'Rock', 'Ska', 'Soft',
'Soft rock', 'Techno']},
{'type': 'numeric',
'title': 'Screen brightness',
'desc': 'Default brightness at boot',
'section': 'Base',
'key': 'brightness'},
{'type': 'numeric',
'title': 'Blank brightness',
'desc': 'Default brightness on blank',
'section': 'Base',
'key': 'blank_brightness'},
{'type': 'numeric',
'title': 'Blank timeout',
     'desc': 'Seconds until the screen blanks',
'section': 'Base',
'key': 'blank_timeout'},
{'type': 'options',
'title': 'Base lamp',
'desc': 'Behaviour of the base lamp',
'section': 'Base',
'key': 'baselamp',
'options': ['off', 'always on', 'only on alarm']},
{'type': 'colorpicker',
'title': 'Color of the lamp',
'desc': 'Color of the lamp',
'section': 'Base',
'key': 'runcolor'},
{'type': 'options',
'title': 'Alarm resume scheme',
     'desc': 'Minutes until the alarm resumes',
'section': 'Base',
'key': 'resume_scheme',
'options': ['20,10,5', '10,10,10', '1, 0.5,0.25']},
{'type': 'bool',
'title': 'Power down',
     'desc': 'Quit the application and shut down',
'section': 'Base',
'key': 'shutdown'},
{'type': 'bool',
'title': 'Reboot',
'desc': 'Quit the application and reboot',
'section': 'Base',
'key': 'reboot'},
# {'type': 'string',
# 'title': 'A string setting',
# 'desc': 'String description text',
# 'section': 'Base',
# 'key': 'stringexample'},
])
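# Hedged usage sketch (added for illustration; the app class name is an
# assumption): a Kivy app would typically feed this JSON into its settings
# panel, e.g.
#
#   class RadioApp(App):
#       def build_settings(self, settings):
#           settings.add_json_panel('Base', self.config, data=settings_json)
#
# add_json_panel() is Kivy's documented Settings API; 'Base' matches the
# 'section' keys used above.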
| gpl-3.0 | 5,109,011,461,259,409,000 | 29.4 | 106 | 0.521382 | false |
pulsar-chem/Pulsar-Core | test/system/TestAtom.py | 1 | 1937 | import pulsar as psr
def run_test():
tester=psr.PyTester("Testing the Atom class")
H, H2=psr.create_atom([0.0,0.0,0.0],1), psr.create_atom([0.0,0.0,0.0],1,1)
tester.test_return("create_atom works",True,True,H.__eq__,H2)
tester.test_equal("correct Z",1,H.Z)
tester.test_equal("correct isotope",1,H.isotope)
tester.test_equal("correct mass",1.007975,H.mass)
tester.test_equal("correct isotope mass",1.0078250322,H.isotope_mass)
tester.test_equal("correct charge",0,H.charge)
tester.test_equal("correct multiplicity",2,H.multiplicity)
tester.test_equal("correct nelectrons",1,H.nelectrons)
tester.test_equal("correct covalent radius",0.5858150988919267,H.cov_radius)
tester.test_equal("correct vDW radius",2.267671350549394,H.vdw_radius)
H3=psr.Atom(H2)
tester.test_return("copy constructor works",True,True,H.__eq__,H3)
D=psr.create_atom([0.0,0.0,0.0],1,2)
tester.test_equal("Isotopes work",2,D.isotope)
tester.test_return("Isotopes are different",True,True,D.__ne__,H)
tester.test_return("hash works",True,H.my_hash(),H2.my_hash)
tester.test_return("hash works 1",True,H.my_hash(),H3.my_hash)
GH=psr.make_ghost_atom(H2)
tester.test_return("ghost works",True,True,psr.is_ghost_atom,GH)
q=psr.make_point_charge(H2,3.3)
q2=psr.make_point_charge(H2.get_coords(),3.3)
tester.test_return("point charges work",True,True,psr.is_point_charge,q)
tester.test_return("point charges work 2",True,True,psr.is_point_charge,q2)
tester.test_return("is same point charge",True,True,q.__eq__,q2)
Dm=psr.make_dummy_atom(H)
Dm2=psr.make_dummy_atom(H.get_coords())
tester.test_return("is dummy atom",True,True,psr.is_dummy_atom,Dm)
tester.test_return("is dummy atom 2",True,True,psr.is_dummy_atom,Dm2)
tester.test_return("is same dummy atom",True,True,Dm.__eq__,Dm2)
tester.print_results()
return tester.nfailed()
| bsd-3-clause | 1,443,255,930,028,945,400 | 46.243902 | 80 | 0.68921 | false |
Harvard-ATG/media_management_lti | media_manager/urls.py | 1 | 1193 | """media_manager URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/{{ docs_version }}/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from __future__ import absolute_import
from django.urls import include, path
from django.contrib import admin
from . import views
app_name = "media_manager"
urlpatterns = [
path('', views.app, name='index'),
path('app', views.app, name='app'),
path('mirador/<int:collection_id>', views.mirador, name="mirador"),
path('endpoints/module', views.module_endpoint, name="module_endpoint"),
path('lti/launch', views.LTILaunchView.as_view(), name="lti_launch"),
path('lti/config', views.LTIToolConfigView.as_view(), name="lti_config"),
]
| bsd-3-clause | -1,802,701,804,387,765,000 | 40.137931 | 77 | 0.69321 | false |
gymnasium/edx-platform | common/djangoapps/student/views/management.py | 1 | 59233 | """
Student Views
"""
import datetime
import json
import logging
import uuid
import warnings
from collections import namedtuple
import analytics
import dogstats_wrapper as dog_stats_api
from bulk_email.models import Optout
from courseware.courses import get_courses, sort_by_announcement, sort_by_start_date
from django.conf import settings
from django.contrib import messages
from django.contrib.auth import login as django_login
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import AnonymousUser, User
from django.contrib.auth.views import password_reset_confirm
from django.core import mail
from django.urls import reverse
from django.core.validators import ValidationError, validate_email
from django.db import transaction
from django.db.models.signals import post_save
from django.dispatch import Signal, receiver
from django.http import Http404, HttpResponse, HttpResponseBadRequest, HttpResponseForbidden
from django.shortcuts import redirect
from django.template.context_processors import csrf
from django.template.response import TemplateResponse
from django.utils.encoding import force_bytes, force_text
from django.utils.http import base36_to_int, urlsafe_base64_encode
from django.utils.translation import get_language, ungettext
from django.utils.translation import ugettext as _
from django.views.decorators.csrf import csrf_exempt, ensure_csrf_cookie
from django.views.decorators.http import require_GET, require_POST
from eventtracking import tracker
from ipware.ip import get_ip
# Note that this lives in LMS, so this dependency should be refactored.
from notification_prefs.views import enable_notifications
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from pytz import UTC
from requests import HTTPError
from six import text_type, iteritems
from social_core.exceptions import AuthAlreadyAssociated, AuthException
from social_django import utils as social_utils
from xmodule.modulestore.django import modulestore
import openedx.core.djangoapps.external_auth.views
import third_party_auth
import track.views
from course_modes.models import CourseMode
from edxmako.shortcuts import render_to_response, render_to_string
from entitlements.models import CourseEntitlement
from openedx.core.djangoapps import monitoring_utils
from openedx.core.djangoapps.catalog.utils import (
get_programs_with_type,
)
from openedx.core.djangoapps.embargo import api as embargo_api
from openedx.core.djangoapps.external_auth.login_and_register import register as external_auth_register
from openedx.core.djangoapps.lang_pref import LANGUAGE_KEY
from openedx.core.djangoapps.programs.models import ProgramsApiConfig
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
from openedx.core.djangoapps.theming import helpers as theming_helpers
from openedx.core.djangoapps.user_api import accounts as accounts_settings
from openedx.core.djangoapps.user_api.accounts.utils import generate_password
from openedx.core.djangoapps.user_api.models import UserRetirementRequest
from openedx.core.djangoapps.user_api.preferences import api as preferences_api
from openedx.core.djangoapps.user_api.config.waffle import PREVENT_AUTH_USER_WRITES, SYSTEM_MAINTENANCE_MSG, waffle
from openedx.core.djangolib.markup import HTML, Text
from student.cookies import set_logged_in_cookies
from student.forms import AccountCreationForm, PasswordResetFormNoActive, get_registration_extension_form
from student.helpers import (
DISABLE_UNENROLL_CERT_STATES,
AccountValidationError,
auth_pipeline_urls,
authenticate_new_user,
cert_info,
create_or_set_user_attribute_created_on_site,
destroy_oauth_tokens,
do_create_account,
generate_activation_email_context,
get_next_url_for_login_page
)
from student.models import (
CourseEnrollment,
PasswordHistory,
PendingEmailChange,
Registration,
RegistrationCookieConfiguration,
UserAttribute,
UserProfile,
UserSignupSource,
UserStanding,
create_comments_service_user,
email_exists_or_retired,
)
from student.signals import REFUND_ORDER
from student.tasks import send_activation_email
from student.text_me_the_app import TextMeTheAppFragmentView
from third_party_auth import pipeline, provider
from third_party_auth.saml import SAP_SUCCESSFACTORS_SAML_KEY
from util.bad_request_rate_limiter import BadRequestRateLimiter
from util.db import outer_atomic
from util.json_request import JsonResponse
from util.password_policy_validators import SecurityPolicyError, validate_password
log = logging.getLogger("edx.student")
AUDIT_LOG = logging.getLogger("audit")
ReverifyInfo = namedtuple(
'ReverifyInfo',
'course_id course_name course_number date status display'
)
SETTING_CHANGE_INITIATED = 'edx.user.settings.change_initiated'
# Used as the name of the user attribute for tracking affiliate registrations
REGISTRATION_AFFILIATE_ID = 'registration_affiliate_id'
REGISTRATION_UTM_PARAMETERS = {
'utm_source': 'registration_utm_source',
'utm_medium': 'registration_utm_medium',
'utm_campaign': 'registration_utm_campaign',
'utm_term': 'registration_utm_term',
'utm_content': 'registration_utm_content',
}
REGISTRATION_UTM_CREATED_AT = 'registration_utm_created_at'
# used to announce a registration
REGISTER_USER = Signal(providing_args=["user", "registration"])
def csrf_token(context):
"""
A csrf token that can be included in a form.
"""
token = context.get('csrf_token', '')
if token == 'NOTPROVIDED':
return ''
return (u'<div style="display:none"><input type="hidden"'
' name="csrfmiddlewaretoken" value="{}" /></div>'.format(token))
# NOTE: This view is not linked to directly--it is called from
# branding/views.py:index(), which is cached for anonymous users.
# This means that it should always return the same thing for anon
# users. (in particular, no switching based on query params allowed)
def index(request, extra_context=None, user=AnonymousUser()):
"""
Render the edX main page.
extra_context is used to allow immediate display of certain modal windows, eg signup,
as used by external_auth.
"""
if extra_context is None:
extra_context = {}
courses = get_courses(user)
if configuration_helpers.get_value(
"ENABLE_COURSE_SORTING_BY_START_DATE",
settings.FEATURES["ENABLE_COURSE_SORTING_BY_START_DATE"],
):
courses = sort_by_start_date(courses)
else:
courses = sort_by_announcement(courses)
context = {'courses': courses}
context['homepage_overlay_html'] = configuration_helpers.get_value('homepage_overlay_html')
# This appears to be an unused context parameter, at least for the master templates...
context['show_partners'] = configuration_helpers.get_value('show_partners', True)
# TO DISPLAY A YOUTUBE WELCOME VIDEO
# 1) Change False to True
context['show_homepage_promo_video'] = configuration_helpers.get_value('show_homepage_promo_video', False)
# Maximum number of courses to display on the homepage.
context['homepage_course_max'] = configuration_helpers.get_value(
'HOMEPAGE_COURSE_MAX', settings.HOMEPAGE_COURSE_MAX
)
# 2) Add your video's YouTube ID (11 chars, eg "123456789xX"), or specify via site configuration
# Note: This value should be moved into a configuration setting and plumbed-through to the
# context via the site configuration workflow, versus living here
youtube_video_id = configuration_helpers.get_value('homepage_promo_video_youtube_id', "your-youtube-id")
context['homepage_promo_video_youtube_id'] = youtube_video_id
# allow for theme override of the courses list
context['courses_list'] = theming_helpers.get_template_path('courses_list.html')
# Insert additional context for use in the template
context.update(extra_context)
# Add marketable programs to the context.
context['programs_list'] = get_programs_with_type(request.site, include_hidden=False)
return render_to_response('index.html', context)
@ensure_csrf_cookie
def register_user(request, extra_context=None):
"""
Deprecated. To be replaced by :class:`student_account.views.login_and_registration_form`.
"""
# Determine the URL to redirect to following login:
redirect_to = get_next_url_for_login_page(request)
if request.user.is_authenticated:
return redirect(redirect_to)
external_auth_response = external_auth_register(request)
if external_auth_response is not None:
return external_auth_response
context = {
'login_redirect_url': redirect_to, # This gets added to the query string of the "Sign In" button in the header
'email': '',
'name': '',
'running_pipeline': None,
'pipeline_urls': auth_pipeline_urls(pipeline.AUTH_ENTRY_REGISTER, redirect_url=redirect_to),
'platform_name': configuration_helpers.get_value(
'platform_name',
settings.PLATFORM_NAME
),
'selected_provider': '',
'username': '',
}
if extra_context is not None:
context.update(extra_context)
if context.get("extauth_domain", '').startswith(
openedx.core.djangoapps.external_auth.views.SHIBBOLETH_DOMAIN_PREFIX
):
return render_to_response('register-shib.html', context)
# If third-party auth is enabled, prepopulate the form with data from the
# selected provider.
if third_party_auth.is_enabled() and pipeline.running(request):
running_pipeline = pipeline.get(request)
current_provider = provider.Registry.get_from_pipeline(running_pipeline)
if current_provider is not None:
overrides = current_provider.get_register_form_data(running_pipeline.get('kwargs'))
overrides['running_pipeline'] = running_pipeline
overrides['selected_provider'] = current_provider.name
context.update(overrides)
return render_to_response('register.html', context)
def compose_and_send_activation_email(user, profile, user_registration=None):
"""
Construct all the required params and send the activation email
through celery task
Arguments:
user: current logged-in user
profile: profile object of the current logged-in user
user_registration: registration of the current logged-in user
"""
dest_addr = user.email
if user_registration is None:
user_registration = Registration.objects.get(user=user)
context = generate_activation_email_context(user, user_registration)
subject = render_to_string('emails/activation_email_subject.txt', context)
# Email subject *must not* contain newlines
subject = ''.join(subject.splitlines())
message_for_activation = render_to_string('emails/activation_email.txt', context)
from_address = configuration_helpers.get_value('email_from_address', settings.DEFAULT_FROM_EMAIL)
from_address = configuration_helpers.get_value('ACTIVATION_EMAIL_FROM_ADDRESS', from_address)
if settings.FEATURES.get('REROUTE_ACTIVATION_EMAIL'):
dest_addr = settings.FEATURES['REROUTE_ACTIVATION_EMAIL']
message_for_activation = ("Activation for %s (%s): %s\n" % (user, user.email, profile.name) +
'-' * 80 + '\n\n' + message_for_activation)
send_activation_email.delay(subject, message_for_activation, from_address, dest_addr)
@login_required
def course_run_refund_status(request, course_id):
"""
Get Refundable status for a course.
Arguments:
request: The request object.
course_id (str): The unique identifier for the course.
Returns:
Json response.
"""
try:
course_key = CourseKey.from_string(course_id)
course_enrollment = CourseEnrollment.get_enrollment(request.user, course_key)
except InvalidKeyError:
logging.exception("The course key used to get refund status caused InvalidKeyError during look up.")
return JsonResponse({'course_refundable_status': ''}, status=406)
refundable_status = course_enrollment.refundable()
logging.info("Course refund status for course {0} is {1}".format(course_id, refundable_status))
return JsonResponse({'course_refundable_status': refundable_status}, status=200)
def _update_email_opt_in(request, org):
"""
Helper function used to hit the profile API if email opt-in is enabled.
"""
email_opt_in = request.POST.get('email_opt_in')
if email_opt_in is not None:
email_opt_in_boolean = email_opt_in == 'true'
preferences_api.update_email_opt_in(request.user, org, email_opt_in_boolean)
@transaction.non_atomic_requests
@require_POST
@outer_atomic(read_committed=True)
def change_enrollment(request, check_access=True):
"""
Modify the enrollment status for the logged-in user.
TODO: This is lms specific and does not belong in common code.
The request parameter must be a POST request (other methods return 405)
that specifies course_id and enrollment_action parameters. If course_id or
enrollment_action is not specified, if course_id is not valid, if
enrollment_action is something other than "enroll" or "unenroll", if
enrollment_action is "enroll" and enrollment is closed for the course, or
if enrollment_action is "unenroll" and the user is not enrolled in the
course, a 400 error will be returned. If the user is not logged in, 403
will be returned; it is important that only this case return 403 so the
front end can redirect the user to a registration or login page when this
happens. This function should only be called from an AJAX request, so
the error messages in the responses should never actually be user-visible.
Args:
request (`Request`): The Django request object
Keyword Args:
check_access (boolean): If True, we check that an accessible course actually
exists for the given course_key before we enroll the student.
The default is set to False to avoid breaking legacy code or
code with non-standard flows (ex. beta tester invitations), but
for any standard enrollment flow you probably want this to be True.
Returns:
Response
"""
# Get the user
user = request.user
# Ensure the user is authenticated
if not user.is_authenticated:
return HttpResponseForbidden()
# Ensure we received a course_id
action = request.POST.get("enrollment_action")
if 'course_id' not in request.POST:
return HttpResponseBadRequest(_("Course id not specified"))
try:
course_id = CourseKey.from_string(request.POST.get("course_id"))
except InvalidKeyError:
log.warning(
u"User %s tried to %s with invalid course id: %s",
user.username,
action,
request.POST.get("course_id"),
)
return HttpResponseBadRequest(_("Invalid course id"))
# Allow us to monitor performance of this transaction on a per-course basis since we often roll-out features
# on a per-course basis.
monitoring_utils.set_custom_metric('course_id', text_type(course_id))
if action == "enroll":
# Make sure the course exists
# We don't do this check on unenroll, or a bad course id can't be unenrolled from
if not modulestore().has_course(course_id):
log.warning(
u"User %s tried to enroll in non-existent course %s",
user.username,
course_id
)
return HttpResponseBadRequest(_("Course id is invalid"))
# Record the user's email opt-in preference
if settings.FEATURES.get('ENABLE_MKTG_EMAIL_OPT_IN'):
_update_email_opt_in(request, course_id.org)
available_modes = CourseMode.modes_for_course_dict(course_id)
# Check whether the user is blocked from enrolling in this course
# This can occur if the user's IP is on a global blacklist
# or if the user is enrolling in a country in which the course
# is not available.
redirect_url = embargo_api.redirect_if_blocked(
course_id, user=user, ip_address=get_ip(request),
url=request.path
)
if redirect_url:
return HttpResponse(redirect_url)
if CourseEntitlement.check_for_existing_entitlement_and_enroll(user=user, course_run_key=course_id):
            return HttpResponse(reverse('courseware', args=[text_type(course_id)]))
# Check that auto enrollment is allowed for this course
# (= the course is NOT behind a paywall)
if CourseMode.can_auto_enroll(course_id):
# Enroll the user using the default mode (audit)
# We're assuming that users of the course enrollment table
# will NOT try to look up the course enrollment model
# by its slug. If they do, it's possible (based on the state of the database)
# for no such model to exist, even though we've set the enrollment type
# to "audit".
try:
enroll_mode = CourseMode.auto_enroll_mode(course_id, available_modes)
if enroll_mode:
CourseEnrollment.enroll(user, course_id, check_access=check_access, mode=enroll_mode)
except Exception: # pylint: disable=broad-except
return HttpResponseBadRequest(_("Could not enroll"))
# If we have more than one course mode or professional ed is enabled,
# then send the user to the choose your track page.
# (In the case of no-id-professional/professional ed, this will redirect to a page that
# funnels users directly into the verification / payment flow)
if CourseMode.has_verified_mode(available_modes) or CourseMode.has_professional_mode(available_modes):
return HttpResponse(
reverse("course_modes_choose", kwargs={'course_id': text_type(course_id)})
)
# Otherwise, there is only one mode available (the default)
return HttpResponse()
elif action == "unenroll":
enrollment = CourseEnrollment.get_enrollment(user, course_id)
if not enrollment:
return HttpResponseBadRequest(_("You are not enrolled in this course"))
certificate_info = cert_info(user, enrollment.course_overview)
if certificate_info.get('status') in DISABLE_UNENROLL_CERT_STATES:
return HttpResponseBadRequest(_("Your certificate prevents you from unenrolling from this course"))
CourseEnrollment.unenroll(user, course_id)
REFUND_ORDER.send(sender=None, course_enrollment=enrollment)
return HttpResponse()
else:
return HttpResponseBadRequest(_("Enrollment action is invalid"))
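# Illustrative sketch (not part of the original module): the kind of AJAX POST this
# enrollment view expects. The URL and the course id value are assumptions; the field
# names come from the request.POST reads above.
#
#     POST <enrollment endpoint>          (URL name is an assumption)
#         enrollment_action=enroll        (or 'unenroll')
#         course_id=course-v1:edX+DemoX+Demo_Course
#     The request must carry an authenticated session, or a 403 is returned.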
@require_GET
@login_required
@ensure_csrf_cookie
def manage_user_standing(request):
"""
Renders the view used to manage user standing. Also displays a table
of user accounts that have been disabled and who disabled them.
"""
if not request.user.is_staff:
raise Http404
all_disabled_accounts = UserStanding.objects.filter(
account_status=UserStanding.ACCOUNT_DISABLED
)
all_disabled_users = [standing.user for standing in all_disabled_accounts]
headers = ['username', 'account_changed_by']
rows = []
for user in all_disabled_users:
row = [user.username, user.standing.changed_by]
rows.append(row)
context = {'headers': headers, 'rows': rows}
return render_to_response("manage_user_standing.html", context)
@require_POST
@login_required
@ensure_csrf_cookie
def disable_account_ajax(request):
"""
Ajax call to change user standing. Endpoint of the form
in manage_user_standing.html
"""
if not request.user.is_staff:
raise Http404
username = request.POST.get('username')
context = {}
if username is None or username.strip() == '':
context['message'] = _('Please enter a username')
return JsonResponse(context, status=400)
account_action = request.POST.get('account_action')
if account_action is None:
context['message'] = _('Please choose an option')
return JsonResponse(context, status=400)
username = username.strip()
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
context['message'] = _("User with username {} does not exist").format(username)
return JsonResponse(context, status=400)
else:
user_account, _success = UserStanding.objects.get_or_create(
user=user, defaults={'changed_by': request.user},
)
if account_action == 'disable':
user_account.account_status = UserStanding.ACCOUNT_DISABLED
context['message'] = _("Successfully disabled {}'s account").format(username)
log.info(u"%s disabled %s's account", request.user, username)
elif account_action == 'reenable':
user_account.account_status = UserStanding.ACCOUNT_ENABLED
context['message'] = _("Successfully reenabled {}'s account").format(username)
log.info(u"%s reenabled %s's account", request.user, username)
else:
context['message'] = _("Unexpected account status")
return JsonResponse(context, status=400)
user_account.changed_by = request.user
user_account.standing_last_changed_at = datetime.datetime.now(UTC)
user_account.save()
return JsonResponse(context)
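# Illustrative sketch (not part of the original module): the fields the AJAX endpoint
# above reads from request.POST, with assumed values.
#
#     POST <disable_account_ajax endpoint>    (URL name is an assumption)
#         username=jsmith
#         account_action=disable              (or 'reenable')
#     -> {"message": "Successfully disabled jsmith's account"}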
@login_required
@ensure_csrf_cookie
def change_setting(request):
"""
JSON call to change a profile setting: Right now, location
"""
# TODO (vshnayder): location is no longer used
u_prof = UserProfile.objects.get(user=request.user) # request.user.profile_cache
if 'location' in request.POST:
u_prof.location = request.POST['location']
u_prof.save()
return JsonResponse({
"success": True,
"location": u_prof.location,
})
@receiver(post_save, sender=User)
def user_signup_handler(sender, **kwargs): # pylint: disable=unused-argument
"""
Handler that saves the user Signup Source when the user is created
"""
if 'created' in kwargs and kwargs['created']:
site = configuration_helpers.get_value('SITE_NAME')
if site:
user_signup_source = UserSignupSource(user=kwargs['instance'], site=site)
user_signup_source.save()
log.info(u'user {} originated from a white labeled "Microsite"'.format(kwargs['instance'].id))
@transaction.non_atomic_requests
def create_account_with_params(request, params):
"""
Given a request and a dict of parameters (which may or may not have come
from the request), create an account for the requesting user, including
creating a comments service user object and sending an activation email.
This also takes external/third-party auth into account, updates that as
necessary, and authenticates the user for the request's session.
Does not return anything.
Raises AccountValidationError if an account with the username or email
specified by params already exists, or ValidationError if any of the given
parameters is invalid for any other reason.
Issues with this code:
* It is non-transactional except where explicitly wrapped in atomic to
alleviate deadlocks and improve performance. This means failures at
different places in registration can leave users in inconsistent
states.
* Third-party auth passwords are not verified. There is a comment that
they are unused, but it would be helpful to have a sanity check that
they are sane.
* The user-facing text is rather unfriendly (e.g. "Username must be a
minimum of two characters long" rather than "Please use a username of
at least two characters").
* Duplicate email raises a ValidationError (rather than the expected
AccountValidationError). Duplicate username returns an inconsistent
user message (i.e. "An account with the Public Username '{username}'
already exists." rather than "It looks like {username} belongs to an
existing account. Try again with a different username.") The two checks
occur at different places in the code; as a result, registering with
both a duplicate username and email raises only a ValidationError for
email only.
"""
# Copy params so we can modify it; we can't just do dict(params) because if
# params is request.POST, that results in a dict containing lists of values
params = dict(params.items())
# allow to define custom set of required/optional/hidden fields via configuration
extra_fields = configuration_helpers.get_value(
'REGISTRATION_EXTRA_FIELDS',
getattr(settings, 'REGISTRATION_EXTRA_FIELDS', {})
)
# registration via third party (Google, Facebook) using mobile application
# doesn't use social auth pipeline (no redirect uri(s) etc involved).
# In this case all related info (required for account linking)
# is sent in params.
# `third_party_auth_credentials_in_api` essentially means 'request
# is made from mobile application'
third_party_auth_credentials_in_api = 'provider' in params
is_third_party_auth_enabled = third_party_auth.is_enabled()
if is_third_party_auth_enabled and (pipeline.running(request) or third_party_auth_credentials_in_api):
params["password"] = generate_password()
# in case user is registering via third party (Google, Facebook) and pipeline has expired, show appropriate
# error message
if is_third_party_auth_enabled and ('social_auth_provider' in params and not pipeline.running(request)):
raise ValidationError(
{'session_expired': [
_(u"Registration using {provider} has timed out.").format(
provider=params.get('social_auth_provider'))
]}
)
# if doing signup for an external authorization, then get email, password, name from the eamap
# don't use the ones from the form, since the user could have hacked those
# unless originally we didn't get a valid email or name from the external auth
# TODO: We do not check whether these values meet all necessary criteria, such as email length
do_external_auth = 'ExternalAuthMap' in request.session
if do_external_auth:
eamap = request.session['ExternalAuthMap']
try:
validate_email(eamap.external_email)
params["email"] = eamap.external_email
except ValidationError:
pass
if len(eamap.external_name.strip()) >= accounts_settings.NAME_MIN_LENGTH:
params["name"] = eamap.external_name
params["password"] = eamap.internal_password
log.debug(u'In create_account with external_auth: user = %s, email=%s', params["name"], params["email"])
extended_profile_fields = configuration_helpers.get_value('extended_profile_fields', [])
enforce_password_policy = not do_external_auth
# Can't have terms of service for certain SHIB users, like at Stanford
registration_fields = getattr(settings, 'REGISTRATION_EXTRA_FIELDS', {})
tos_required = (
registration_fields.get('terms_of_service') != 'hidden' or
registration_fields.get('honor_code') != 'hidden'
) and (
not settings.FEATURES.get("AUTH_USE_SHIB") or
not settings.FEATURES.get("SHIB_DISABLE_TOS") or
not do_external_auth or
not eamap.external_domain.startswith(openedx.core.djangoapps.external_auth.views.SHIBBOLETH_DOMAIN_PREFIX)
)
form = AccountCreationForm(
data=params,
extra_fields=extra_fields,
extended_profile_fields=extended_profile_fields,
enforce_password_policy=enforce_password_policy,
tos_required=tos_required,
)
custom_form = get_registration_extension_form(data=params)
third_party_provider = None
running_pipeline = None
new_user = None
# Perform operations within a transaction that are critical to account creation
with outer_atomic(read_committed=True):
# first, create the account
(user, profile, registration) = do_create_account(form, custom_form)
# If a 3rd party auth provider and credentials were provided in the API, link the account with social auth
# (If the user is using the normal register page, the social auth pipeline does the linking, not this code)
# Note: this is orthogonal to the 3rd party authentication pipeline that occurs
# when the account is created via the browser and redirect URLs.
if is_third_party_auth_enabled and third_party_auth_credentials_in_api:
backend_name = params['provider']
request.social_strategy = social_utils.load_strategy(request)
redirect_uri = reverse('social:complete', args=(backend_name, ))
request.backend = social_utils.load_backend(request.social_strategy, backend_name, redirect_uri)
social_access_token = params.get('access_token')
if not social_access_token:
raise ValidationError({
'access_token': [
_("An access_token is required when passing value ({}) for provider.").format(
params['provider']
)
]
})
request.session[pipeline.AUTH_ENTRY_KEY] = pipeline.AUTH_ENTRY_REGISTER_API
pipeline_user = None
error_message = ""
try:
pipeline_user = request.backend.do_auth(social_access_token, user=user)
except AuthAlreadyAssociated:
error_message = _("The provided access_token is already associated with another user.")
except (HTTPError, AuthException):
error_message = _("The provided access_token is not valid.")
if not pipeline_user or not isinstance(pipeline_user, User):
# Ensure user does not re-enter the pipeline
request.social_strategy.clean_partial_pipeline(social_access_token)
raise ValidationError({'access_token': [error_message]})
# If the user is registering via 3rd party auth, track which provider they use
if is_third_party_auth_enabled and pipeline.running(request):
running_pipeline = pipeline.get(request)
third_party_provider = provider.Registry.get_from_pipeline(running_pipeline)
new_user = authenticate_new_user(request, user.username, params['password'])
if not settings.APPSEMBLER_FEATURES.get('SKIP_LOGIN_AFTER_REGISTRATION', False):
django_login(request, new_user)
request.session.set_expiry(0)
if do_external_auth:
eamap.user = new_user
eamap.dtsignup = datetime.datetime.now(UTC)
eamap.save()
AUDIT_LOG.info(u"User registered with external_auth %s", new_user.username)
AUDIT_LOG.info(u'Updated ExternalAuthMap for %s to be %s', new_user.username, eamap)
if settings.FEATURES.get('BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH'):
log.info('bypassing activation email')
new_user.is_active = True
new_user.save()
AUDIT_LOG.info(
u"Login activated on extauth account - {0} ({1})".format(new_user.username, new_user.email))
# Check if system is configured to skip activation email for the current user.
skip_email = skip_activation_email(
user, do_external_auth, running_pipeline, third_party_provider, params,
)
if skip_email:
registration.activate()
else:
compose_and_send_activation_email(user, profile, registration)
# Perform operations that are non-critical parts of account creation
create_or_set_user_attribute_created_on_site(user, request.site, request)
preferences_api.set_user_preference(user, LANGUAGE_KEY, get_language())
if settings.FEATURES.get('ENABLE_DISCUSSION_EMAIL_DIGEST'):
try:
enable_notifications(user)
except Exception: # pylint: disable=broad-except
log.exception("Enable discussion notifications failed for user {id}.".format(id=user.id))
dog_stats_api.increment("common.student.account_created")
# Track the user's registration
if hasattr(settings, 'LMS_SEGMENT_KEY') and settings.LMS_SEGMENT_KEY:
tracking_context = tracker.get_tracker().resolve_context()
identity_args = [
user.id,
{
'email': user.email,
'username': user.username,
'name': profile.name,
# Mailchimp requires the age & yearOfBirth to be integers; we send a sane integer default if the value is falsy.
'age': profile.age or -1,
'yearOfBirth': profile.year_of_birth or datetime.datetime.now(UTC).year,
'education': profile.level_of_education_display,
'address': profile.mailing_address,
'gender': profile.gender_display,
'country': text_type(profile.country),
}
]
if hasattr(settings, 'MAILCHIMP_NEW_USER_LIST_ID'):
identity_args.append({
"MailChimp": {
"listId": settings.MAILCHIMP_NEW_USER_LIST_ID
}
})
analytics.identify(*identity_args)
analytics.track(
user.id,
"edx.bi.user.account.registered",
{
'category': 'conversion',
'label': params.get('course_id'),
'provider': third_party_provider.name if third_party_provider else None
},
context={
'ip': tracking_context.get('ip'),
'Google Analytics': {
'clientId': tracking_context.get('client_id')
}
}
)
# Announce registration
REGISTER_USER.send(sender=None, user=user, registration=registration)
create_comments_service_user(user)
try:
record_registration_attributions(request, new_user)
# Don't prevent a user from registering due to attribution errors.
except Exception: # pylint: disable=broad-except
log.exception('Error while attributing cookies to user registration.')
# TODO: there is no error checking here to see that the user actually logged in successfully,
# and is not yet an active user.
if new_user is not None:
AUDIT_LOG.info(u"Login success on new account creation - {0}".format(new_user.username))
return new_user
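# Illustrative sketch (not part of the original module): a minimal `params` dict for a
# plain (non third-party-auth) registration. Which extra fields are required depends on
# REGISTRATION_EXTRA_FIELDS, so treat the exact keys and values below as assumptions.
#
#     params = {
#         'email': '[email protected]',
#         'username': 'newlearner',
#         'name': 'New Learner',
#         'password': 'a-sufficiently-long-password',
#         'honor_code': 'true',
#         'terms_of_service': 'true',
#     }
#     user = create_account_with_params(request, params)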
def skip_activation_email(user, do_external_auth, running_pipeline, third_party_provider, params):
"""
Return `True` if activation email should be skipped.
Skip email if we are:
1. Doing load testing.
2. Random user generation for other forms of testing.
3. External auth bypassing activation.
4. Have the platform configured to not require e-mail activation.
5. Registering a new user using a trusted third party provider (with skip_email_verification=True)
Note that this feature is only tested as a flag set one way or
the other for *new* systems. we need to be careful about
changing settings on a running system to make sure no users are
left in an inconsistent state (or doing a migration if they are).
Arguments:
user (User): Django User object for the current user.
do_external_auth (bool): True if external authentication is in progress.
running_pipeline (dict): Dictionary containing user and pipeline data for third party authentication.
third_party_provider (ProviderConfig): An instance of third party provider configuration.
params (dict): A copy of the request.POST dictionary.
Returns:
(bool): `True` if account activation email should be skipped, `False` if account activation email should be
sent.
"""
sso_pipeline_email = running_pipeline and running_pipeline['kwargs'].get('details', {}).get('email')
# Email is valid if the SAML assertion email matches the user account email or
# no email was provided in the SAML assertion. Some IdP's use a callback
# to retrieve additional user account information (including email) after the
# initial account creation.
valid_email = (
sso_pipeline_email == user.email or (
sso_pipeline_email is None and
third_party_provider and
getattr(third_party_provider, "identity_provider_type", None) == SAP_SUCCESSFACTORS_SAML_KEY
)
)
# log the cases where skip activation email flag is set, but email validity check fails
if third_party_provider and third_party_provider.skip_email_verification and not valid_email:
log.info(
'[skip_email_verification=True][user=%s][pipeline-email=%s][identity_provider=%s][provider_type=%s] '
'Account activation email sent as user\'s system email differs from SSO email.',
user.email,
sso_pipeline_email,
getattr(third_party_provider, "provider_id", None),
getattr(third_party_provider, "identity_provider_type", None)
)
return (
settings.FEATURES.get('SKIP_EMAIL_VALIDATION', None) or
settings.FEATURES.get('AUTOMATIC_AUTH_FOR_TESTING') or
(settings.FEATURES.get('BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH') and do_external_auth) or
(third_party_provider and third_party_provider.skip_email_verification and valid_email)
and
params.get('send_activation_email', True)
)
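# Illustrative sketch (not part of the original module): with a trusted SSO provider that
# sets skip_email_verification and supplies an email matching the account, the checks above
# evaluate truthy and registration.activate() runs without sending mail. The concrete values
# are assumptions, and the FEATURES flags are assumed to be off.
#
#     running_pipeline = {'kwargs': {'details': {'email': '[email protected]'}}}
#     user.email == '[email protected]'               # -> valid_email is True
#     third_party_provider.skip_email_verification  # -> True
#     skip_activation_email(user, False, running_pipeline, third_party_provider, {})  # -> truthy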
def record_affiliate_registration_attribution(request, user):
"""
Attribute this user's registration to the referring affiliate, if
applicable.
"""
affiliate_id = request.COOKIES.get(settings.AFFILIATE_COOKIE_NAME)
if user and affiliate_id:
UserAttribute.set_user_attribute(user, REGISTRATION_AFFILIATE_ID, affiliate_id)
def record_utm_registration_attribution(request, user):
"""
Attribute this user's registration to the latest UTM referrer, if
applicable.
"""
utm_cookie_name = RegistrationCookieConfiguration.current().utm_cookie_name
utm_cookie = request.COOKIES.get(utm_cookie_name)
if user and utm_cookie:
utm = json.loads(utm_cookie)
for utm_parameter_name in REGISTRATION_UTM_PARAMETERS:
utm_parameter = utm.get(utm_parameter_name)
if utm_parameter:
UserAttribute.set_user_attribute(
user,
REGISTRATION_UTM_PARAMETERS.get(utm_parameter_name),
utm_parameter
)
created_at_unixtime = utm.get('created_at')
if created_at_unixtime:
# We divide by 1000 here because the javascript timestamp generated is in milliseconds not seconds.
# PYTHON: time.time() => 1475590280.823698
# JS: new Date().getTime() => 1475590280823
created_at_datetime = datetime.datetime.fromtimestamp(int(created_at_unixtime) / float(1000), tz=UTC)
UserAttribute.set_user_attribute(
user,
REGISTRATION_UTM_CREATED_AT,
created_at_datetime
)
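# Illustrative sketch (not part of the original module): the millisecond-to-second
# conversion described in the comment above, using the sample value from that comment.
#
#     int(1475590280823) / float(1000)                        # -> 1475590280.823
#     datetime.datetime.fromtimestamp(1475590280.823, tz=UTC) # -> timezone-aware datetime (October 2016, UTC)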
def record_registration_attributions(request, user):
"""
Attribute this user's registration based on referrer cookies.
"""
record_affiliate_registration_attribution(request, user)
record_utm_registration_attribution(request, user)
@csrf_exempt
@transaction.non_atomic_requests
def create_account(request, post_override=None):
"""
JSON call to create new edX account.
Used by form in signup_modal.html, which is included into header.html
"""
# Check if ALLOW_PUBLIC_ACCOUNT_CREATION flag turned off to restrict user account creation
if not configuration_helpers.get_value(
'ALLOW_PUBLIC_ACCOUNT_CREATION',
settings.FEATURES.get('ALLOW_PUBLIC_ACCOUNT_CREATION', True)
):
return HttpResponseForbidden(_("Account creation not allowed."))
if waffle().is_enabled(PREVENT_AUTH_USER_WRITES):
return HttpResponseForbidden(SYSTEM_MAINTENANCE_MSG)
warnings.warn("Please use RegistrationView instead.", DeprecationWarning)
try:
user = create_account_with_params(request, post_override or request.POST)
except AccountValidationError as exc:
return JsonResponse({'success': False, 'value': text_type(exc), 'field': exc.field}, status=400)
except ValidationError as exc:
field, error_list = next(iteritems(exc.message_dict))
return JsonResponse(
{
"success": False,
"field": field,
"value": error_list[0],
},
status=400
)
redirect_url = None # The AJAX method calling should know the default destination upon success
# Resume the third-party-auth pipeline if necessary.
if third_party_auth.is_enabled() and pipeline.running(request):
running_pipeline = pipeline.get(request)
redirect_url = pipeline.get_complete_url(running_pipeline['backend'])
response = JsonResponse({
'success': True,
'redirect_url': redirect_url,
})
set_logged_in_cookies(request, response, user)
return response
@ensure_csrf_cookie
def activate_account(request, key):
"""
When link in activation e-mail is clicked
"""
# If request is in Studio call the appropriate view
if theming_helpers.get_project_root_name().lower() == u'cms':
return activate_account_studio(request, key)
try:
registration = Registration.objects.get(activation_key=key)
except (Registration.DoesNotExist, Registration.MultipleObjectsReturned):
messages.error(
request,
HTML(_(
'{html_start}Your account could not be activated{html_end}'
'Something went wrong, please <a href="{support_url}">contact support</a> to resolve this issue.'
)).format(
support_url=configuration_helpers.get_value('SUPPORT_SITE_LINK', settings.SUPPORT_SITE_LINK),
html_start=HTML('<p class="message-title">'),
html_end=HTML('</p>'),
),
extra_tags='account-activation aa-icon'
)
else:
if registration.user.is_active:
messages.info(
request,
HTML(_('{html_start}This account has already been activated.{html_end}')).format(
html_start=HTML('<p class="message-title">'),
html_end=HTML('</p>'),
),
extra_tags='account-activation aa-icon',
)
elif waffle().is_enabled(PREVENT_AUTH_USER_WRITES):
messages.error(
request,
HTML(u'{html_start}{message}{html_end}').format(
message=Text(SYSTEM_MAINTENANCE_MSG),
html_start=HTML('<p class="message-title">'),
html_end=HTML('</p>'),
),
extra_tags='account-activation aa-icon',
)
else:
registration.activate()
# Success message for logged in users.
message = _('{html_start}Success{html_end} You have activated your account.')
if not request.user.is_authenticated:
# Success message for logged out users
message = _(
'{html_start}Success! You have activated your account.{html_end}'
'You will now receive email updates and alerts from us related to'
' the courses you are enrolled in. Sign In to continue.'
)
# Add message for later use.
messages.success(
request,
HTML(message).format(
html_start=HTML('<p class="message-title">'),
html_end=HTML('</p>'),
),
extra_tags='account-activation aa-icon',
)
return redirect('dashboard')
@ensure_csrf_cookie
def activate_account_studio(request, key):
"""
When link in activation e-mail is clicked and the link belongs to studio.
"""
try:
registration = Registration.objects.get(activation_key=key)
except (Registration.DoesNotExist, Registration.MultipleObjectsReturned):
return render_to_response(
"registration/activation_invalid.html",
{'csrf': csrf(request)['csrf_token']}
)
else:
user_logged_in = request.user.is_authenticated
already_active = True
if not registration.user.is_active:
if waffle().is_enabled(PREVENT_AUTH_USER_WRITES):
return render_to_response('registration/activation_invalid.html',
{'csrf': csrf(request)['csrf_token']})
registration.activate()
already_active = False
return render_to_response(
"registration/activation_complete.html",
{
'user_logged_in': user_logged_in,
'already_active': already_active
}
)
@csrf_exempt
@require_POST
def password_reset(request):
"""
Attempts to send a password reset e-mail.
"""
# Add some rate limiting here by re-using the RateLimitMixin as a helper class
limiter = BadRequestRateLimiter()
if limiter.is_rate_limit_exceeded(request):
AUDIT_LOG.warning("Rate limit exceeded in password_reset")
return HttpResponseForbidden()
form = PasswordResetFormNoActive(request.POST)
if form.is_valid():
form.save(use_https=request.is_secure(),
from_email=configuration_helpers.get_value('email_from_address', settings.DEFAULT_FROM_EMAIL),
request=request)
# When password change is complete, a "edx.user.settings.changed" event will be emitted.
# But because changing the password is multi-step, we also emit an event here so that we can
# track where the request was initiated.
tracker.emit(
SETTING_CHANGE_INITIATED,
{
"setting": "password",
"old": None,
"new": None,
"user_id": request.user.id,
}
)
destroy_oauth_tokens(request.user)
else:
# bad user? tick the rate limiter counter
AUDIT_LOG.info("Bad password_reset user passed in.")
limiter.tick_bad_request_counter(request)
return JsonResponse({
'success': True,
'value': render_to_string('registration/password_reset_done.html', {}),
})
def uidb36_to_uidb64(uidb36):
"""
Needed to support old password reset URLs that use base36-encoded user IDs
https://github.com/django/django/commit/1184d077893ff1bc947e45b00a4d565f3df81776#diff-c571286052438b2e3190f8db8331a92bR231
Args:
uidb36: base36-encoded user ID
Returns: base64-encoded user ID; if the input cannot be decoded as base36, a dummy, invalid ID is returned
"""
try:
uidb64 = force_text(urlsafe_base64_encode(force_bytes(base36_to_int(uidb36))))
except ValueError:
uidb64 = '1' # dummy invalid ID (incorrect padding for base64)
return uidb64
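# Illustrative sketch (not part of the original module): what the helper above produces.
# Exact return type and padding can differ slightly between Django versions, so treat the
# outputs as assumptions.
#
#     uidb36_to_uidb64('3d')       # base36 '3d' == 121 -> base64 of b'121' -> 'MTIx'
#     uidb36_to_uidb64('!!bad!!')  # -> '1' (dummy, deliberately invalid)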
def password_reset_confirm_wrapper(request, uidb36=None, token=None):
"""
A wrapper around django.contrib.auth.views.password_reset_confirm.
Needed because we want to set the user as active at this step.
We also optionally do some additional password policy checks.
"""
# convert old-style base36-encoded user id to base64
uidb64 = uidb36_to_uidb64(uidb36)
platform_name = {
"platform_name": configuration_helpers.get_value('platform_name', settings.PLATFORM_NAME)
}
try:
uid_int = base36_to_int(uidb36)
user = User.objects.get(id=uid_int)
except (ValueError, User.DoesNotExist):
# if there's any error getting a user, just let django's
# password_reset_confirm function handle it.
return password_reset_confirm(
request, uidb64=uidb64, token=token, extra_context=platform_name
)
if UserRetirementRequest.has_user_requested_retirement(user):
# Refuse to reset the password of any user that has requested retirement.
context = {
'validlink': True,
'form': None,
'title': _('Password reset unsuccessful'),
'err_msg': _('Error in resetting your password.'),
}
context.update(platform_name)
return TemplateResponse(
request, 'registration/password_reset_confirm.html', context
)
if waffle().is_enabled(PREVENT_AUTH_USER_WRITES):
context = {
'validlink': False,
'form': None,
'title': _('Password reset unsuccessful'),
'err_msg': SYSTEM_MAINTENANCE_MSG,
}
context.update(platform_name)
return TemplateResponse(
request, 'registration/password_reset_confirm.html', context
)
if request.method == 'POST':
password = request.POST['new_password1']
try:
validate_password(password, user=user)
except ValidationError as err:
# We have a password reset attempt which violates some security
# policy, or any other validation. Use the existing Django template to communicate that
# back to the user.
context = {
'validlink': True,
'form': None,
'title': _('Password reset unsuccessful'),
'err_msg': err.message,
}
context.update(platform_name)
return TemplateResponse(
request, 'registration/password_reset_confirm.html', context
)
# remember what the old password hash is before we call down
old_password_hash = user.password
response = password_reset_confirm(
request, uidb64=uidb64, token=token, extra_context=platform_name
)
# If password reset was unsuccessful a template response is returned (status_code 200).
# Check if form is invalid then show an error to the user.
# Note if password reset was successful we get response redirect (status_code 302).
if response.status_code == 200:
form_valid = response.context_data['form'].is_valid() if response.context_data['form'] else False
if not form_valid:
log.warning(
u'Unable to reset password for user [%s] because form is not valid. '
u'A possible cause is that the user had an invalid reset token',
user.username,
)
response.context_data['err_msg'] = _('Error in resetting your password. Please try again.')
return response
# get the updated user
updated_user = User.objects.get(id=uid_int)
# did the password hash change, if so record it in the PasswordHistory
if updated_user.password != old_password_hash:
entry = PasswordHistory()
entry.create(updated_user)
else:
response = password_reset_confirm(
request, uidb64=uidb64, token=token, extra_context=platform_name
)
response_was_successful = response.context_data.get('validlink')
if response_was_successful and not user.is_active:
user.is_active = True
user.save()
return response
def validate_new_email(user, new_email):
"""
Given a new email for a user, does some basic verification of the new address. If any issues are
encountered with verification, a ValueError will be thrown.
"""
try:
validate_email(new_email)
except ValidationError:
raise ValueError(_('Valid e-mail address required.'))
if new_email == user.email:
raise ValueError(_('Old email is the same as the new email.'))
if email_exists_or_retired(new_email):
raise ValueError(_('An account with this e-mail already exists.'))
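# Illustrative sketch (not part of the original module): the three failure modes of the
# helper above, with assumed addresses.
#
#     validate_new_email(user, 'not-an-address')   # -> ValueError: Valid e-mail address required.
#     validate_new_email(user, user.email)         # -> ValueError: Old email is the same as the new email.
#     validate_new_email(user, taken_address)      # -> ValueError: An account with this e-mail already exists.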
def do_email_change_request(user, new_email, activation_key=None):
"""
Given a new email for a user, does some basic verification of the new address and sends an activation message
to the new address. If any issues are encountered with verification or sending the message, a ValueError will
be thrown.
"""
pec_list = PendingEmailChange.objects.filter(user=user)
if len(pec_list) == 0:
pec = PendingEmailChange()
pec.user = user
else:
pec = pec_list[0]
# if activation_key is not passed as an argument, generate a random key
if not activation_key:
activation_key = uuid.uuid4().hex
pec.new_email = new_email
pec.activation_key = activation_key
pec.save()
context = {
'key': pec.activation_key,
'old_email': user.email,
'new_email': pec.new_email
}
subject = render_to_string('emails/email_change_subject.txt', context)
subject = ''.join(subject.splitlines())
message = render_to_string('emails/email_change.txt', context)
from_address = configuration_helpers.get_value(
'email_from_address',
settings.DEFAULT_FROM_EMAIL
)
try:
mail.send_mail(subject, message, from_address, [pec.new_email])
except Exception:
log.error(u'Unable to send email activation link to user from "%s"', from_address, exc_info=True)
raise ValueError(_('Unable to send email activation link. Please try again later.'))
# When the email address change is complete, a "edx.user.settings.changed" event will be emitted.
# But because changing the email address is multi-step, we also emit an event here so that we can
# track where the request was initiated.
tracker.emit(
SETTING_CHANGE_INITIATED,
{
"setting": "email",
"old": context['old_email'],
"new": context['new_email'],
"user_id": user.id,
}
)
@ensure_csrf_cookie
def confirm_email_change(request, key): # pylint: disable=unused-argument
"""
User requested a new e-mail. This is called when the activation
link is clicked. We confirm with the old e-mail, and update the user's address to the new e-mail.
"""
if waffle().is_enabled(PREVENT_AUTH_USER_WRITES):
return render_to_response('email_change_failed.html', {'err_msg': SYSTEM_MAINTENANCE_MSG})
with transaction.atomic():
try:
pec = PendingEmailChange.objects.get(activation_key=key)
except PendingEmailChange.DoesNotExist:
response = render_to_response("invalid_email_key.html", {})
transaction.set_rollback(True)
return response
user = pec.user
address_context = {
'old_email': user.email,
'new_email': pec.new_email
}
if len(User.objects.filter(email=pec.new_email)) != 0:
response = render_to_response("email_exists.html", {})
transaction.set_rollback(True)
return response
subject = render_to_string('emails/email_change_subject.txt', address_context)
subject = ''.join(subject.splitlines())
message = render_to_string('emails/confirm_email_change.txt', address_context)
u_prof = UserProfile.objects.get(user=user)
meta = u_prof.get_meta()
if 'old_emails' not in meta:
meta['old_emails'] = []
meta['old_emails'].append([user.email, datetime.datetime.now(UTC).isoformat()])
u_prof.set_meta(meta)
u_prof.save()
# Send it to the old email...
try:
user.email_user(
subject,
message,
configuration_helpers.get_value('email_from_address', settings.DEFAULT_FROM_EMAIL)
)
except Exception: # pylint: disable=broad-except
log.warning('Unable to send confirmation email to old address', exc_info=True)
response = render_to_response("email_change_failed.html", {'email': user.email})
transaction.set_rollback(True)
return response
user.email = pec.new_email
user.save()
pec.delete()
# And send it to the new email...
try:
user.email_user(
subject,
message,
configuration_helpers.get_value('email_from_address', settings.DEFAULT_FROM_EMAIL)
)
except Exception: # pylint: disable=broad-except
log.warning('Unable to send confirmation email to new address', exc_info=True)
response = render_to_response("email_change_failed.html", {'email': pec.new_email})
transaction.set_rollback(True)
return response
response = render_to_response("email_change_successful.html", address_context)
return response
@require_POST
@login_required
@ensure_csrf_cookie
def change_email_settings(request):
"""
Modify logged-in user's setting for receiving emails from a course.
"""
user = request.user
course_id = request.POST.get("course_id")
course_key = CourseKey.from_string(course_id)
receive_emails = request.POST.get("receive_emails")
if receive_emails:
optout_object = Optout.objects.filter(user=user, course_id=course_key)
if optout_object:
optout_object.delete()
log.info(
u"User %s (%s) opted in to receive emails from course %s",
user.username,
user.email,
course_id,
)
track.views.server_track(
request,
"change-email-settings",
{"receive_emails": "yes", "course": course_id},
page='dashboard',
)
else:
Optout.objects.get_or_create(user=user, course_id=course_key)
log.info(
u"User %s (%s) opted out of receiving emails from course %s",
user.username,
user.email,
course_id,
)
track.views.server_track(
request,
"change-email-settings",
{"receive_emails": "no", "course": course_id},
page='dashboard',
)
return JsonResponse({"success": True})
@ensure_csrf_cookie
def text_me_the_app(request):
"""
Text me the app view.
"""
text_me_fragment = TextMeTheAppFragmentView().render_to_fragment(request)
context = {
'nav_hidden': True,
'show_dashboard_tabs': True,
'show_program_listing': ProgramsApiConfig.is_enabled(),
'fragment': text_me_fragment
}
return render_to_response('text-me-the-app.html', context)
| agpl-3.0 | 8,491,777,773,612,389,000 | 39.850345 | 126 | 0.656864 | false |
rajashreer7/autotest-client-tests | linux-tools/nss_pam_ldapd/nss_pam_ldapd.py | 3 | 1154 |
#!/bin/python
import os, subprocess
import logging
from autotest.client import test
from autotest.client.shared import error
class nss_pam_ldapd(test.test):
"""
Autotest module for testing basic functionality
of nss_pam_ldapd
@author
"""
version = 1
nfail = 0
path = ''
def initialize(self):
"""
Sets the overall failure counter for the test.
"""
self.nfail = 0
logging.info('\n Test initialized successfully')
def run_once(self, test_path=''):
"""
Trigger test run
"""
try:
os.environ["LTPBIN"] = "%s/shared" %(test_path)
ret_val = subprocess.call(test_path + '/nss_pam_ldapd' + '/nss_ldap.sh', shell=True)
if ret_val != 0:
self.nfail += 1
except error.CmdError, e:
self.nfail += 1
logging.error("Test Failed: %s", e)
def postprocess(self):
if self.nfail != 0:
logging.info('\n nfails is non-zero')
raise error.TestError('\nTest failed')
else:
logging.info('\n Test completed successfully ')
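# Illustrative sketch (not part of the original module): in autotest this class would
# normally be triggered from a control file; the exact call shape and path are assumptions.
#
#     job.run_test('nss_pam_ldapd', test_path='/opt/autotest-client-tests/linux-tools')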
| gpl-2.0 | -7,909,553,592,994,969,000 | 23.553191 | 96 | 0.551127 | false |
tectronics/brahms-md | analysisScripts/general/getAverageProfile.py | 1 | 1548 |
#!/usr/bin/python2
# Julien Michel june 2007
# usage is getAverageProfile.py file1.dat file2.dat file3.dat
# where fileX can be an electron density profile, or a lateral pressure profile.
# can probably be used for other outputs like electron pressure profile etc...
import os,sys
averages = []
resized = False
for file in sys.argv[1:]:
stream = open(file,'r')
buffer = stream.readlines()
stream.close()
if not resized:
#check = buffer[0].split()
check = len(buffer)
for x in range(0,check):
averages.append([])
resized = True
#print averages
for y in range(0,len(buffer)):
elems = buffer[y].split()
nprops = len(elems)
for x in range(0,len(elems)):
#print elems[x]
averages[y].append(float(elems[x]))
#print file
#print len(averages[0])
#print len(averages)
#for y in range(0,len(averages[0])):
# for x in range(0,len(averages)):
# #print x,y
# print averages[x][y]
# #for val in av:
# # print val
# #sys.exit(-1)
# Now collapse
for line in averages:
avgs = []
for x in range(0,nprops):
prop = 0
y = x
count = 0
while (y < len(line)):
prop += line[y]
count +=1
#print y,prop
y += nprops
prop /= count
#print prop
avgs.append(prop)
#print line
str = " "
for val in avgs:
str += "%8.5f " % val
print str
#sys.exit(-1)
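# Illustrative usage sketch (not part of the original script); file names are assumptions.
# All inputs must have the same number of rows and columns, and the averaged profile is
# printed to stdout, so redirect it to a file:
#
#     python2 getAverageProfile.py run1/pressure.dat run2/pressure.dat run3/pressure.dat > average.dat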
| gpl-3.0 | -5,348,391,275,696,439,000 | 24.377049 | 80 | 0.540698 | false |
gsnedders/Template-Python | t/directory_test.py | 1 | 5161 |
import os
from template.test import TestCase, main
class DirectoryTest(TestCase):
def testDirectory(self):
vars = { 'cwd': os.getcwd(),
'dir': 'test/dir',
'dot': '../..' }
self.Expect(DATA, None, vars)
# The final test is commented out, due to lack of VIEW support.
DATA = r"""
-- test --
[% TRY ;
USE Directory ;
CATCH ;
error ;
END
-%]
-- expect --
Directory error - no directory specified
-- test --
[% TRY ;
USE Directory('/no/such/place') ;
CATCH ;
error.type ; ' error on ' ; error.info.split(':').0 ;
END
-%]
-- expect --
Directory error on /no/such/place
-- test --
[% USE d = Directory(dir, nostat=1) -%]
[% d.path %]
-- expect --
-- process --
[% dir %]
-- test --
[% USE d = Directory(dir) -%]
[% d.path %]
-- expect --
-- process --
[% dir %]
-- test --
[% USE directory(dir) -%]
[% directory.path %]
-- expect --
-- process --
[% dir %]
-- test --
[% USE d = Directory(dir) -%]
[% FOREACH f = d.files -%]
- [% f.name %]
[% END -%]
[% FOREACH f = d.dirs; NEXT IF f.name == 'CVS'; -%]
* [% f.name %]
[% END %]
-- expect --
- file1
- file2
- xyzfile
* sub_one
* sub_two
-- test --
[% USE dir = Directory(dir) -%]
[% INCLUDE dir %]
[% BLOCK dir -%]
* [% dir.name %]
[% FOREACH f = dir.files -%]
- [% f.name %]
[% END -%]
[% FOREACH f = dir.dirs; NEXT IF f.name == 'CVS'; -%]
[% f.scan -%]
[% INCLUDE dir dir=f FILTER indent(4) -%]
[% END -%]
[% END -%]
-- expect --
* dir
- file1
- file2
- xyzfile
* sub_one
- bar
- foo
* sub_two
- waz.html
- wiz.html
-- test --
[% USE dir = Directory(dir) -%]
* [% dir.path %]
[% INCLUDE dir %]
[% BLOCK dir;
FOREACH f = dir.list ;
NEXT IF f.name == 'CVS';
IF f.isdir ; -%]
* [% f.name %]
[% f.scan ;
INCLUDE dir dir=f FILTER indent(4) ;
ELSE -%]
- [% f.name %]
[% END ;
END ;
END -%]
-- expect --
-- process --
* [% dir %]
- file1
- file2
* sub_one
- bar
- foo
* sub_two
- waz.html
- wiz.html
- xyzfile
-- test --
[% USE d = Directory(dir, recurse=1) -%]
[% FOREACH f = d.files -%]
- [% f.name %]
[% END -%]
[% FOREACH f = d.dirs; NEXT IF f.name == 'CVS'; -%]
* [% f.name %]
[% END %]
-- expect --
- file1
- file2
- xyzfile
* sub_one
* sub_two
-- test --
[% USE dir = Directory(dir, recurse=1, root=cwd) -%]
* [% dir.path %]
[% INCLUDE dir %]
[% BLOCK dir;
FOREACH f = dir.list ;
NEXT IF f.name == 'CVS';
IF f.isdir ; -%]
* [% f.name %] => [% f.path %] => [% f.abs %]
[% INCLUDE dir dir=f FILTER indent(4) ;
ELSE -%]
- [% f.name %] => [% f.path %] => [% f.abs %]
[% END ;
END ;
END -%]
-- expect --
-- process --
* [% dir %]
- file1 => [% dir %]/file1 => [% cwd %]/[% dir %]/file1
- file2 => [% dir %]/file2 => [% cwd %]/[% dir %]/file2
* sub_one => [% dir %]/sub_one => [% cwd %]/[% dir %]/sub_one
- bar => [% dir %]/sub_one/bar => [% cwd %]/[% dir %]/sub_one/bar
- foo => [% dir %]/sub_one/foo => [% cwd %]/[% dir %]/sub_one/foo
* sub_two => [% dir %]/sub_two => [% cwd %]/[% dir %]/sub_two
- waz.html => [% dir %]/sub_two/waz.html => [% cwd %]/[% dir %]/sub_two/waz.html
- wiz.html => [% dir %]/sub_two/wiz.html => [% cwd %]/[% dir %]/sub_two/wiz.html
- xyzfile => [% dir %]/xyzfile => [% cwd %]/[% dir %]/xyzfile
-- test --
[% USE dir = Directory(dir, recurse=1, root=cwd) -%]
* [% dir.path %]
[% INCLUDE dir %]
[% BLOCK dir;
FOREACH f = dir.list ;
NEXT IF f.name == 'CVS';
IF f.isdir ; -%]
* [% f.name %] => [% f.home %]
[% INCLUDE dir dir=f FILTER indent(4) ;
ELSE -%]
- [% f.name %] => [% f.home %]
[% END ;
END ;
END -%]
-- expect --
-- process --
* [% dir %]
- file1 => [% dot %]
- file2 => [% dot %]
* sub_one => [% dot %]
- bar => [% dot %]/..
- foo => [% dot %]/..
* sub_two => [% dot %]
- waz.html => [% dot %]/..
- wiz.html => [% dot %]/..
- xyzfile => [% dot %]
-- test --
[% USE dir = Directory(dir) -%]
[% file = dir.file('xyzfile') -%]
[% file.name %]
-- expect --
xyzfile
-- test --
[% USE dir = Directory('.', root=dir) -%]
[% dir.name %]
[% FOREACH f = dir.files -%]
- [% f.name %]
[% END -%]
-- expect --
.
- file1
- file2
- xyzfile
# -- test --
# [% VIEW filelist -%]
#
# [% BLOCK file -%]
# f [% item.name %] => [% item.path %]
# [% END -%]
#
# [% BLOCK directory; NEXT IF item.name == 'CVS'; -%]
# d [% item.name %] => [% item.path %]
# [% item.content(view) | indent -%]
# [% END -%]
#
# [% END -%]
# [% USE dir = Directory(dir, recurse=1) -%]
# [% filelist.print(dir) %]
# -- expect --
# -- process --
# d dir => [% dir %]
# f file1 => [% dir %]/file1
# f file2 => [% dir %]/file2
# d sub_one => [% dir %]/sub_one
# f bar => [% dir %]/sub_one/bar
# f foo => [% dir %]/sub_one/foo
# d sub_two => [% dir %]/sub_two
# f waz.html => [% dir %]/sub_two/waz.html
# f wiz.html => [% dir %]/sub_two/wiz.html
# f xyzfile => [% dir %]/xyzfile
#
#
#
"""
| artistic-2.0 | 2,206,214,656,264,825,000 | 19.726908 | 88 | 0.450494 | false |
crypotex/taas | taas/user/forms.py | 1 | 4141 |
from django import forms
from django.contrib.auth import forms as auth_forms, get_user_model
from django.utils.translation import ugettext_lazy as _
class UserCreationForm(auth_forms.UserCreationForm):
required_css_class = 'required'
password1 = forms.CharField(label=_("Password"), min_length=8, max_length=64,
widget=forms.PasswordInput,
help_text=_("The password has to be at least 8 characters long."))
password2 = forms.CharField(label=_("Password confirmation"), min_length=8, max_length=64,
widget=forms.PasswordInput)
class Meta(object):
model = get_user_model()
fields = (
'first_name',
'last_name',
'email',
'password1',
'password2',
'phone_number'
)
class UserChangeFormAdmin(auth_forms.UserChangeForm):
class Meta(object):
model = get_user_model()
fields = '__all__'
class UserUpdateForm(forms.ModelForm):
change_password = forms.BooleanField(label=_("Change password"), required=False)
new_password1 = forms.CharField(label=_("New password"), min_length=8, max_length=64, required=False,
widget=forms.PasswordInput)
new_password2 = forms.CharField(label=_("New password confirmation"), min_length=8, max_length=64, required=False,
widget=forms.PasswordInput)
old_password = forms.CharField(label=_("Old password"), required=False,
widget=forms.PasswordInput)
class Meta(object):
model = get_user_model()
fields = ['first_name', 'last_name', 'phone_number',
'old_password', 'new_password1', 'new_password2']
def clean(self):
super(UserUpdateForm, self).clean()
if self.errors:
return
is_password_change = self.cleaned_data.get('change_password', False)
if is_password_change:
old_pass = self.cleaned_data['old_password']
new_pass1 = self.cleaned_data['new_password1']
new_pass2 = self.cleaned_data['new_password2']
if not old_pass:
raise forms.ValidationError(_('Old password is required.'))
elif not new_pass1:
raise forms.ValidationError(_('New password is required.'))
elif not new_pass2:
raise forms.ValidationError(_('New password confirmation is required.'))
if not self.instance.check_password(old_pass):
raise forms.ValidationError(
_('Your old password was entered incorrectly. Please enter it again.'))
if new_pass1 != new_pass2:
raise forms.ValidationError(_("The two new password fields didn't match."))
def save(self, commit=True):
if self.cleaned_data['change_password']:
self.instance.set_password(self.cleaned_data['new_password1'])
if commit:
self.instance.save()
return super(UserUpdateForm, self).save(commit)
class UserDeactivateForm(forms.Form):
def __init__(self, user, **kwargs):
super(UserDeactivateForm, self).__init__(**kwargs)
self.user = user
password = forms.CharField(label=_("Password"), widget=forms.PasswordInput)
def clean_password(self):
password = self.cleaned_data['password']
if not self.user.check_password(password):
raise forms.ValidationError(
_('Your password was entered incorrectly. Please enter it again.'))
class CustomPasswordSetForm(auth_forms.SetPasswordForm):
new_password1 = forms.CharField(label=_("New password"), min_length=8, max_length=64,
widget=forms.PasswordInput)
new_password2 = forms.CharField(label=_("New password confirmation"), min_length=8, max_length=64,
widget=forms.PasswordInput)
class AddBalanceForm(forms.Form):
amount = forms.IntegerField(min_value=1, label=_("Amount"))
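# Illustrative sketch (not part of the original module): driving the password-change path
# of UserUpdateForm from a view or test. Field values are assumptions.
#
#     form = UserUpdateForm(
#         instance=user,
#         data={'first_name': 'Mari', 'last_name': 'Maasikas', 'phone_number': '+3725551234',
#               'change_password': True, 'old_password': 'current-pass',
#               'new_password1': 'brand-new-pass', 'new_password2': 'brand-new-pass'},
#     )
#     if form.is_valid():
#         form.save()   # hashes and stores the new password via set_password()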
| gpl-2.0 | 6,816,467,417,513,750,000 | 39.203883 | 118 | 0.602753 | false |
zhaochao/fuel-web | nailgun/nailgun/objects/role.py | 1 | 3212 |
# -*- coding: utf-8 -*-
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from copy import deepcopy
from nailgun.api.v1.validators.json_schema.role import SCHEMA
from nailgun.db import db
from nailgun.db.sqlalchemy import models
from nailgun.objects import NailgunCollection
from nailgun.objects import NailgunObject
from nailgun.objects.serializers.role import RoleSerializer
class Role(NailgunObject):
model = models.Role
schema = SCHEMA
serializer = RoleSerializer
@classmethod
def _update_release(cls, role, data):
# NOTE(dshulyak) roles data is stored in release metadata; from my pov
# this is bad and should be fixed, but it is used all over the place
# and on the UI, so let's work with the way it is stored now
volumes = data.get('volumes_roles_mapping', {})
meta = data.get('meta', {})
cls._update_volumes(role, volumes)
cls._update_meta(role, meta)
@classmethod
def _delete_from_release(cls, role):
volumes_meta = deepcopy(role.release.volumes_metadata)
del volumes_meta['volumes_roles_mapping'][role.name]
role.release.volumes_metadata = volumes_meta
roles_meta = deepcopy(role.release.roles_metadata)
del roles_meta[role.name]
role.release.roles_metadata = roles_meta
@classmethod
def _update_volumes(cls, role, volumes):
volumes_meta = deepcopy(role.release.volumes_metadata)
volumes_meta['volumes_roles_mapping'][role.name] = volumes
role.release.volumes_metadata = volumes_meta
@classmethod
def _update_meta(cls, role, meta):
roles_meta = deepcopy(role.release.roles_metadata)
roles_meta[role.name] = meta
role.release.roles_metadata = roles_meta
@classmethod
def create(cls, release, data):
role = cls.model(name=data['name'], release=release)
cls._update_release(role, data)
db().add(role)
db().flush()
return role
@classmethod
def update(cls, role, data):
role.name = data['name']
cls._update_release(role, data)
db().flush()
return role
@classmethod
def delete(cls, role):
cls._delete_from_release(role)
return super(Role, cls).delete(role)
@classmethod
def get_by_release_id_role_name(cls, release_id, role_name):
"""Get first role by release and role ids.
In case if role is not found return None.
"""
return db().query(cls.model).filter_by(
name=role_name, release_id=release_id).first()
class RoleCollection(NailgunCollection):
single = Role
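# Illustrative sketch (not part of the original module): creating a role attaches its
# volume mapping and metadata to the parent release, as _update_release() shows above.
# The field values and the inner mapping structure are assumptions.
#
#     Role.create(release, {
#         'name': 'mongo',
#         'meta': {'name': 'Mongo DB', 'description': 'Standalone MongoDB node'},
#         'volumes_roles_mapping': [{'id': 'os', 'allocate_size': 'all'}],
#     })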
| apache-2.0 | 5,119,622,813,998,003,000 | 29.884615 | 78 | 0.665006 | false |
apache/airflow | airflow/contrib/operators/sqoop_operator.py | 2 | 1140 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated. Please use :mod:`airflow.providers.apache.sqoop.operators.sqoop`."""
import warnings
from airflow.providers.apache.sqoop.operators.sqoop import SqoopOperator # noqa
warnings.warn(
"This module is deprecated. Please use `airflow.providers.apache.sqoop.operators.sqoop`.",
DeprecationWarning,
stacklevel=2,
)
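# Illustrative migration sketch (not part of the original shim): replace imports of this
# deprecated path with the provider package the deprecation warning points to.
#
#     # before
#     from airflow.contrib.operators.sqoop_operator import SqoopOperator
#     # after
#     from airflow.providers.apache.sqoop.operators.sqoop import SqoopOperator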
| apache-2.0 | 3,177,801,697,294,219,000 | 39.714286 | 98 | 0.769298 | false |
marmyshev/transitions | openlp/plugins/songs/forms/songmaintenancedialog.py | 1 | 9854 |
# -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2013 Raoul Snyman #
# Portions copyright (c) 2008-2013 Tim Bentley, Gerald Britton, Jonathan #
# Corwin, Samuel Findlay, Michael Gorven, Scott Guerrieri, Matthias Hub, #
# Meinert Jordan, Armin Köhler, Erik Lundin, Edwin Lunando, Brian T. Meyer. #
# Joshua Miller, Stevan Pettit, Andreas Preikschat, Mattias Põldaru, #
# Christian Richter, Philip Ridout, Simon Scudder, Jeffrey Smith, #
# Maikel Stuivenberg, Martin Thompson, Jon Tibble, Dave Warnock, #
# Frode Woldsund, Martin Zibricky, Patrick Zimmermann #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
from PyQt4 import QtCore, QtGui
from openlp.core.lib import UiStrings, build_icon
from openlp.core.lib.ui import create_button_box
from openlp.plugins.songs.lib.ui import SongStrings
class Ui_SongMaintenanceDialog(object):
def setupUi(self, songMaintenanceDialog):
songMaintenanceDialog.setObjectName(u'songMaintenanceDialog')
songMaintenanceDialog.setWindowModality(QtCore.Qt.ApplicationModal)
songMaintenanceDialog.resize(10, 350)
self.dialogLayout = QtGui.QGridLayout(songMaintenanceDialog)
self.dialogLayout.setObjectName(u'dialog_layout')
self.typeListWidget = QtGui.QListWidget(songMaintenanceDialog)
self.typeListWidget.setIconSize(QtCore.QSize(32, 32))
self.typeListWidget.setUniformItemSizes(True)
self.typeListWidget.setObjectName(u'typeListWidget')
self.listItemAuthors = QtGui.QListWidgetItem(self.typeListWidget)
self.listItemAuthors.setIcon(build_icon(u':/songs/author_maintenance.png'))
self.listItemTopics = QtGui.QListWidgetItem(self.typeListWidget)
self.listItemTopics.setIcon(build_icon(u':/songs/topic_maintenance.png'))
self.listItemBooks = QtGui.QListWidgetItem(self.typeListWidget)
self.listItemBooks.setIcon(build_icon(u':/songs/book_maintenance.png'))
self.dialogLayout.addWidget(self.typeListWidget, 0, 0)
self.stackedLayout = QtGui.QStackedLayout()
self.stackedLayout.setObjectName(u'stackedLayout')
# authors page
self.authorsPage = QtGui.QWidget(songMaintenanceDialog)
self.authorsPage.setObjectName(u'authorsPage')
self.authorsLayout = QtGui.QVBoxLayout(self.authorsPage)
self.authorsLayout.setObjectName(u'authorsLayout')
self.authorsListWidget = QtGui.QListWidget(self.authorsPage)
self.authorsListWidget.setObjectName(u'authorsListWidget')
self.authorsLayout.addWidget(self.authorsListWidget)
self.authorsButtonsLayout = QtGui.QHBoxLayout()
self.authorsButtonsLayout.setObjectName(u'authorsButtonsLayout')
self.authorsButtonsLayout.addStretch()
self.authorsAddButton = QtGui.QPushButton(self.authorsPage)
self.authorsAddButton.setIcon(build_icon(u':/songs/author_add.png'))
self.authorsAddButton.setObjectName(u'authorsAddButton')
self.authorsButtonsLayout.addWidget(self.authorsAddButton)
self.authorsEditButton = QtGui.QPushButton(self.authorsPage)
self.authorsEditButton.setIcon(build_icon(u':/songs/author_edit.png'))
self.authorsEditButton.setObjectName(u'authorsEditButton')
self.authorsButtonsLayout.addWidget(self.authorsEditButton)
self.authorsDeleteButton = QtGui.QPushButton(self.authorsPage)
self.authorsDeleteButton.setIcon(build_icon(u':/songs/author_delete.png'))
self.authorsDeleteButton.setObjectName(u'authorsDeleteButton')
self.authorsButtonsLayout.addWidget(self.authorsDeleteButton)
self.authorsLayout.addLayout(self.authorsButtonsLayout)
self.stackedLayout.addWidget(self.authorsPage)
# topics page
self.topicsPage = QtGui.QWidget(songMaintenanceDialog)
self.topicsPage.setObjectName(u'topicsPage')
self.topicsLayout = QtGui.QVBoxLayout(self.topicsPage)
self.topicsLayout.setObjectName(u'topicsLayout')
self.topicsListWidget = QtGui.QListWidget(self.topicsPage)
self.topicsListWidget.setObjectName(u'topicsListWidget')
self.topicsLayout.addWidget(self.topicsListWidget)
self.topicsButtonsLayout = QtGui.QHBoxLayout()
self.topicsButtonsLayout.setObjectName(u'topicsButtonLayout')
self.topicsButtonsLayout.addStretch()
self.topicsAddButton = QtGui.QPushButton(self.topicsPage)
self.topicsAddButton.setIcon(build_icon(u':/songs/topic_add.png'))
self.topicsAddButton.setObjectName(u'topicsAddButton')
self.topicsButtonsLayout.addWidget(self.topicsAddButton)
self.topicsEditButton = QtGui.QPushButton(self.topicsPage)
self.topicsEditButton.setIcon(build_icon(u':/songs/topic_edit.png'))
self.topicsEditButton.setObjectName(u'topicsEditButton')
self.topicsButtonsLayout.addWidget(self.topicsEditButton)
self.topicsDeleteButton = QtGui.QPushButton(self.topicsPage)
self.topicsDeleteButton.setIcon(build_icon(u':/songs/topic_delete.png'))
self.topicsDeleteButton.setObjectName(u'topicsDeleteButton')
self.topicsButtonsLayout.addWidget(self.topicsDeleteButton)
self.topicsLayout.addLayout(self.topicsButtonsLayout)
self.stackedLayout.addWidget(self.topicsPage)
# song books page
self.booksPage = QtGui.QWidget(songMaintenanceDialog)
self.booksPage.setObjectName(u'booksPage')
self.booksLayout = QtGui.QVBoxLayout(self.booksPage)
self.booksLayout.setObjectName(u'booksLayout')
self.booksListWidget = QtGui.QListWidget(self.booksPage)
self.booksListWidget.setObjectName(u'booksListWidget')
self.booksLayout.addWidget(self.booksListWidget)
self.booksButtonsLayout = QtGui.QHBoxLayout()
self.booksButtonsLayout.setObjectName(u'booksButtonLayout')
self.booksButtonsLayout.addStretch()
self.booksAddButton = QtGui.QPushButton(self.booksPage)
self.booksAddButton.setIcon(build_icon(u':/songs/book_add.png'))
self.booksAddButton.setObjectName(u'booksAddButton')
self.booksButtonsLayout.addWidget(self.booksAddButton)
self.booksEditButton = QtGui.QPushButton(self.booksPage)
self.booksEditButton.setIcon(build_icon(u':/songs/book_edit.png'))
self.booksEditButton.setObjectName(u'booksEditButton')
self.booksButtonsLayout.addWidget(self.booksEditButton)
self.booksDeleteButton = QtGui.QPushButton(self.booksPage)
self.booksDeleteButton.setIcon(build_icon(u':/songs/book_delete.png'))
self.booksDeleteButton.setObjectName(u'booksDeleteButton')
self.booksButtonsLayout.addWidget(self.booksDeleteButton)
self.booksLayout.addLayout(self.booksButtonsLayout)
self.stackedLayout.addWidget(self.booksPage)
#
self.dialogLayout.addLayout(self.stackedLayout, 0, 1)
self.button_box = create_button_box(songMaintenanceDialog, u'button_box', [u'close'])
self.dialogLayout.addWidget(self.button_box, 1, 0, 1, 2)
self.retranslateUi(songMaintenanceDialog)
self.stackedLayout.setCurrentIndex(0)
QtCore.QObject.connect(self.typeListWidget, QtCore.SIGNAL(u'currentRowChanged(int)'),
self.stackedLayout.setCurrentIndex)
def retranslateUi(self, songMaintenanceDialog):
songMaintenanceDialog.setWindowTitle(SongStrings.SongMaintenance)
self.listItemAuthors.setText(SongStrings.Authors)
self.listItemTopics.setText(SongStrings.Topics)
self.listItemBooks.setText(SongStrings.SongBooks)
self.authorsAddButton.setText(UiStrings().Add)
self.authorsEditButton.setText(UiStrings().Edit)
self.authorsDeleteButton.setText(UiStrings().Delete)
self.topicsAddButton.setText(UiStrings().Add)
self.topicsEditButton.setText(UiStrings().Edit)
self.topicsDeleteButton.setText(UiStrings().Delete)
self.booksAddButton.setText(UiStrings().Add)
self.booksEditButton.setText(UiStrings().Edit)
self.booksDeleteButton.setText(UiStrings().Delete)
typeListWidth = max(self.fontMetrics().width(SongStrings.Authors),
self.fontMetrics().width(SongStrings.Topics), self.fontMetrics().width(SongStrings.SongBooks))
self.typeListWidget.setFixedWidth(typeListWidth + self.typeListWidget.iconSize().width() + 32)
| gpl-2.0 | 908,024,984,709,142,800 | 62.153846 | 106 | 0.68717 | false |
jia-kai/hearv | disp_freq.py | 1 | 1942 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
# $File: disp_freq.py
# $Date: Sun Nov 23 12:45:25 2014 +0800
# $Author: jiakai <[email protected]>
import matplotlib.pyplot as plt
import numpy as np
import argparse
import json
def main():
parser = argparse.ArgumentParser()
parser.add_argument('fpath', help='array json fpath')
parser.add_argument('--sample_rate', type=float, default=59.940)
parser.add_argument('--fl', type=float, default=5,
help='low cutoff')
parser.add_argument('--dmin', type=int, help='min index of data')
parser.add_argument('--dnr', type=int, help='number of data points used')
parser.add_argument('--no_shift_mean', action='store_true',
help='do not shift mean value to zero')
parser.add_argument('--clip', type=float,
help='clip all samples to be within range [-x, x]')
parser.add_argument('-o', '--output',
help='outpout the plot')
args = parser.parse_args()
with open(args.fpath) as fin:
vals = np.array(json.load(fin))
if not args.no_shift_mean:
vals -= np.mean(vals)
if args.clip:
vals = np.clip(vals, -args.clip, args.clip)
if args.dmin:
vals = vals[args.dmin:]
if args.dnr:
vals = vals[:args.dnr]
fig = plt.figure()
ax = fig.add_subplot(2, 1, 1)
ax.set_xlabel('sample number')
ax.set_ylabel('displacement')
ax.plot(vals)
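    # Keep only the positive-frequency half of the spectrum and build a
    # matching frequency axis with bins spaced sample_rate / N apart.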
fft = np.fft.fft(vals)[:len(vals) / 2]
freq = args.sample_rate / len(vals) * np.arange(1, len(fft) + 1)
if args.fl > 0:
fl = min(np.nonzero(freq >= args.fl)[0])
fft = fft[fl:]
freq = freq[fl:]
ax = fig.add_subplot(2, 1, 2)
ax.set_xlabel('freq')
ax.set_ylabel('amplitude')
ax.plot(freq, np.abs(fft))
if args.output:
fig.savefig(args.output)
plt.show()
if __name__ == '__main__':
main()
| unlicense | 6,850,090,640,253,229,000 | 29.34375 | 77 | 0.57827 | false |
pu6ki/elsyser | elsyser/settings.py | 1 | 4974 | """
Django settings for elsyser project.
Generated by 'django-admin startproject' using Django 1.10.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
import dj_database_url
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'o&^!dw@l88j8_a#55kq=bgfnupc5(y%!es5^c_cyvp=t_z0%ey'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['10.20.1.109', '84.238.224.47']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'rest_framework_word_filter',
'rest_framework_docs',
'djoser',
'corsheaders',
'vote',
'students',
'news',
'exams',
'homeworks',
'materials',
'talks',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'elsyser.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'elsyser.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Cross-Origin Resource Sharing settings
CORS_ORIGIN_ALLOW_ALL = True
CORS_ALLOW_HEADERS = (
'accept',
'accept-encoding',
'authorization',
'content-type',
'dnt',
'origin',
'user-agent',
'x-csrftoken',
'x-requested-with',
)
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/Sofia'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
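# Let the DATABASE_URL environment variable (if set, e.g. on Heroku) override
# the sqlite3 default above; conn_max_age enables persistent connections.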
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static')
]
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
# Email SMTP settings
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
EMAIL_HOST_USER = os.environ.get('DJANGO_EMAIL_HOST_USER', '')
EMAIL_HOST_PASSWORD = os.environ.get('DJANGO_EMAIL_HOST_PASSWORD', '')
EMAIL_USE_TLS = True
DEFAULT_FROM_EMAIL = '[email protected]'
# Django REST Framework settings
REST_FRAMEWORK = {
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
'PAGE_SIZE': 5,
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.BasicAuthentication',
'rest_framework.authentication.TokenAuthentication',
)
}
# Djoser settings
DJOSER = {
'DOMAIN': 'elsyser.netlify.com',
'SITE_NAME': 'ELSYSER',
'PASSWORD_RESET_CONFIRM_URL': '#/auth/password/reset/confirm/{uid}/{token}',
'PASSWORD_RESET_SHOW_EMAIL_NOT_FOUND': True
}
| mit | -2,167,837,604,075,419,100 | 23.502463 | 91 | 0.681946 | false |
moertle/pyaas | pyaas/web/auth/facebook.py | 1 | 2166 |
import datetime
import pyaas
import tornado.auth
import tornado.escape
import tornado.web
#
# Facebook Authentication
#
FBURL = '%s://%s/login/facebook?next=%s'
class Facebook(tornado.web.RequestHandler, tornado.auth.FacebookGraphMixin):
@tornado.web.asynchronous
def get(self):
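        # OAuth2 flow: on the first request there is no "code" argument, so we
        # redirect the user to Facebook's authorize endpoint; Facebook then
        # redirects back here with "code" set, which we exchange for an access
        # token via get_authenticated_user().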
redir = tornado.escape.url_escape(self.get_argument('next', '/'))
my_url = FBURL % (self.request.protocol, self.request.host, redir)
code = self.get_argument('code', None)
if code:
self.get_authenticated_user(
redirect_uri = my_url,
client_id = pyaas.config.get('facebook', 'api_key'),
client_secret = pyaas.config.get('facebook', 'secret'),
code = code,
callback = self._on_auth
)
else:
self.authorize_redirect(
redirect_uri = my_url,
client_id = pyaas.config.get('facebook', 'api_key'),
extra_params = {'scope': 'email'}
)
def _on_auth(self, fbuser):
if not fbuser:
raise tornado.web.HTTPError(500, 'Facebook authentication failed')
profile = pyaas.db.FindProfile('fbid', fbuser['id'])
if not profile:
self.facebook_request(
'/me',
access_token = fbuser['access_token'],
callback = self._on_me
)
else:
self.set_secure_cookie('uid', str(profile['uid']))
self.redirect(self.get_argument('next', '/'))
def _on_me(self, fbuser):
profile = pyaas.db.FindProfile('email', fbuser['email'])
if not profile:
profile = dict(
email = fbuser['email'],
display = fbuser['name'],
fbid = fbuser['id'],
firstLogin = datetime.datetime.now()
)
uid = pyaas.db.SaveProfile(profile)
self.set_secure_cookie('uid', str(uid))
else:
self.set_secure_cookie('uid', str(profile['uid']))
# TODO: update facebook id
self.redirect(self.get_argument('next', '/'))
| mit | -3,376,014,447,902,641,000 | 29.942857 | 78 | 0.519391 | false |
ansp-2015/arquea | protocolo/forms.py | 1 | 13060 | # -*- coding: utf-8 -*-
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from django import forms
from django.forms.utils import ErrorList
from django.utils.translation import ugettext_lazy as _
from models import Cotacao, TipoDocumento, Protocolo, Feriado, Arquivo, Descricao
from outorga.models import Termo
from identificacao.models import Entidade, Identificacao
import logging
# Get an instance of a logger
logger = logging.getLogger(__name__)
# class ContratoAdminForm(forms.ModelForm):
#
# """
# An instance of this class customizes/restricts the admin form used to register the 'Contrato' model.
#
# The '__init__' method: makes the 'data_vencimento' field required when registering a Contrato;
# sets a new label for the field pointing to the previous contract and allows
# protocols defined as 'Contrato' or 'Ordem de Serviço' to be selected as the previous contract;
# sets a new label for the 'identificacao' field;
# restricts the document type choices to the 'Contrato' and 'Ordem de Serviço' options;
# adds an 'entidade' field used to filter the identification field.
# The 'class Meta' defines which model is used.
# """
#
#
# entidade = forms.ModelChoiceField(Entidade.objects.all(), required=False,
# widget=forms.Select(attrs={'onchange': 'filter_select("id_identificacao", "id_entidade");'}))
#
#
# class Meta:
# model = Contrato
#
#
# class Media:
# js = ('/media/js/selects.js', '/media/js/protocolo.js')
#
#
# # Redefine the 'data_vencimento', 'protocolo', 'tipo_documento' and 'identificacao' fields.
# def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None,
# initial=None, error_class=ErrorList, label_suffix=':',
# empty_permitted=False, instance=None):
#
# super(ContratoAdminForm, self).__init__(data, files, auto_id, prefix, initial,
# error_class, label_suffix, empty_permitted, instance)
#
# # Make the due date (data_vencimento) required.
# dv = self.fields['data_vencimento']
# dv.required = True
#
#
# # Set a new label for the previous protocol field and only allow 'Contrato' to be selected.
# pt = self.fields['protocolo']
# pt.label = u'Contrato anterior'
# pt.queryset = Protocolo.objects.filter(tipo_documento__nome__in=[u'Contrato'])
#
#
# # Only allow the 'Contrato' and 'Ordem de Serviço' options for the document type.
# tp = self.fields['tipo_documento']
# tp.queryset = TipoDocumento.objects.filter(nome__in=[u'Contrato', u'Ordem de Serviço'])
#
#
# # Set a new label for the identification field.
# iden = self.fields['identificacao']
# iden.label = u'Contato'
class CotacaoAdminForm(forms.ModelForm):
"""
    An instance of this class customizes the admin form used to register the 'Cotacao' model.
    The '__init__' method sets a new label for the field pointing to the previous request and only allows
    protocols other than 'Contrato' and 'Ordem de Serviço' to be selected as the previous request.
    It makes the 'protocolo' field filterable by the 'termo' field and
    sets a new label for the 'identificacao' field.
    It also adds the extra fields 'termo', used to filter the 'protocolo' field, and
    'entidade', used to filter the 'identificacao' field.
    The 'class Meta' defines which model is used.
"""
termo = forms.ModelChoiceField(Termo.objects.all(),
widget=forms.Select(
attrs={'onchange': 'filter_select("id_protocolo", "id_termo");'}))
entidade = forms.ModelChoiceField(Entidade.objects.all(), required=False,
widget=forms.Select(
attrs={'onchange': 'filter_select("id_identificacao", "id_entidade");'}))
class Meta:
model = Cotacao
fields = ['estado', 'termo', 'entidade', 'identificacao', 'descricao2', 'moeda_estrangeira', 'data_validade',
'data_chegada', 'origem', 'valor_total', 'obs', 'aceito', 'entrega', 'protocolo', 'parecer']
class Media:
js = ('js/selects.js', 'js/protocolo.js')
    # Redefine the 'protocolo' and 'identificacao' fields.
def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None,
initial=None, error_class=ErrorList, label_suffix=':',
empty_permitted=False, instance=None):
super(CotacaoAdminForm, self).__init__(data, files, auto_id, prefix, initial,
error_class, label_suffix, empty_permitted, instance)
        # Build a list of document types other than 'Cotação', 'Contrato' and 'Ordem de Serviço'
nomes = []
tipo = TipoDocumento.objects.all()
for t in tipo:
if t.nome.lower() != u'contrato' and t.nome.lower() != u'ordem de serviço' and t.nome.lower() != u'cotação':
nomes.append(t.nome)
        # # Set a new label and allow protocols other than 'Cotação', 'Contrato' and
        # 'Ordem de Serviço' to be selected.
# pt = self.fields['protocolo']
# pt.label = u'Pedido'
# pt.required = True
# pt.queryset = Protocolo.objects.filter(tipo_documento__nome__in=nomes)
        # Set a new label for the identification field.
iden = self.fields['identificacao']
iden.label = u'Contato'
self.fields['protocolo'].choices = [('', '---------')] + \
[(p.id, p.__unicode__())
for p in Protocolo.objects.all().prefetch_related('itemprotocolo_set')
.select_related('tipo_documento').order_by('data_vencimento')]
self.fields['identificacao'].choices = [('', '---------')] + \
[(p.id, p.__unicode__())
for p in Identificacao.objects.all()
.select_related('endereco', 'endereco__entidade', 'contato')]
self.fields['descricao2'].choices = [('', '---------')] + \
[(p.id, p.__unicode__())
for p in Descricao.objects.all().select_related('entidade',)]
class ProtocoloAdminForm(forms.ModelForm):
"""
    An instance of this class customizes/restricts the admin form used to register the 'Protocolo' model.
    The '__init__' method: allows protocols other than 'Contrato', 'Cotação'
    or 'Ordem de Serviço' to be selected as the previous request.
    It creates an 'entidade' field used to filter the 'identificacao' field.
    The 'class Meta' defines which model is used.
"""
# entidade = forms.ModelChoiceField(Entidade.objects.all(), required=False,
# widget=forms.Select(attrs={'onchange': 'filter_select("id_identificacao", "id_entidade");'}))
# referencia = forms.ChoiceField(choices=[(obj['descricao'], obj['descricao']) for obj in
# Protocolo.objects.order_by().values('descricao').distinct()],
# label='Referente a', widget=forms.Select(attrs={'onchange':'referente("id_referencia", "id_descricao");'}))
class Meta:
model = Protocolo
fields = ['data_chegada', 'origem', 'valor_total', 'obs', 'estado', 'termo', 'descricao2', 'tipo_documento',
'num_documento', 'moeda_estrangeira', 'referente', 'procedencia', 'data_validade', 'data_vencimento',
'responsavel']
class Media:
js = ('js/selects.js', 'js/protocolo.js',)
    # Check that the protocol's grant term (termo) matches the term of the grant request item related to
    # this protocol's expense.
# def clean(self):
# cleaned_data = self.cleaned_data
# termo1 = cleaned_data['termo']
# proto = self.instance
# if proto and termo1:
# try:
# despesa = Despesa.objects.filter(protocolo=proto)
# except ObjectDoesNotExist:
# return cleaned_data
# for d in despesa:
# item = d.item_pedido
# if item:
# termo2 = item.natureza_gasto.outorga.termo
# if termo1 != termo2:
# raise forms.ValidationError(_(u'Este protocolo possui despesa atrelada a
# outro termo de outorga'))
# return cleaned_data
    # Redefine the 'protocolo' field.
def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None,
initial=None, error_class=ErrorList, label_suffix=':',
empty_permitted=False, instance=None):
termos = Termo.objects.order_by('-ano')
if termos and not instance:
initial = {'termo': termos[0].id}
super(ProtocoloAdminForm, self).__init__(data, files, auto_id, prefix, initial,
error_class, label_suffix, empty_permitted, instance)
        # Build a list of document types other than 'Contrato' and 'Ordem de Serviço'
nomes = []
tipo = TipoDocumento.objects.all()
for t in tipo:
if t.nome.lower() != u'contrato' and t.nome.lower() != u'ordem de serviço' and t.nome.lower() != u'cotação':
nomes.append(t.nome)
        # Allow protocols other than 'Contrato', 'Ordem de Serviço' and 'Cotação' to be selected.
# pt = self.fields['protocolo']
# pt.queryset = Protocolo.objects.filter(tipo_documento__nome__in=nomes)
class ItemAdminForm(forms.ModelForm):
marca = forms.CharField(max_length=100, required=False, label=_('Marca'))
modelo = forms.CharField(max_length=100, required=False, label=_('Modelo'))
ns = forms.CharField(max_length=30, required=False, label=_(u'Número de série'))
class FeriadoAdminForm(forms.ModelForm):
def clean(self):
cleaned_data = super(FeriadoAdminForm, self).clean()
feriado = self.cleaned_data.get('feriado')
tipo = self.cleaned_data.get('tipo')
        # Check that a fixed holiday falls on the day/month specified by its holiday type
if tipo and not tipo.movel and (tipo.dia != feriado.day or tipo.mes != feriado.month):
self._errors["tipo"] = self.error_class([u"Feriado fixo deve ser no mesmo dia/mês especificado no tipo do "
u"feriado. Este feriado ocorre no dia %s/%s" %
(tipo.dia, tipo.mes)])
del cleaned_data["tipo"]
fid = self.cleaned_data.get('id')
        # Check whether another holiday is already registered on the same date
        f = Feriado.objects.filter(feriado=feriado)
        if fid and f.exclude(pk=fid).exists():
raise forms.ValidationError(u"O feriado nesta data já existe.")
return self.cleaned_data
class TipoFeriadoAdminForm(forms.ModelForm):
def clean(self):
cleaned_data = super(TipoFeriadoAdminForm, self).clean()
movel = self.cleaned_data.get('movel')
dia = self.cleaned_data.get('dia')
mes = self.cleaned_data.get('mes')
        # Check that a fixed (non-movable) holiday type has its day and month specified
if not movel:
if not dia:
self._errors["dia"] = self.error_class([u"Feriado fixo deve ter o dia especificado"])
del cleaned_data["dia"]
if not mes:
self._errors["mes"] = self.error_class([u"Feriado fixo deve ter o mês especificado"])
del cleaned_data["mes"]
return self.cleaned_data
class ArquivoAdminForm(forms.ModelForm):
protocolo = forms.ModelChoiceField(Protocolo.objects.all().select_related('tipo_documento'))
class Meta:
model = Arquivo
fields = ['protocolo', 'arquivo']
def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None,
initial=None, error_class=ErrorList, label_suffix=':',
empty_permitted=False, instance=None):
if instance:
if initial:
initial.update({'protocolo': instance.protocolo})
else:
initial = {'protocolo': instance.protocolo}
super(ArquivoAdminForm, self).__init__(data, files, auto_id, prefix, initial,
error_class, label_suffix, empty_permitted, instance)
| mpl-2.0 | 6,822,829,020,151,119,000 | 44.582456 | 120 | 0.589331 | false |
Agrajag-Petunia/existential-romantic-novel | run.py | 1 | 1939 | from datetime import datetime
from src.textgenerator import TextGenerator
from src.utils import extract_project_gutenberg_novel, remove_titles
# The source files to use
# NOTE: Ulysses makes the generated text just a little too weird.
files = [
'./data/life_and_amours.txt',
'./data/memoirs_of_fanny_hill.txt',
'./data/metamorphosis.txt',
'./data/the_romance_of_lust.txt',
'./data/the_trial.txt',
# './data/ulysses.txt',
'./data/the_antichrist.txt',
'./data/beyond_good_and_evil.txt',
]
total_word_count = 50000
chapters = 23
words_per_chapter = int(total_word_count / chapters)
output = ""
# Build our text generator (I found a prefix length of 2 or 3 worked best)
model = TextGenerator(prefix_length=3)
# Just to remind you which novels are being used
print(files)
# Iterate over our files
for filename in files:
# For each file read in the text and work it into our
# model.
with open(filename, 'r') as fobj:
print('Learning text from {}...'.format(filename))
# remove project gutenberg license stuff from the text
text = extract_project_gutenberg_novel(fobj.read())
# Strip the title, chapters, etc from the text.
text = remove_titles(text)
# Learn the cleaned up text
model.learn(text)
# Start generating our novel
with open('./data/novel.txt', 'w') as fobj:
# Start by printing out summary content
fobj.write("You are free and that is why you lust\n")
fobj.write("=====================================\n\n")
fobj.write("Author: Agrajag Petunia's computer\n")
fobj.write("Generation Date: {}\n\n".format(
datetime.now().strftime("%Y-%m-%d")
))
# For each chapter generate some text
for c in range(1, chapters + 1):
fobj.write("\n\n\tChapter {}\n".format(c))
fobj.write("---------------------------\n")
output = model.generate(size=words_per_chapter)
fobj.write(output)
| mit | -3,681,694,281,724,751,400 | 31.316667 | 74 | 0.636411 | false |
vshandubey/movie-database | ui/home/MovieTable.py | 1 | 2833 |
from PyQt4 import QtGui, QtCore
from core.repository import MovieRepository
import core.Constants as Constants
class MovieTable(QtGui.QTableWidget):
def __init__(self, mainWindow):
self.mainWindow = mainWindow
super(MovieTable, self).__init__()
#Init database
self.movieRepository = MovieRepository()
self.addHeaders()
#set column width
self.setColumnWidth(0, 30)
self.setColumnWidth(1, 250)
self.setColumnWidth(3, 150)
self.setWordWrap(True)
self.setAlternatingRowColors(True)
self.setSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
self.horizontalHeader().setStretchLastSection(True)
self.setShowGrid(False)
self.setStyleSheet(Constants.WIDGET_STYLE_WITH_BORDER)
self.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)
self.setSelectionMode(QtGui.QAbstractItemView.SingleSelection)
self.connect(self, QtCore.SIGNAL("cellDoubleClicked(int, int)"), self.rowClicked)
def addHeaders(self):
#Add Headers to table
headers = ["ID","Name","Spoken Language", "Status","Storage","Comments"]
for columnId, header in enumerate(headers):
self.insertColumn(columnId)
headerItem = QtGui.QTableWidgetItem(header)
self.setHorizontalHeaderItem(columnId, headerItem)
self.verticalHeader().setVisible(False)
def updateData(self, movies):
#Clear Old Data.
self.clearContents()
self.setRowCount(len(movies))
#Add Data to this table
for i in range(len(movies)):
movie = movies[i]
self.createTableCell(i, 0, movie._id)
self.createTableCell(i, 1, movie.name)
self.createTableCell(i, 2, movie.spokenLanguage)
self.createTableCell(i, 3, movie.status)
self.createTableCell(i, 4, movie.storage)
self.createTableCell(i, 5, movie.comments)
def createTableCell(self, row, column, cellData, editable = False):
cell = QtGui.QTableWidgetItem(str(cellData),
QtGui.QTableWidgetItem.UserType)
if not editable:
cell.setFlags(QtCore.Qt.ItemFlags(QtCore.Qt.ItemIsEnabled |
QtCore.Qt.ItemIsSelectable))
self.setItem(row, column, cell);
def rowClicked(self, row, column):
#Name for the movie selected in the table
nameItem = self.item(row, 1).text()
movieModel = self.movieRepository.findMovieByName(str(nameItem))
self.mainWindow.updateView(movieModel);
| apache-2.0 | 7,562,268,095,940,497,000 | 35.320513 | 92 | 0.606777 | false |
gbugaisky/bimm_185_conotoxin | wip-scripts_data/kNNProc.py | 1 | 1321 | #!usr/bin/env/python
import numpy as np
import pylab as pl
from matplotlib.colors import ListedColormap
from sklearn import neighbors
def kNNGen(trainfile, testfile):
features = np.genfromtxt(trainfile, delimiter=' ', usecols=(0, 1, 2))
labels = np.genfromtxt(trainfile, delimiter=' ', usecols=(-1))
tests = np.genfromtxt(testfile, delimiter=' ', usecols=(0, 1, 2))
testlabels = np.genfromtxt(testfile, delimiter=' ', usecols=(-1))
n_neighbors = 10
h = 0.02
accuracyScores = []
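    # Fit two k-NN classifiers (k = 10): one with uniform votes and one with
    # inverse-distance-weighted votes; score() reports accuracy on the test set.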
for weights in ['uniform', 'distance']:
clf = neighbors.KNeighborsClassifier(n_neighbors, leaf_size=20, weights=weights)
clf.fit(features, labels)
accuracyScores.append(clf.score(tests, testlabels))
return accuracyScores
if __name__ == "__main__":
FILEPATH = ".\\SeparatedTrainTest\\"
accuracyVals = []
for i in range(0, 10):
accuracyVals.append(kNNGen(FILEPATH + "trainDataSet" + str(i) + ".csv", FILEPATH + "testDataSet" + str(i) + ".csv"))
uniformScore = 0
distanceScore = 0
with open("kNNAverageAccuracy.txt", 'w') as results:
for element in accuracyVals:
results.write(str(element) + '\n')
uniformScore += element[0]
distanceScore += element[1]
results.write("Uniform kNN Accuracy: " + str(uniformScore / 10.0) + '\n')
results.write("Distance kNN Accuracy: " + str(distanceScore / 10.0) + '\n') | gpl-2.0 | 6,156,723,284,375,252,000 | 33.789474 | 118 | 0.695685 | false |
jeromekelleher/msprime | msprime/ancestry.py | 1 | 80216 | #
# Copyright (C) 2015-2021 University of Oxford
#
# This file is part of msprime.
#
# msprime is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# msprime is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with msprime. If not, see <http://www.gnu.org/licenses/>.
#
"""
Module responsible for defining and running ancestry simulations.
"""
from __future__ import annotations
import collections.abc
import copy
import dataclasses
import enum
import inspect
import json
import logging
import math
import struct
import sys
from typing import Any
from typing import ClassVar
from typing import Union
import numpy as np
import tskit
from . import core
from . import demography as demog
from . import intervals
from . import mutations
from . import provenance
from msprime import _msprime
logger: logging.Logger = logging.getLogger(__name__)
def _model_factory(model: Union[None, str, AncestryModel]) -> AncestryModel:
"""
Returns an AncestryModel corresponding to the specified model
description.
- If model is None, the default simulation model is returned.
- If model is a string, return the corresponding model instance.
- If model is an instance of AncestryModel, return it unchanged.
- Otherwise raise a type error.
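
    For example, a sketch of the mapping implemented below::

        _model_factory(None)                   # -> StandardCoalescent()
        _model_factory("dtwf")                 # -> DiscreteTimeWrightFisher()
        _model_factory(SmcApproxCoalescent())  # returned unchanged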
"""
model_map = {
"hudson": StandardCoalescent(),
"smc": SmcApproxCoalescent(),
"smc_prime": SmcPrimeApproxCoalescent(),
"dtwf": DiscreteTimeWrightFisher(),
"wf_ped": WrightFisherPedigree(),
}
if model is None:
model_instance = StandardCoalescent()
elif isinstance(model, str):
lower_model = model.lower()
if lower_model not in model_map:
raise ValueError(
"Model '{}' unknown. Choose from {}".format(
model, list(model_map.keys())
)
)
model_instance = model_map[lower_model]
elif not isinstance(model, AncestryModel):
raise TypeError(
"Ancestry model must be a string or an instance of AncestryModel"
)
else:
model_instance = model
return model_instance
def _parse_model_arg(model_arg):
"""
Parses the specified model argument from the sim_ancestry function,
returning the list of models.
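
    A brief sketch of the expected behaviour::

        _parse_model_arg("hudson")            # -> [StandardCoalescent()]
        _parse_model_arg(["dtwf", "hudson"])  # -> [DiscreteTimeWrightFisher(),
                                              #     StandardCoalescent()]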
"""
# TODO be more lenient about what we accept as input here. Ideally
# we'd like to support generators and consume them during the
# actual simulation, but that would raise complications for how
# to deal with replication.
if isinstance(model_arg, (list, tuple)):
if len(model_arg) < 1:
raise ValueError("Must specify at least one AncestryModel")
models = [_model_factory(model_desc) for model_desc in model_arg]
else:
models = [_model_factory(model_arg)]
return models
def _resolve_models(events):
model_change_events = []
for event in events:
assert isinstance(event, SimulationModelChange)
# We don't want to modify our inputs, so take a deep copy.
event = copy.copy(event)
event.model = _model_factory(event.model)
model_change_events.append(event)
return model_change_events
def _filter_events(demographic_events):
"""
Returns a tuple (demographic_events, model_change_events) which separates
out the SimulationModelChange events from the list. This is to support the
pre-1.0 syntax for model changes, where they were included in the
demographic_events parameter.
"""
filtered_events = []
model_change_events = []
for event in demographic_events:
if isinstance(event, SimulationModelChange):
model_change_events.append(event)
else:
filtered_events.append(event)
model_change_events = _resolve_models(model_change_events)
return filtered_events, model_change_events
def _check_population_configurations(population_configurations):
err = (
"Population configurations must be a list of PopulationConfiguration instances"
)
for config in population_configurations:
if not isinstance(config, demog.PopulationConfiguration):
raise TypeError(err)
# This class is only used in the 0.x interface.
Sample = collections.namedtuple("Sample", ["population", "time"])
def _samples_factory(sample_size, samples, population_configurations):
"""
Returns a list of Sample objects, given the specified inputs.
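
    For example, a sketch of the single-population default::

        _samples_factory(4, None, None)
        # -> [Sample(population=0, time=0.0)] * 4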
"""
the_samples = []
if sample_size is not None:
if samples is not None:
raise ValueError("Cannot specify sample size and samples simultaneously.")
if population_configurations is not None:
raise ValueError(
"Cannot specify sample size and population_configurations "
"simultaneously."
)
s = Sample(population=0, time=0.0)
the_samples = [s for _ in range(sample_size)]
# If we have population configurations we may have embedded sample_size
# values telling us how many samples to take from each population.
if population_configurations is not None:
_check_population_configurations(population_configurations)
if samples is None:
the_samples = []
for j, conf in enumerate(population_configurations):
if conf.sample_size is not None:
the_samples += [Sample(j, 0) for _ in range(conf.sample_size)]
else:
for conf in population_configurations:
if conf.sample_size is not None:
raise ValueError(
"Cannot specify population configuration sample size"
" and samples simultaneously"
)
the_samples = samples
elif samples is not None:
the_samples = samples
return the_samples
def _demography_factory(
Ne, population_configurations, migration_matrix, demographic_events
):
demography = demog.Demography.from_old_style(
population_configurations,
migration_matrix=migration_matrix,
demographic_events=demographic_events,
Ne=Ne,
ignore_sample_size=True,
)
return demography.validate()
def _build_initial_tables(*, sequence_length, samples, ploidy, demography, pedigree):
# NOTE: this is only used in the simulate() codepath.
tables = tskit.TableCollection(sequence_length)
if pedigree is None:
for index, (population, time) in enumerate(samples):
tables.nodes.add_row(
flags=tskit.NODE_IS_SAMPLE,
time=time,
population=population,
)
if population < 0:
raise ValueError(f"Negative population ID in sample at index {index}")
if population >= demography.num_populations:
raise ValueError(
f"Invalid population reference '{population}' in sample "
f"at index {index}"
)
else:
# TODO This should be removed - pedigree code path should only be callable
# from sim_ancestry
for parents, time, is_sample in zip(
pedigree.parents, pedigree.time, pedigree.is_sample
):
# We encode the parents in the metadata for now, but see
# https://github.com/tskit-dev/tskit/issues/852
encoded_parents = struct.pack("=ii", *parents)
ind_id = tables.individuals.add_row(0, metadata=encoded_parents)
node_flags = tskit.NODE_IS_SAMPLE if is_sample else 0
for _ in range(ploidy):
tables.nodes.add_row(node_flags, time, population=0, individual=ind_id)
# This is for the simulate() code path so we don't add metadata schemas
# and insert the user metadata in directly as encoded JSON, as before.
for population in demography.populations:
encoded_metadata = b""
if population.extra_metadata is not None:
encoded_metadata = json.dumps(population.extra_metadata).encode()
tables.populations.add_row(encoded_metadata)
return tables
def _parse_simulate(
sample_size=None,
*,
Ne=1,
length=None,
recombination_rate=None,
recombination_map=None,
population_configurations=None,
pedigree=None,
migration_matrix=None,
samples=None,
demographic_events=None,
model=None,
record_migrations=False,
from_ts=None,
start_time=None,
end_time=None,
record_full_arg=False,
num_labels=None,
random_seed=None,
):
"""
Argument parser for the simulate frontend. Interprets all the parameters
and returns an appropriate instance of Simulator.
"""
if Ne <= 0:
raise ValueError("Population size must be positive")
samples_specified = (
sample_size is None
and population_configurations is None
and samples is None
and from_ts is None
)
if samples_specified:
raise ValueError(
"Either sample_size, samples, population_configurations or from_ts must "
"be specified"
)
samples = _samples_factory(sample_size, samples, population_configurations)
models = [_model_factory(model)]
if demographic_events is not None:
demographic_events, model_change_events = _filter_events(demographic_events)
current_time = 0 if start_time is None else start_time
for mce in model_change_events:
models[-1].duration = mce.time - current_time
models.append(mce.model)
current_time = mce.time
demography = _demography_factory(
Ne, population_configurations, migration_matrix, demographic_events
)
# The logic for checking from_ts and recombination map is bound together
# in a complicated way, so we can factor them out into separate functions.
if from_ts is None:
if len(samples) < 2:
raise ValueError("Sample size must be >= 2")
else:
if len(samples) > 0:
raise ValueError("Cannot specify samples with from_ts")
if not isinstance(from_ts, tskit.TreeSequence):
raise TypeError("from_ts must be a TreeSequence instance.")
if demography.num_populations != from_ts.num_populations:
raise ValueError(
"Mismatch in the number of populations in from_ts and simulation "
"parameters. The number of populations in the simulation must be "
"equal to the number of populations in from_ts"
)
discrete_genome = False
if recombination_map is None:
# Default to 1 if no from_ts; otherwise default to the sequence length
# of from_ts
if from_ts is None:
the_length = 1 if length is None else length
else:
the_length = from_ts.sequence_length if length is None else length
the_rate = 0 if recombination_rate is None else recombination_rate
if the_length <= 0:
raise ValueError("Cannot provide non-positive sequence length")
if the_rate < 0:
raise ValueError("Cannot provide negative recombination rate")
recombination_map = intervals.RateMap.uniform(the_length, the_rate)
else:
if isinstance(recombination_map, intervals.RecombinationMap):
if recombination_map._is_discrete:
logger.info("Emulating v0.x discrete sites simulation")
discrete_genome = True
# Convert from the legacy RecombinationMap class
recombination_map = recombination_map.map
elif not isinstance(recombination_map, intervals.RateMap):
raise TypeError("RateMap instance required.")
if length is not None or recombination_rate is not None:
raise ValueError(
"Cannot specify length/recombination_rate along with "
"a recombination map"
)
if from_ts is not None:
if recombination_map.sequence_length != from_ts.sequence_length:
raise ValueError(
"Recombination map and from_ts must have identical " "sequence_length"
)
if num_labels is not None and num_labels < 1:
raise ValueError("Must have at least one structured coalescent label")
if from_ts is None:
tables = _build_initial_tables(
sequence_length=recombination_map.sequence_length,
samples=samples,
# FIXME not clear how this is all working now. We shouldn't have
# the pedigree as a parameter here at all which would probably
# simplify things.
ploidy=2,
demography=demography,
pedigree=pedigree,
)
else:
tables = from_ts.dump_tables()
# It's useful to call _parse_simulate outside the context of the main
# entry point - so we want to get good seeds in this case too.
random_seed = _parse_random_seed(random_seed)
random_generator = _msprime.RandomGenerator(random_seed)
sim = Simulator(
tables=tables,
recombination_map=recombination_map,
models=models,
store_migrations=record_migrations,
store_full_arg=record_full_arg,
start_time=start_time,
end_time=end_time,
num_labels=num_labels,
demography=demography,
# Defaults for the values that are not supported through simulate()
gene_conversion_map=intervals.RateMap.uniform(
recombination_map.sequence_length, 0
),
gene_conversion_tract_length=0,
discrete_genome=discrete_genome,
ploidy=2,
random_generator=random_generator,
)
return sim
def _parse_random_seed(seed):
"""
Parse the specified random seed value. If no seed is provided, generate a
high-quality random seed.
"""
if seed is None:
seed = core.get_random_seed()
seed = int(seed)
return seed
def _parse_replicate_index(*, replicate_index, random_seed, num_replicates):
"""
Parse the replicate_index value, and ensure that its value makes sense
in the context of the other parameters.
"""
if replicate_index is None:
return None
if random_seed is None:
raise ValueError("Cannot specify the replicate_index without a random_seed")
if num_replicates is not None:
raise ValueError("Cannot specify the replicate_index as well as num_replicates")
replicate_index = int(replicate_index)
if replicate_index < 0:
raise ValueError("Cannot specify negative replicate_index.")
return replicate_index
def _build_provenance(command, random_seed, frame):
"""
Builds a provenance dictionary suitable for use as the basis
of tree sequence provenance in replicate simulations. Uses the
specified stack frame to determine the values of the arguments
passed in, with a few exceptions.
"""
argspec = inspect.getargvalues(frame)
# num_replicates is excluded as provenance is per replicate
# replicate index is excluded as it is inserted for each replicate
parameters = {
"command": command,
**{
arg: argspec.locals[arg]
for arg in argspec.args
if arg not in ["num_replicates", "replicate_index"]
},
}
parameters["random_seed"] = random_seed
return provenance.get_provenance_dict(parameters)
def simulate(
sample_size=None,
*,
Ne=1,
length=None,
recombination_rate=None,
recombination_map=None,
mutation_rate=None,
population_configurations=None,
pedigree=None,
migration_matrix=None,
demographic_events=None,
samples=None,
model=None,
record_migrations=False,
random_seed=None,
replicate_index=None,
mutation_generator=None,
num_replicates=None,
from_ts=None,
start_time=None,
end_time=None,
record_full_arg=False,
num_labels=None,
record_provenance=True,
):
"""
Simulates the coalescent with recombination under the specified model
parameters and returns the resulting :class:`tskit.TreeSequence`. Note that
Ne is the effective diploid population size (so the effective number
of genomes in the population is 2*Ne), but ``sample_size`` is the
number of (monoploid) genomes sampled.
.. important::
This function is deprecated (but supported indefinitely);
please use :func:`.sim_ancestry` in new code.
:param int sample_size: The number of sampled monoploid genomes. If not
specified or None, this defaults to the sum of the subpopulation sample
sizes. Either ``sample_size``, ``population_configurations`` or
``samples`` must be specified.
:param float Ne: The effective (diploid) population size. This defaults to
1 if not specified.
:param float length: The length of the simulated region in bases.
This parameter cannot be used along with ``recombination_map``.
Defaults to 1 if not specified.
:param float recombination_rate: The rate of recombination per base
per generation. This parameter cannot be used along with
``recombination_map``. Defaults to 0 if not specified.
:param recombination_map: The map
describing the changing rates of recombination along the simulated
chromosome. This parameter cannot be used along with the
``recombination_rate`` or ``length`` parameters, as these
values are encoded within the map. Defaults to a uniform rate as
described in the ``recombination_rate`` parameter if not specified.
:type recombination_map: :class:`.RecombinationMap`
:param float mutation_rate: The rate of infinite sites
mutations per unit of sequence length per generation.
If not specified, no mutations are generated. This option only
allows for infinite sites mutations with a binary (i.e., 0/1)
alphabet. For more control over the mutational process, please
use the :func:`.mutate` function.
:param list population_configurations: The list of
:class:`.PopulationConfiguration` instances describing the
sampling configuration, relative sizes and growth rates of
the populations to be simulated. If this is not specified,
a single population with a sample of size ``sample_size``
is assumed.
:type population_configurations: list or None
:param list migration_matrix: The matrix describing the rates of migration
between all pairs of populations. If :math:`N` populations are defined
in the ``population_configurations`` parameter, then the migration
matrix must be an :math:`N \\times N` matrix with 0 on the diagonal,
consisting of :math:`N` lists of length :math:`N` or an :math:`N
\\times N` numpy array. The :math:`[j, k]^{th}` element of the
migration matrix gives the expected number of migrants moving from
population :math:`k` to population :math:`j` per generation, divided by
the size of population :math:`j`. When simulating from the
discrete-time Wright-Fisher model (``model = "dtwf"``), the row sums of
the migration matrix must not exceed 1. There are no sum constraints for
migration rates in continuous-time models.
:param list demographic_events: The list of demographic events to
simulate. Demographic events describe changes to the populations
in the past. Events should be supplied in non-decreasing
order of time in the past. Events with the same time value will be
applied sequentially in the order that they were supplied before the
simulation algorithm continues with the next time step.
:param list samples: The list specifying the location and time of
all samples. This parameter may be used to specify historical
samples, and cannot be used in conjunction with the ``sample_size``
parameter. Each sample is a (``population``, ``time``) pair
such that the sample in position ``j`` in the list of samples
        is drawn in the specified population at the specified time. Time
is measured in generations ago, as elsewhere.
:param int random_seed: The random seed. If this is `None`, a
random seed will be automatically generated. Valid random
seeds must be between 1 and :math:`2^{32} - 1`.
:param int num_replicates: The number of replicates of the specified
parameters to simulate. If this is not specified or None,
        no replication is performed and a :class:`tskit.TreeSequence` object is
        returned. If `num_replicates` is provided, the specified
        number of replicates is performed, and an iterator over the
        resulting :class:`tskit.TreeSequence` objects is returned.
:param tskit.TreeSequence from_ts: If specified, initialise the simulation
from the root segments of this tree sequence and return the
updated tree sequence. Please see :ref:`here
<sec_ancestry_initial_state>` for details on the required properties
of this tree sequence and its interactions with other parameters.
(Default: None).
:param float start_time: If specified, set the initial time that the
simulation starts to this value. If not specified, the start
time is zero if performing a simulation of a set of samples,
or is the time of the oldest node if simulating from an
existing tree sequence (see the ``from_ts`` parameter).
:param float end_time: If specified, terminate the simulation at the
specified time. In the returned tree sequence, all rootward paths from
samples with time <= end_time will end in a node with one child with
time equal to end_time. Sample nodes with time > end_time will
also be present in the output tree sequence. If not specified or ``None``,
run the simulation until all samples have an MRCA at all positions in
the genome.
:param bool record_full_arg: If True, record all intermediate nodes
arising from common ancestor and recombination events in the output
tree sequence. This will result in unary nodes (i.e., nodes in marginal
trees that have only one child). Defaults to False.
:param model: The simulation model to use.
This can either be a string (e.g., ``"smc_prime"``) or an instance of
        an ancestry model class (e.g., ``msprime.DiscreteTimeWrightFisher()``).
:type model: str or AncestryModel
:param bool record_provenance: If True, record all input parameters
in the tree sequence :ref:`tskit:sec_provenance`.
:return: The :class:`tskit.TreeSequence` object representing the results
of the simulation if no replication is performed, or an
iterator over the independent replicates simulated if the
`num_replicates` parameter has been used.
:rtype: :class:`tskit.TreeSequence` or an iterator over
:class:`tskit.TreeSequence` replicates.
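
    A minimal usage example; the parameter values here are purely
    illustrative::

        ts = msprime.simulate(
            sample_size=10, Ne=1000, length=1e4,
            recombination_rate=1e-8, random_seed=42)
        print(ts.num_trees)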
"""
replicate_index = _parse_replicate_index(
random_seed=random_seed,
num_replicates=num_replicates,
replicate_index=replicate_index,
)
random_seed = _parse_random_seed(random_seed)
provenance_dict = None
if record_provenance:
frame = inspect.currentframe()
provenance_dict = _build_provenance("simulate", random_seed, frame)
if mutation_generator is not None:
# This error was added in version 0.6.1.
raise ValueError(
"mutation_generator is not longer supported. Please use "
"msprime.mutate instead"
)
if mutation_rate is not None:
# There is ambiguity in how we should throw mutations onto partially
        # built tree sequences: on the whole thing, or just the newly added
# topology? Before or after start_time? We avoid this complexity by
# asking the user to use mutate(), which should have the required
# flexibility.
if from_ts is not None:
raise ValueError(
"Cannot specify mutation rate combined with from_ts. Please use "
"msprime.mutate on the final tree sequence instead"
)
# There is ambiguity in how the start_time argument should interact with
# the mutation generator: should we throw mutations down on the whole
# tree or just the (partial) edges after start_time? To avoid complicating
# things here, make the user use mutate() which should have the flexibility
# to do whatever is needed.
if start_time is not None and start_time > 0:
raise ValueError(
"Cannot specify mutation rate combined with a non-zero "
"start_time. Please use msprime.mutate on the returned "
"tree sequence instead"
)
mutation_rate = float(mutation_rate)
sim = _parse_simulate(
sample_size=sample_size,
Ne=Ne,
length=length,
recombination_rate=recombination_rate,
recombination_map=recombination_map,
population_configurations=population_configurations,
pedigree=pedigree,
migration_matrix=migration_matrix,
demographic_events=demographic_events,
samples=samples,
model=model,
record_migrations=record_migrations,
from_ts=from_ts,
start_time=start_time,
end_time=end_time,
record_full_arg=record_full_arg,
num_labels=num_labels,
random_seed=random_seed,
)
return _wrap_replicates(
sim,
num_replicates=num_replicates,
replicate_index=replicate_index,
provenance_dict=provenance_dict,
mutation_rate=mutation_rate,
)
def _wrap_replicates(
simulator,
*,
num_replicates,
replicate_index,
provenance_dict,
mutation_rate=None,
):
"""
Wrapper for the logic used to run replicate simulations for the two
frontends.
"""
if num_replicates is None and replicate_index is None:
# Default single-replicate case.
replicate_index = 0
if replicate_index is not None:
num_replicates = replicate_index + 1
iterator = simulator.run_replicates(
num_replicates,
mutation_rate=mutation_rate,
provenance_dict=provenance_dict,
)
if replicate_index is not None:
deque = collections.deque(iterator, maxlen=1)
return deque.pop()
else:
return iterator
def _parse_rate_map(rate_param, sequence_length, name):
"""
Parse the specified input rate parameter value into a rate map.
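
    A brief sketch of the expected behaviour::

        _parse_rate_map(None, 10, "recombination")  # uniform rate 0 over [0, 10)
        _parse_rate_map(1e-8, 10, "recombination")  # uniform rate 1e-8
        # A RateMap instance is passed through, after checking that its
        # sequence_length matches.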
"""
# Note: in the future we might have another clause here where we
# allow for a different map per population. This could be
# accepted as either a list of N rate maps, or a dictionary mapping
# population names to maps.
# See https://github.com/tskit-dev/msprime/issues/1095
msg_head = f"Error in parsing rate map for {name}: "
if isinstance(rate_param, intervals.RateMap):
rate_map = rate_param
if rate_map.sequence_length != sequence_length:
raise ValueError(msg_head + "sequence_length must match")
else:
rate_param = 0 if rate_param is None else float(rate_param)
rate_map = intervals.RateMap.uniform(sequence_length, rate_param)
return rate_map
def _insert_sample_sets(sample_sets, demography, default_ploidy, tables):
"""
    Insert the samples described in the specified list of SampleSet objects
    into the specified set of tables.
"""
for sample_set in sample_sets:
n = sample_set.num_samples
population = demography[sample_set.population]
time = (
population.default_sampling_time
if sample_set.time is None
else sample_set.time
)
ploidy = default_ploidy if sample_set.ploidy is None else sample_set.ploidy
logger.info(
f"Sampling {n} individuals with ploidy {ploidy} in population "
f"{population.id} (name='{population.name}') at time {time}"
)
node_individual = len(tables.individuals) + np.repeat(
np.arange(n, dtype=np.int32), ploidy
)
ind_flags = np.zeros(n, dtype=np.uint32)
tables.individuals.append_columns(flags=ind_flags)
N = n * ploidy
tables.nodes.append_columns(
flags=np.full(N, tskit.NODE_IS_SAMPLE, dtype=np.uint32),
time=np.full(N, time),
population=np.full(N, population.id, dtype=np.int32),
individual=node_individual,
)
def _parse_sample_sets(sample_sets, demography):
# Don't modify the inputs.
sample_sets = copy.deepcopy(sample_sets)
for sample_set in sample_sets:
if not isinstance(sample_set, SampleSet):
raise TypeError("msprime.SampleSet object required")
if not core.isinteger(sample_set.num_samples):
raise TypeError(
"The number of samples to draw from a population must be an integer"
)
sample_set.num_samples = int(sample_set.num_samples)
if sample_set.num_samples < 0:
raise ValueError("Number of samples cannot be negative")
if sample_set.population is None:
if demography.num_populations == 1:
sample_set.population = 0
else:
raise ValueError(
"Must specify a SampleSet population in multipopulation models"
)
if sum(sample_set.num_samples for sample_set in sample_sets) == 0:
raise ValueError("Zero samples specified")
return sample_sets
def _parse_samples(samples, demography, ploidy, tables):
"""
Parse the specified "samples" value for sim_ancestry and insert them into the
specified tables.
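
    A sketch of the three accepted forms (population names are assumed to be
    defined in the demography; the integer form requires a single-population
    model)::

        _parse_samples(5, demography, ploidy, tables)
        _parse_samples({"A": 5, "B": 2}, demography, ploidy, tables)
        _parse_samples([SampleSet(5, population="A")], demography, ploidy, tables)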
"""
if isinstance(samples, collections.abc.Sequence):
sample_sets = samples
elif isinstance(samples, collections.abc.Mapping):
sample_sets = [
SampleSet(num_samples, population)
for population, num_samples in samples.items()
]
elif core.isinteger(samples):
if len(tables.populations) != 1:
raise ValueError(
"Numeric samples can only be used in single population models. "
"Please use Demography.sample() to generate a list of samples "
"for your model, which can be used instead."
)
sample_sets = [SampleSet(samples)]
else:
raise TypeError(
f"The value '{samples}' cannot be interpreted as sample specification. "
"Samples must either be a single integer, a dict that maps populations "
"to the number of samples for that population, or a list of SampleSet "
"objects. Please see the online documentation for more details on "
"the different forms."
)
sample_sets = _parse_sample_sets(sample_sets, demography)
_insert_sample_sets(sample_sets, demography, ploidy, tables)
def _parse_sim_ancestry(
samples=None,
*,
sequence_length=None,
recombination_rate=None,
gene_conversion_rate=None,
gene_conversion_tract_length=None,
discrete_genome=None,
population_size=None,
demography=None,
ploidy=None,
model=None,
initial_state=None,
start_time=None,
end_time=None,
record_migrations=None,
record_full_arg=None,
num_labels=None,
random_seed=None,
init_for_debugger=False,
):
"""
Argument parser for the sim_ancestry frontend. Interprets all the parameters
and returns an appropriate instance of Simulator.
"""
# As a general rule we try to cast any input value to the required types
# early and in a way that provides an interpretable traceback.
# Simple defaults.
start_time = 0 if start_time is None else float(start_time)
end_time = math.inf if end_time is None else float(end_time)
discrete_genome = core._parse_flag(discrete_genome, default=True)
record_full_arg = core._parse_flag(record_full_arg, default=False)
record_migrations = core._parse_flag(record_migrations, default=False)
if initial_state is not None:
if isinstance(initial_state, tskit.TreeSequence):
initial_state = initial_state.dump_tables()
elif not isinstance(initial_state, tskit.TableCollection):
raise TypeError(
"initial_state must either be a TreeSequence or TableCollection instance"
)
if sequence_length is None:
# These are all the cases in which we derive the sequence_length
# from somewhere else.
if initial_state is not None:
sequence_length = initial_state.sequence_length
elif recombination_rate is None and gene_conversion_rate is None:
# In this case, we're doing single-locus simulations, so a sequence
# length of 1 makes sense.
sequence_length = 1
elif isinstance(recombination_rate, intervals.RateMap):
sequence_length = recombination_rate.sequence_length
elif isinstance(gene_conversion_rate, intervals.RateMap):
sequence_length = gene_conversion_rate.sequence_length
else:
raise ValueError(
"A sequence_length value must be specified. This can be either "
"via the the sequence_length parameter itself, of implicitly "
"through using a RateMap instance for the recombination_rate "
"or gene_conversion_rate parameters, or via the initial_state "
"tables. "
)
else:
sequence_length = float(sequence_length)
assert sequence_length is not None
if discrete_genome and math.floor(sequence_length) != sequence_length:
raise ValueError("Must have integer sequence length with discrete_genome=True")
recombination_map = _parse_rate_map(
recombination_rate, sequence_length, "recombination"
)
gene_conversion_map = _parse_rate_map(
gene_conversion_rate, sequence_length, "gene conversion"
)
if gene_conversion_tract_length is None:
if gene_conversion_rate is None:
# It doesn't matter what the tract_length is, just set a
# value to keep the low-level code happy.
gene_conversion_tract_length = 1
else:
raise ValueError(
"Must specify tract length when simulating gene conversion"
)
else:
if gene_conversion_rate is None:
raise ValueError(
"Must specify gene conversion rate along with tract length"
)
gene_conversion_tract_length = float(gene_conversion_tract_length)
# Default to diploid
ploidy = 2 if ploidy is None else ploidy
if not core.isinteger(ploidy):
raise TypeError("ploidy must be an integer")
ploidy = int(ploidy)
if ploidy < 1:
raise ValueError("ploidy must be >= 1")
models = _parse_model_arg(model)
is_dtwf = isinstance(models[0], DiscreteTimeWrightFisher)
# Check the demography. If no demography is specified, we default to a
# single-population model with a given population size.
if demography is None:
if is_dtwf:
# A default size of 1 isn't so smart for DTWF and almost certainly
# an error.
if population_size is None:
raise ValueError(
"When using the DTWF model, the population size must be set "
"explicitly, either using the population_size or demography "
"arguments."
)
if initial_state is not None:
if population_size is None:
raise ValueError(
"Must specify either a demography object or a population_size "
"(for single population models) when providing an initial_state."
)
if len(initial_state.populations) > 1:
raise ValueError(
"Must specify demography for initial_state with > 1 population"
)
population_size = 1 if population_size is None else float(population_size)
demography = demog.Demography.isolated_model([population_size])
elif isinstance(demography, demog.Demography):
if population_size is not None:
raise ValueError("Cannot specify demography and population size")
else:
raise TypeError("demography argument must be an instance of msprime.Demography")
demography = demography.validate()
if initial_state is None:
if samples is None and not init_for_debugger:
raise ValueError(
"Either the samples or initial_state arguments must be provided"
)
initial_state = tskit.TableCollection(sequence_length)
demography.insert_populations(initial_state)
if not init_for_debugger:
_parse_samples(samples, demography, ploidy, initial_state)
else:
if samples is not None:
raise ValueError("Cannot specify both samples and initial_state")
if sequence_length != initial_state.sequence_length:
raise ValueError(
"The initial_state sequence length must be consistent with the"
"value derived from either the sequence_length, "
"recombination_rate or gene_conversion_rate parameters."
)
if len(initial_state.populations) == 0:
raise ValueError(
"initial_state tables must define at least one population."
)
# Make sure the names match-up in the input demography.
demography_check = demog.Demography.from_tree_sequence(
initial_state.tree_sequence()
)
if demography.num_populations < demography_check.num_populations:
raise ValueError(
"Input demography must have at least as many populations as the "
"initial state population table: "
f"{demography.num_populations} < {demography_check.num_populations}"
)
for pop1, pop2 in zip(demography.populations, demography_check.populations):
if pop1.name != pop2.name:
raise ValueError(
"Population names in the input demography and the initial "
f"state population table must be equal: {pop1.name} ≠ {pop2.name}"
)
demography.insert_extra_populations(initial_state)
# It's useful to call _parse_sim_ancestry outside the context of the main
# entry point - so we want to get good seeds in this case too.
random_seed = _parse_random_seed(random_seed)
random_generator = _msprime.RandomGenerator(random_seed)
return Simulator(
tables=initial_state,
recombination_map=recombination_map,
gene_conversion_map=gene_conversion_map,
gene_conversion_tract_length=gene_conversion_tract_length,
discrete_genome=discrete_genome,
ploidy=ploidy,
demography=demography,
models=models,
store_migrations=record_migrations,
store_full_arg=record_full_arg,
start_time=start_time,
end_time=end_time,
num_labels=num_labels,
random_generator=random_generator,
)
def sim_ancestry(
samples=None,
*,
demography=None,
sequence_length=None,
discrete_genome=None,
recombination_rate=None,
gene_conversion_rate=None,
gene_conversion_tract_length=None,
population_size=None,
ploidy=None,
model=None,
initial_state=None,
start_time=None,
end_time=None,
record_migrations=None,
record_full_arg=None,
num_labels=None,
random_seed=None,
num_replicates=None,
replicate_index=None,
record_provenance=None,
):
"""
Simulates an ancestral process described by the specified model, demography and
    samples, and returns a :class:`tskit.TreeSequence` (or a sequence of
replicate tree sequences).
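    A minimal usage sketch (the parameter values below are arbitrary
    illustrative choices, not recommendations)::
        import msprime
        ts = msprime.sim_ancestry(
            samples=3,                # three diploid individuals
            population_size=1_000,
            sequence_length=1e4,
            recombination_rate=1e-8,
            random_seed=42,
        )
        print(ts.num_samples)         # 6 sample nodes with the default ploidy of 2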
:param samples: The sampled individuals as either an integer, specifying
the number of individuals to sample in a single-population model;
or a list of :class:`.SampleSet` objects defining the properties of
groups of similar samples; or as a mapping in which the keys
are population identifiers (either an integer ID or string name)
and the values are the number of samples to take from the corresponding
population at its default sampling time. It is important to note that
samples correspond to *individuals* here, and each sampled individual
is usually associated with :math:`k` sample *nodes* (or genomes) when
``ploidy`` = :math:`k`. See the :ref:`sec_ancestry_samples` section
for further details.
Either ``samples`` or ``initial_state`` must be specified.
:param demography: The demographic model to simulate, describing the
extant and ancestral populations, their population sizes and growth
rates, their migration rates, and demographic events affecting the
populations over time. See the :ref:`sec_demography` section for
details on how to specify demographic models and
:ref:`sec_ancestry_samples` for details on how to specify the
populations that samples are drawn from. If not specified (or None) we
default to a single population with constant size 1
(see also the ``population_size`` parameter).
:param int ploidy: The number of monoploid genomes per sample individual
(Default=2). See the :ref:`sec_ancestry_ploidy` section for usage examples.
:param float sequence_length: The length of the genome sequence to simulate.
See the :ref:`sec_ancestry_sequence_length` section for usage examples
for this parameter and how it interacts with other parameters.
:param bool discrete_genome: If True (the default) simulation occurs
in discrete genome coordinates such that recombination and
gene conversion breakpoints always occur at integer positions.
        Thus multiple events (e.g. recombinations) can occur at the same
genome position. If ``discrete_genome`` is False simulations
are performed using continuous genome coordinates. In this
case multiple events at precisely the same genome location are very
unlikely (but technically possible).
See the :ref:`sec_ancestry_discrete_genome` section for usage examples.
:param recombination_rate: The rate of recombination along the sequence;
can be either a single value (specifying a single rate over the entire
sequence) or an instance of :class:`RateMap`.
See the :ref:`sec_ancestry_recombination` section for usage examples
for this parameter and how it interacts with other parameters.
:param gene_conversion_rate: The rate of gene conversion along the sequence.
If provided, a value for ``gene_conversion_tract_length`` must also be
specified. See the :ref:`sec_ancestry_gene_conversion` section
for usage examples for this parameter and how it interacts with
other parameters.
:param gene_conversion_tract_length: The mean length of the gene conversion
tracts. For discrete genomes the tract lengths are geometrically
distributed with mean ``gene_conversion_tract_length``, which must be
greater than or equal to 1. For continuous genomes the tract lengths are
exponentially distributed with mean ``gene_conversion_tract_length``,
which must be larger than 0.
:param population_size: The size of the default single population
:class:`.Demography`. If not specified, defaults to 1. Cannot be specified
along with the ``demography`` parameter. See the :ref:`sec_demography`
section for more details on demographic models and population sizes
and the :ref:`sec_ancestry_demography` section for usage examples.
:param int random_seed: The random seed. If this is not specified or `None`,
a high-quality random seed will be automatically generated. Valid random
seeds must be between 1 and :math:`2^{32} - 1`.
See the :ref:`sec_ancestry_random_seed` section for usage examples.
:param int num_replicates: The number of replicates of the specified
parameters to simulate. If this is not specified or `None`,
        no replication is performed and a :class:`tskit.TreeSequence` object
        is returned. If `num_replicates` is provided, the specified
        number of replicates is performed, and an iterator over the
        resulting :class:`tskit.TreeSequence` objects is returned.
See the :ref:`sec_ancestry_replication` section for examples.
:param bool record_full_arg: If True, record all intermediate nodes
arising from common ancestor and recombination events in the output
tree sequence. This will result in unary nodes (i.e., nodes in marginal
trees that have only one child). Defaults to False.
See the :ref:`sec_ancestry_full_arg` section for examples.
:param bool record_migrations: If True, record all migration events
that occur in the :ref:`tskit:sec_migration_table_definition` of
the output tree sequence. Defaults to False.
See the :ref:`sec_ancestry_record_migrations` section for examples.
:param tskit.TreeSequence initial_state: If specified, initialise the
simulation from the root segments of this tree sequence and return the
completed tree sequence. Please see
:ref:`sec_ancestry_initial_state` for details of the required
properties of this tree sequence and its interactions with other parameters.
All information in the ``initial_state`` tables is preserved
(including metadata) and included in the returned tree sequence.
(Default: None).
:param float start_time: If specified, set the initial time that the
simulation starts to this value. If not specified, the start
time is zero if performing a simulation of a set of samples,
or is the time of the oldest node if simulating from an
existing tree sequence (see the ``initial_state`` parameter).
See the :ref:`sec_ancestry_start_time` section for examples.
:param float end_time: If specified, terminate the simulation at the
specified time. In the returned tree sequence, all rootward paths from
samples with time < ``end_time`` will end in a node with one child with
time equal to end_time. Any sample nodes with time >= ``end_time`` will
also be present in the output tree sequence. If not specified or ``None``,
run the simulation until all samples have an MRCA at all positions in
the genome. See the :ref:`sec_ancestry_end_time` section for examples.
:param bool record_provenance: If True (the default), record all input
parameters in the tree sequence :ref:`tskit:sec_provenance`.
:param model: The ancestry model to use. This can be either a
single instance of :class:`.AncestryModel` (or a string that can be
interpreted as an ancestry model), or a list of :class:`.AncestryModel`
instances. If the ``duration`` attribute of any of these models is
set, the simulation will be run until at most :math:`t + t_m`, where
:math:`t` is the simulation time when the model starts and :math:`t_m`
is the model's ``duration``. If the ``duration`` is not set, the
simulation will continue until the model completes, the overall
``end_time`` is reached, or overall coalescence. See
the :ref:`sec_ancestry_models_specifying` section for more details,
and the :ref:`sec_ancestry_models` section for the available models
and examples.
:type model: str or .AncestryModel or list
:return: The :class:`tskit.TreeSequence` object representing the results
of the simulation if no replication is performed, or an
iterator over the independent replicates simulated if the
`num_replicates` parameter has been used.
:rtype: :class:`tskit.TreeSequence` or an iterator over
:class:`tskit.TreeSequence` replicates.
"""
record_provenance = True if record_provenance is None else record_provenance
replicate_index = _parse_replicate_index(
random_seed=random_seed,
num_replicates=num_replicates,
replicate_index=replicate_index,
)
random_seed = _parse_random_seed(random_seed)
provenance_dict = None
if record_provenance:
frame = inspect.currentframe()
provenance_dict = _build_provenance("sim_ancestry", random_seed, frame)
sim = _parse_sim_ancestry(
samples=samples,
sequence_length=sequence_length,
recombination_rate=recombination_rate,
gene_conversion_rate=gene_conversion_rate,
gene_conversion_tract_length=gene_conversion_tract_length,
discrete_genome=discrete_genome,
population_size=population_size,
demography=demography,
ploidy=ploidy,
model=model,
initial_state=initial_state,
start_time=start_time,
end_time=end_time,
record_migrations=record_migrations,
record_full_arg=record_full_arg,
num_labels=num_labels,
random_seed=random_seed,
)
return _wrap_replicates(
sim,
num_replicates=num_replicates,
replicate_index=replicate_index,
provenance_dict=provenance_dict,
)
class ExitReason(enum.IntEnum):
"""
The different reasons that the low-level simulation exits.
"""
MAX_EVENTS = _msprime.EXIT_MAX_EVENTS
"""
We ran for the specified maximum number of events. We usually
run for a maximum number of events so that we return to Python
regularly, to update logs and to check if CTRL-C has been hit, etc.
"""
MAX_TIME = _msprime.EXIT_MAX_TIME
"""
    The simulation time has reached the specified maximum time.
"""
COALESCENCE = _msprime.EXIT_COALESCENCE
"""
The simulation is complete, and we have fully coalesced.
"""
MODEL_COMPLETE = _msprime.EXIT_MODEL_COMPLETE
"""
The model we have specified has run to completion **without**
resulting in coalescence.
"""
class Simulator(_msprime.Simulator):
"""
Class to simulate trees under a variety of population models.
Note: this class is not intended to be instantiated directly
and is only for internal library use. The interface may change
arbitrarily between versions.
"""
def __init__(
self,
*,
tables,
recombination_map,
gene_conversion_map,
gene_conversion_tract_length,
discrete_genome,
ploidy,
demography,
random_generator,
models=None,
store_migrations=False,
store_full_arg=False,
start_time=None,
end_time=None,
num_labels=None,
):
# We always need at least n segments, so no point in making
# allocation any smaller than this.
num_samples = len(tables.nodes)
block_size = 64 * 1024
segment_block_size = max(block_size, num_samples)
avl_node_block_size = block_size
node_mapping_block_size = block_size
if num_labels is None:
num_labels = self._choose_num_labels(models)
# Now, convert the high-level values into their low-level
# counterparts.
ll_population_configuration = [pop.asdict() for pop in demography.populations]
ll_demographic_events = [
event.get_ll_representation() for event in demography.events
]
ll_recomb_map = self._resolve_missing_intervals(recombination_map)
ll_tables = _msprime.LightweightTableCollection(tables.sequence_length)
ll_tables.fromdict(tables.asdict())
# FIXME support arbitrary gene conversion maps.
# https://github.com/tskit-dev/msprime/issues/1212
assert len(gene_conversion_map.rate) == 1
gene_conversion_rate = gene_conversion_map.rate[0]
start_time = -1 if start_time is None else start_time
super().__init__(
tables=ll_tables,
recombination_map=ll_recomb_map,
start_time=start_time,
random_generator=random_generator,
migration_matrix=demography.migration_matrix,
population_configuration=ll_population_configuration,
demographic_events=ll_demographic_events,
store_migrations=store_migrations,
store_full_arg=store_full_arg,
num_labels=num_labels,
segment_block_size=segment_block_size,
avl_node_block_size=avl_node_block_size,
node_mapping_block_size=node_mapping_block_size,
gene_conversion_rate=gene_conversion_rate,
gene_conversion_tract_length=gene_conversion_tract_length,
discrete_genome=discrete_genome,
ploidy=ploidy,
)
# Highlevel attributes used externally that have no lowlevel equivalent
self.end_time = np.inf if end_time is None else end_time
self.models = models
self.demography = demography
# Temporary, until we add the low-level infrastructure for the gc map
# when we'll take the same approach as the recombination map.
self.gene_conversion_map = gene_conversion_map
def copy_tables(self):
"""
Returns a copy of the underlying table collection. This is useful
for testing and avoids using the LightweightTableCollection object,
which is returned by self.tables.
"""
return tskit.TableCollection.fromdict(self.tables.asdict())
@property
def sample_configuration(self):
"""
Returns a list of the number of samples in each of the populations.
"""
tables = self.copy_tables()
num_samples = [0 for _ in tables.populations]
for node in tables.nodes:
if (node.flags & tskit.NODE_IS_SAMPLE) != 0:
num_samples[node.population] += 1
return num_samples
@property
def recombination_map(self):
return intervals.RateMap(**super().recombination_map)
def _choose_num_labels(self, models):
"""
Choose the number of labels appropriately, given the ancestry
models that will be simulated.
"""
num_labels = 1
for model in models:
if isinstance(model, SweepGenicSelection):
num_labels = 2
return num_labels
def _resolve_missing_intervals(self, recombination_map):
"""
Inspect the recombination map for unknown intervals, resolve
the appropriate recombination rate to use in the actual simulation,
and return the low-level recombination map representation.
Also store the set of missing_intervals for later use in delete_intervals.
For now we only support unknown values in the flanking regions,
and insist that we can have at most two of them (so, no attempt to
coalesce adjacent unknown intervals at the ends of the map).
See https://github.com/tskit-dev/msprime/issues/1604 for plans
and discussion for how to simulate chromosomes with unknown
regions in the middle.
"""
self.missing_intervals = recombination_map.missing_intervals()
error_msg = (
"Missing regions of the genome other than the flanks are currently "
"not supported. Please see "
"https://github.com/tskit-dev/msprime/issues/1604"
)
if self.missing_intervals.shape[0] > 2:
raise ValueError(error_msg)
for left, right in self.missing_intervals:
if not (left == 0 or right == recombination_map.sequence_length):
raise ValueError(error_msg)
ll_recomb_map = recombination_map.asdict()
missing = recombination_map.missing
rate = recombination_map.rate.copy()
rate[missing] = 0
ll_recomb_map["rate"] = rate
return ll_recomb_map
def _run_until(self, end_time, event_chunk=None, debug_func=None):
# This is a pretty big default event chunk so that we don't spend
# too much time going back and forth into Python. We could imagine
# doing something a bit more sophisticated where we try to tune the
# number of events so that we end up with roughly 10 second slices
# (say).
if event_chunk is None:
event_chunk = 10 ** 4
if event_chunk <= 0:
raise ValueError("Must have at least 1 event per chunk")
logger.info("Running model %s until max time: %f", self.model, end_time)
ret = ExitReason.MAX_EVENTS
while ret == ExitReason.MAX_EVENTS:
ret = ExitReason(super().run(end_time, event_chunk))
if self.time > end_time:
                # Currently the Pedigree and Sweeps models are "non-reentrant"
# We can change this to an assertion once these have been fixed.
raise RuntimeError(
f"Model {self.model['name']} does not support interruption. "
"Please open an issue on GitHub"
)
logger.debug(
"time=%g ancestors=%d ret=%s", self.time, self.num_ancestors, ret
)
if debug_func is not None:
debug_func(self)
return ret
def run(self, event_chunk=None, debug_func=None):
"""
Runs the simulation until complete coalescence has occurred,
end_time has been reached, or all model durations have
elapsed.
"""
for j, model in enumerate(self.models):
self.model = model._as_lowlevel()
logger.info(
"model[%d] %s started at time=%g nodes=%d edges=%d",
j,
self.model,
self.time,
self.num_nodes,
self.num_edges,
)
model_duration = np.inf if model.duration is None else model.duration
if model_duration < 0:
raise ValueError("Model durations must be >= 0")
end_time = min(self.time + model_duration, self.end_time)
exit_reason = self._run_until(end_time, event_chunk, debug_func)
if exit_reason == ExitReason.COALESCENCE or self.time == self.end_time:
logger.debug("Skipping remaining %d models", len(self.models) - j - 1)
break
self.finalise_tables()
logger.info(
"Completed at time=%g nodes=%d edges=%d",
self.time,
self.num_nodes,
self.num_edges,
)
def run_replicates(
self,
num_replicates,
*,
mutation_rate=None,
provenance_dict=None,
):
"""
Sequentially yield the specified number of simulation replicates.
"""
encoded_provenance = None
# The JSON is modified for each replicate to insert the replicate number.
# To avoid repeatedly encoding the same JSON (which can take milliseconds)
# we insert a replaceable string.
placeholder = "@@_REPLICATE_INDEX_@@"
if provenance_dict is not None:
provenance_dict["parameters"]["replicate_index"] = placeholder
encoded_provenance = provenance.json_encode_provenance(
provenance_dict, num_replicates
)
for replicate_index in range(num_replicates):
logger.info("Starting replicate %d", replicate_index)
self.run()
if mutation_rate is not None:
# This is only called from simulate() or the ms interface,
# so does not need any further parameters.
mutations._simple_mutate(
tables=self.tables,
random_generator=self.random_generator,
sequence_length=self.sequence_length,
rate=mutation_rate,
)
tables = tskit.TableCollection.fromdict(self.tables.asdict())
if len(self.missing_intervals) > 0:
tables.delete_intervals(self.missing_intervals, record_provenance=False)
replicate_provenance = None
if encoded_provenance is not None:
replicate_provenance = encoded_provenance.replace(
f'"{placeholder}"', str(replicate_index)
)
tables.provenances.add_row(replicate_provenance)
# There are rare cases when we are simulating from
# awkward initial states where the tables we produce in the
# simulation are not correctly sorted. The simplest course
            # of action here is to just let it fail and sort.
# https://github.com/tskit-dev/msprime/issues/1606
try:
ts = tables.tree_sequence()
except tskit.LibraryError:
# TODO add a warning? This is probably badly formed input
# so it seems reasonable to issue a warning.
tables.sort()
ts = tables.tree_sequence()
yield ts
self.reset()
@dataclasses.dataclass
class SampleSet:
"""
    Specify a set of exchangeable sample individuals with a given ploidy
value from a population at a given time. See the
:ref:`sec_ancestry_samples` section for details and examples.
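    For example, a minimal sketch (the population names "A" and "B" are
    assumed to exist in the demography passed to :func:`.sim_ancestry`)::
        samples = [
            msprime.SampleSet(2, population="A"),
            msprime.SampleSet(1, population="B", time=100),
        ]
        ts = msprime.sim_ancestry(samples=samples, demography=demography)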
"""
num_samples: int
"""
The number of k-ploid sample **individuals** to draw.
"""
population: Union[int, str, None] = None
"""
The population in which the samples are drawn. May be either a
string name or integer ID (see
    :ref:`sec_demography_populations_identifiers` for details).
"""
time: Union[float, None] = None
"""
The time at which these samples are drawn. If not specified or None,
defaults to the :attr:`.Population.default_sampling_time`.
"""
ploidy: Union[int, None] = None
"""
The number of monoploid genomes to sample for each sample individual.
See the :ref:`sec_ancestry_ploidy` section for more details and
examples.
"""
def asdict(self):
return dataclasses.asdict(self)
@dataclasses.dataclass
class SimulationModelChange:
"""
    Demographic event denoting a change in the ancestry model.
.. important::
This class is deprecated (but supported indefinitely);
please use the ``model`` argument in :func:`sim_ancestry`
to specify multiple models in new code.
"""
time: Union[float, None] = None
"""
The time at which the ancestry model changes to the new model, in
generations. After this time, all internal tree nodes, edges and migrations
are the result of the new model. If time is set to None (the default), the
model change will occur immediately after the previous model has completed.
"""
# Can't use typehints here because having a reference to AncestryModel
# breaks autodoc, which wants to call it msprime.ancestry.AncestryModel
# whereas we have it documented as msprime.AncestryModel. Annoying.
model: Any = None
"""
The new ancestry model to use. This can either be a string (e.g.,
``"smc_prime"``) or an instance of an ancestry model class (e.g,
``msprime.DiscreteTimeWrightFisher()``. Please see the
:ref:`sec_ancestry_models` section for more details on specifying these
models. If this is None (the default) the model is changed to the standard
coalescent.
"""
def asdict(self):
return dataclasses.asdict(self)
@dataclasses.dataclass(init=False)
class AncestryModel:
"""
Abstract superclass of all ancestry models.
"""
duration: Union[float, None]
"""
The time duration that this model should run for. If None, the model
will run until completion (i.e., until the simulation coalesces
or the model itself completes). Otherwise, this defines the maximum
time duration which the model can run. See the
:ref:`sec_ancestry_models_specifying` section for more details.
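    For example (an illustrative sketch; the duration and other values are
    arbitrary)::
        ts = msprime.sim_ancestry(
            samples=10,
            population_size=500,
            model=[
                msprime.DiscreteTimeWrightFisher(duration=100),
                msprime.StandardCoalescent(),
            ],
            random_seed=1,
        )
    runs the DTWF model for the first 100 generations and then completes the
    simulation under the standard coalescent.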
"""
name: ClassVar[str]
    # We have to define an __init__ to enforce keyword-only behaviour
def __init__(self, *, duration=None):
self.duration = duration
# We need to have a separate _as_lowlevel and asdict because the
# asdict form can't have the name in the dictionary for the
# provenance code.
def _as_lowlevel(self):
d = {"name": self.name}
d.update(self.asdict())
return d
def asdict(self):
return dataclasses.asdict(self)
class StandardCoalescent(AncestryModel):
"""
The classical coalescent with recombination model (i.e., Hudson's algorithm).
The string ``"hudson"`` can be used to refer to this model.
This is a continuous time model in which the time to the next event
is exponentially distributed with rates depending on the population size(s),
migration rates, numbers of extant lineages and the amount of ancestral
material currently present. See
`Kelleher et al. (2016) <https://doi.org/10.1371/journal.pcbi.1004842>`_ for a
detailed description of the model and further references.
"""
name = "hudson"
class SmcApproxCoalescent(AncestryModel):
"""
The Sequentially Markov Coalescent (SMC) model defined by
`McVean and Cardin (2005) <https://dx.doi.org/10.1098%2Frstb.2005.1673>`_.
In the SMC, only common ancestor events that result in marginal coalescences
are possible. Under this approximation, the marginal trees along the
genome depend only on the immediately previous tree (i.e. are Markovian).
.. note::
This model is implemented using a naive rejection sampling approach
and so it may not be any more efficient to simulate than the
standard Hudson model.
The string ``"smc"`` can be used to refer to this model.
"""
name = "smc"
class SmcPrimeApproxCoalescent(AncestryModel):
"""
The SMC' model defined by
`Marjoram and Wall (2006) <https://doi.org/10.1186/1471-2156-7-16>`_
as a refinement of the :class:`SMC<SmcApproxCoalescent>`. The SMC'
extends the SMC by additionally allowing common ancestor events that
join contiguous tracts of ancestral material (as well as events that
result in marginal coalescences).
.. note::
This model is implemented using a naive rejection sampling approach
and so it may not be any more efficient to simulate than the
standard Hudson model.
The string ``"smc_prime"`` can be used to refer to this model.
"""
name = "smc_prime"
class DiscreteTimeWrightFisher(AncestryModel):
"""
A discrete backwards-time Wright-Fisher model, with diploid back-and-forth
recombination. The string ``"dtwf"`` can be used to refer to this model.
Wright-Fisher simulations are performed very similarly to coalescent
simulations, with all parameters denoting the same quantities in both
models. Because events occur at discrete times however, the order in which
they occur matters. Each generation consists of the following ordered
events:
- Migration events. As in the Hudson coalescent, these move single extant
lineages between populations. Because migration events occur before
lineages choose parents, migrant lineages choose parents from their new
population in the same generation.
- Demographic events. All events with `previous_generation < event_time <=
current_generation` are carried out here.
- Lineages draw parents. Each (monoploid) extant lineage draws a parent
from their current population.
- Diploid recombination. Each parent is diploid, so all child lineages
recombine back-and-forth into the same two parental genome copies. These
become two independent lineages in the next generation.
- Historical sampling events. All historical samples with
`previous_generation < sample_time <= current_generation` are inserted.
"""
name = "dtwf"
class WrightFisherPedigree(AncestryModel):
# TODO Complete documentation.
# TODO Since the pedigree is a necessary parameter for this simulation
# model and it cannot be used with any other model we should make it a
# parametric model where the parameter is the pedigree. This would
# streamline a bunch of logic.
"""
Backwards-time simulations through a pre-specified pedigree, with diploid
individuals and back-and-forth recombination. The string ``"wf_ped"`` can
be used to refer to this model.
"""
name = "wf_ped"
class ParametricAncestryModel(AncestryModel):
"""
The superclass of ancestry models that require extra parameters.
"""
@dataclasses.dataclass
class BetaCoalescent(ParametricAncestryModel):
"""
A Lambda-coalescent with multiple mergers in the haploid cases, or a
Xi-coalescent with simultaneous multiple mergers in the polyploid case.
There are two main differences between the Beta-coalescent and the
standard coalescent. Firstly, the number of lineages that take part in each
common ancestor event is random, with distribution determined by moments of
the :math:`Beta(2 - \\alpha, \\alpha)`-distribution. In particular, when there
are :math:`n` lineages, each set of :math:`k \\leq n` of them participates in a
common ancestor event at rate
.. math::
\\frac{1}{B(2 - \\alpha, \\alpha)}
\\int_0^1 x^{k - \\alpha - 1} (1 - x)^{n - k + \\alpha - 1} dx,
where :math:`B(2 - \\alpha, \\alpha)` is the Beta-function.
If ploidy = 1, then all participating lineages merge into one common ancestor,
corresponding to haploid, single-parent reproduction.
If ploidy = :math:`p > 1`, all participating lineages split randomly into
:math:`2 p` groups, corresponding to two-parent reproduction with :math:`p` copies
of each chromosome per parent. All lineages within each group merge simultaneously.
Secondly, the number of generations between common ancestor events predicted by the
Beta-coalescent is proportional to :math:`N^{\\alpha - 1}`, where :math:`N` is
the population size. Specifically, the mean number of generations until
two lineages undergo a common ancestor event is
.. math::
G = \\frac{m^{\\alpha} N^{\\alpha - 1}}{\\alpha B(2 - \\alpha, \\alpha)},
if ploidy = 1, and
.. math::
G = \\frac{2 p m^{\\alpha} (N / 2)^{\\alpha - 1}}
{\\alpha B(2 - \\alpha, \\alpha)},
if ploidy = :math:`p > 1`, where :math:`m` is the mean number of juveniles per
family given by
.. math::
m = 2 + \\frac{2^{\\alpha}}{3^{\\alpha - 1} (\\alpha - 1)},
if ploidy > 1, and
.. math::
m = 1 + \\frac{1}{2^{\\alpha - 1} (\\alpha - 1)},
if ploidy = 1.
In the polyploid case we divide the population size :math:`N` by two
because we assume the :math:`N` polyploid individuals form :math:`N / 2`
two-parent families in which reproduction takes place.
.. warning::
The number of generations between common ancestor events :math:`G` depends
both on the population size :math:`N` and :math:`\\alpha`,
and can be dramatically shorter than in the case of the
standard coalescent. For :math:`\\alpha \\approx 1` that is due to
insensitivity of :math:`G` to :math:`N` --- see
:ref:`sec_ancestry_models_multiple_mergers` for an illustration.
For :math:`\\alpha \\approx 2`, :math:`G` is almost linear in
:math:`N`, but can nevertheless be small because
:math:`B(2 - \\alpha, \\alpha) \\rightarrow \\infty` as
:math:`\\alpha \\rightarrow 2`. As a result, population sizes
must often be many orders of magnitude larger than census population sizes
to obtain realistic amounts of diversity in simulated samples.
See `Schweinsberg (2003)
<https://www.sciencedirect.com/science/article/pii/S0304414903000280>`_
for the derivation of the common ancestor event rate,
as well as the number of generations between common ancestor events.
Note however that Schweinsberg (2003) only covers the haploid case.
For details of the diploid extension, see
`Blath et al. (2013) <https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3527250/>`_,
and `Birkner et al. (2018) <https://projecteuclid.org/euclid.ejp/1527818427>`_
for a diploid version of the Schweinsberg (2003) model specifically.
The general polyploid model is analogous to the diploid case, with
    :math:`2 p` available copies of parental chromosomes per common ancestor event,
and hence up to :math:`2 p` simultaneous mergers.
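    For example (an illustrative sketch with arbitrary parameter values)::
        ts = msprime.sim_ancestry(
            samples=5,
            population_size=1000,
            ploidy=1,
            model=msprime.BetaCoalescent(alpha=1.5),
            random_seed=1,
        )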
:param float alpha: Determines the degree of skewness in the family size
distribution, and must satisfy :math:`1 < \\alpha < 2`. Smaller values of
:math:`\\alpha` correspond to greater skewness, and :math:`\\alpha = 2`
would coincide with the standard coalescent.
    :param float truncation_point: The maximum number of juveniles :math:`K` born to
        one family as a fraction of the population size :math:`N`. Must satisfy
        :math:`0 < K \\leq \\infty`. Determines the maximum fraction of the population
        replaced by offspring in one reproduction event, :math:`\\tau`, via
        :math:`\\tau = K / (K + m)`, where :math:`m` is the mean juvenile number
        above. The default is :math:`K = \\infty`, which corresponds to the standard
        Beta-coalescent with :math:`\\tau = 1`. When :math:`K < \\infty`, the number of
        lineages participating in a common ancestor event is determined by moments
        of the :math:`Beta(2 - \\alpha, \\alpha)` distribution conditioned on not
        exceeding :math:`\\tau`, and the Beta-function in the expression
        for :math:`G` is replaced by the incomplete Beta-function
        :math:`B(\\tau; 2 - \\alpha, \\alpha)`.
"""
name = "beta"
alpha: Union[float, None]
truncation_point: float
    # We have to define an __init__ to enforce keyword-only behaviour
def __init__(
self, *, duration=None, alpha=None, truncation_point=sys.float_info.max
):
self.duration = duration
self.alpha = alpha
self.truncation_point = truncation_point
@dataclasses.dataclass
class DiracCoalescent(ParametricAncestryModel):
"""
A Lambda-coalescent with multiple mergers in the haploid cases, or a
Xi-coalescent with simultaneous multiple mergers in the polyploid case.
The Dirac-coalescent is an implementation of the model of
`Blath et al. (2013) <https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3527250/>`_
The simulation proceeds similarly to the standard coalescent.
In addition to binary common ancestor events at rate :math:`n (n - 1) / 2` when
there are :math:`n` lineages, potential multiple merger events take place
at rate :math:`c > 0`. Each lineage participates in each multiple merger
event independently with probability :math:`0 < \\psi \\leq 1`.
If ploidy = 1, then all participating lineages merge into one common ancestor,
corresponding to haploid, single-parent reproduction.
If ploidy = :math:`p > 1`, all participating lineages split randomly into
:math:`2 p` groups, corresponding to two-parent reproduction with :math:`p` copies
of each chromosome per parent. All lineages within each group merge simultaneously.
.. warning::
The Dirac-coalescent is obtained as a scaling limit of Moran models,
rather than Wright-Fisher models. As a consequence, the number of generations
between coalescence events is proportional to :math:`N^2`,
rather than :math:`N` generations as in the standard coalescent.
See :ref:`sec_ancestry_models_multiple_mergers` for an illustration
of how this affects simulation output in practice.
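    For example (an illustrative sketch with arbitrary parameter values)::
        ts = msprime.sim_ancestry(
            samples=5,
            population_size=100,
            model=msprime.DiracCoalescent(psi=0.9, c=1),
            random_seed=1,
        )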
:param float c: Determines the rate of potential multiple merger events.
We require :math:`c > 0`.
:param float psi: Determines the fraction of the population replaced by
offspring in one large reproduction event, i.e. one reproduction event
giving rise to potential multiple mergers when viewed backwards in time.
We require :math:`0 < \\psi \\leq 1`.
"""
name = "dirac"
psi: Union[float, None]
c: Union[float, None]
    # We have to define an __init__ to enforce keyword-only behaviour
def __init__(self, *, duration=None, psi=None, c=None):
self.duration = duration
self.psi = psi
self.c = c
@dataclasses.dataclass
class SweepGenicSelection(ParametricAncestryModel):
"""
    A selective sweep that has occurred in the history of the sample.
    This will lead to a burst of rapid coalescence near the selected site.
    The strength of selection during the sweep is determined by the
    parameter :math:`s`. Here we define :math:`s` such that the
    fitnesses of the three genotypes at our beneficial locus are
    :math:`W_{bb}=1`, :math:`W_{Bb}=1 + s/2`, :math:`W_{BB}=1 + s`.
    Thus the fitness of the heterozygote is intermediate between the
    two homozygotes.
The model is one of a structured coalescent where selective backgrounds are
defined as in
`Braverman et al. (1995) <https://www.ncbi.nlm.nih.gov/pmc/articles/PMC1206652/>`_
The implementation details here follow closely those in discoal
`(Kern and Schrider, 2016)
<https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5167068/>`_
See :ref:`sec_ancestry_models_selective_sweeps` for examples and
details on how to specify different types of sweeps.
.. warning::
Currently models with more than one population and a selective sweep
are not implemented. Population size changes during the sweep
are not yet possible in msprime.
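    For example (an illustrative sketch; all parameter values are arbitrary)::
        sweep = msprime.SweepGenicSelection(
            position=5_000,
            start_frequency=1 / 2_000,      # ~ 1/(2N) for a de novo allele
            end_frequency=1 - 1 / 2_000,
            s=0.01,
            dt=1e-6,
        )
        ts = msprime.sim_ancestry(
            samples=5,
            population_size=1_000,
            sequence_length=10_000,
            recombination_rate=1e-8,
            model=[sweep, msprime.StandardCoalescent()],
            random_seed=1,
        )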
:param float position: the location of the beneficial allele along the
chromosome.
    :param float start_frequency: population frequency of the beneficial
        allele at the start of the selective sweep. E.g., for a *de novo*
        allele in a diploid population of size :math:`N`, the start frequency
        would be :math:`1/(2N)`.
:param float end_frequency: population frequency of the beneficial
allele at the end of the selective sweep.
:param float s: :math:`s` is the selection coefficient of the beneficial mutation.
    :param float dt: dt is the small increment of time for stepping through
        the sweep phase of the model. A good rule of thumb is for this to be
        approximately :math:`1/(40N)` or smaller.
"""
name = "sweep_genic_selection"
position: Union[float, None]
start_frequency: Union[float, None]
end_frequency: Union[float, None]
s: Union[float, None]
dt: Union[float, None]
    # We have to define an __init__ to enforce keyword-only behaviour
def __init__(
self,
*,
duration=None,
position=None,
start_frequency=None,
end_frequency=None,
s=None,
dt=None,
):
self.duration = duration
self.position = position
self.start_frequency = start_frequency
self.end_frequency = end_frequency
self.s = s
self.dt = dt
| gpl-3.0 | 7,967,172,415,019,972,000 | 41.040881 | 89 | 0.657279 | false |
aggelgian/algorithmic-library | python/data_structures/binary_indexed_tree.py | 1 | 1365 | # -*- coding: utf-8 -*-
"""
Binary Indexed Tree
-------------------
Based on the proposed C/C++ implementation by
Yannis Chatzimichos @ https://git.softlab.ntua.gr/public/pdp-camp/blob/master/2013/advanced_data_structures.pdf
Supports the operations
- ADD S X
Add X to the sum at position S
- SUM X Y
Finds the sum from position X to position Y
The binary indexed tree is 1-indexed.
Time Complexity
    All operations cost O(log n), where n is the number of positions in the
    tree (each loop visits one set bit of the position index).
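Example
    A short usage sketch of the operations above:
    bit = BIT(5)     # five positions, all initially zero
    bit.add(3, 10)   # add 10 at position 3
    bit.add(5, 2)    # add 2 at position 5
    bit.sum(1, 5)    # -> 12 (inclusive range sum)
    bit.sum(4, 5)    # -> 2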
"""
class BIT:
def __init__(self, n):
self.bit = (n + 1) * [0] # Position 0 is not used.
self.n = n
def add(self, pos, x):
while pos <= self.n:
self.bit[pos] += x
pos += (pos & -pos)
    def sum(self, x, y):
        # Inclusive sum of positions x..y: prefix(y) - prefix(x - 1).
        sy = self.sumFromOne(y)
        return sy if x == 1 else sy - self.sumFromOne(x - 1)
def sumFromOne(self, pos):
sum = 0
while pos > 0:
sum += self.bit[pos]
pos -= (pos & -pos)
return sum
if __name__ == "__main__":
xs = [7,0,3,2,3,0,0,4,6,3,2,8]
n = len(xs)
bit = BIT(n)
for i in range(1, n+1):
bit.add(i, xs[i-1])
    assert [bit.sum(1, x) for x in range(1, n+1)] == [sum(xs[0:i]) for i in range(1, n+1)]
    bit.add(5, 3)
    assert bit.sum(2, n) == sum(xs[1:]) + 3
| mpl-2.0 | -6,951,622,506,403,230,000 | 25.764706 | 115 | 0.52381 | false |
qedsoftware/commcare-hq | corehq/apps/repeaters/models.py | 1 | 20914 | import base64
from collections import namedtuple
from datetime import datetime, timedelta
import logging
import urllib
import urlparse
from django.utils.translation import ugettext_lazy as _
from requests.exceptions import Timeout, ConnectionError
from corehq.apps.cachehq.mixins import QuickCachedDocumentMixin
from corehq.form_processor.exceptions import XFormNotFound
from corehq.util.datadog.metrics import REPEATER_ERROR_COUNT
from corehq.util.datadog.utils import log_counter
from corehq.util.quickcache import quickcache
from dimagi.ext.couchdbkit import *
from couchdbkit.exceptions import ResourceNotFound
from django.core.cache import cache
import hashlib
from casexml.apps.case.xml import V2, LEGAL_VERSIONS
from corehq.apps.receiverwrapper.exceptions import DuplicateFormatException, IgnoreDocument
from corehq.form_processor.interfaces.dbaccessors import FormAccessors, CaseAccessors
from couchforms.const import DEVICE_LOG_XMLNS
from dimagi.utils.decorators.memoized import memoized
from dimagi.utils.parsing import json_format_datetime
from dimagi.utils.mixins import UnicodeMixIn
from dimagi.utils.post import simple_post
from .dbaccessors import (
get_pending_repeat_record_count,
get_failure_repeat_record_count,
get_success_repeat_record_count,
)
from .const import (
MAX_RETRY_WAIT,
MIN_RETRY_WAIT,
RECORD_FAILURE_STATE,
RECORD_SUCCESS_STATE,
RECORD_PENDING_STATE,
POST_TIMEOUT,
)
from .exceptions import RequestConnectionError
from .utils import get_all_repeater_types
def simple_post_with_cached_timeout(data, url, expiry=60 * 60, force_send=False, *args, **kwargs):
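    """
    Like simple_post, but failed attempts (connection errors, timeouts and
    non-2xx responses) are remembered in the cache for `expiry` seconds, so
    subsequent calls for the same URL fail fast with RequestConnectionError
    instead of re-posting, unless force_send=True.
    """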
# no control characters (e.g. '/') in keys
key = hashlib.md5(
'{0} timeout {1}'.format(__name__, url)
).hexdigest()
cache_value = cache.get(key)
if cache_value and not force_send:
raise RequestConnectionError(cache_value)
try:
resp = simple_post(data, url, *args, **kwargs)
except (Timeout, ConnectionError), e:
cache.set(key, e.message, expiry)
raise RequestConnectionError(e.message)
if not 200 <= resp.status_code < 300:
message = u'Status Code {}: {}'.format(resp.status_code, resp.reason)
cache.set(key, message, expiry)
raise RequestConnectionError(message)
return resp
DELETED = "-Deleted"
FormatInfo = namedtuple('FormatInfo', 'name label generator_class')
PostInfo = namedtuple('PostInfo', 'payload headers force_send max_tries')
class GeneratorCollection(object):
"""Collection of format_name to Payload Generators for a Repeater class
args:
repeater_class: A valid child class of Repeater class
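    Example (an illustrative sketch; ``MyPayloadGenerator`` is a hypothetical
    subclass of BasePayloadGenerator):
        collection = GeneratorCollection(FormRepeater)
        collection.add_new_format('my_format', 'My Format', MyPayloadGenerator,
                                  is_default=True)
        collection.get_generator_by_format('my_format')  # -> MyPayloadGenerator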
"""
def __init__(self, repeater_class):
self.repeater_class = repeater_class
self.default_format = ''
self.format_generator_map = {}
def add_new_format(self, format_name, format_label, generator_class, is_default=False):
"""Adds a new format->generator mapping to the collection
args:
format_name: unique name to identify the format
format_label: label to be displayed to the user
generator_class: child class of .repeater_generators.BasePayloadGenerator
kwargs:
is_default: True if the format_name should be default format
exceptions:
raises DuplicateFormatException if format is added with is_default while other
default exists
            raises DuplicateFormatException if format_name already exists in the collection
"""
if is_default and self.default_format:
raise DuplicateFormatException("A default format already exists for this repeater.")
elif is_default:
self.default_format = format_name
if format_name in self.format_generator_map:
raise DuplicateFormatException("There is already a Generator with this format name.")
self.format_generator_map[format_name] = FormatInfo(
name=format_name,
label=format_label,
generator_class=generator_class
)
def get_default_format(self):
"""returns default format"""
return self.default_format
def get_default_generator(self):
"""returns generator class for the default format"""
        return self.format_generator_map[self.default_format].generator_class
def get_all_formats(self, for_domain=None):
"""returns all the formats added to this repeater collection"""
return [(name, format.label) for name, format in self.format_generator_map.iteritems()
if not for_domain or format.generator_class.enabled_for_domain(for_domain)]
def get_generator_by_format(self, format):
"""returns generator class given a format"""
return self.format_generator_map[format].generator_class
class Repeater(QuickCachedDocumentMixin, Document, UnicodeMixIn):
"""
Represents the configuration of a repeater. Will specify the URL to forward to and
other properties of the configuration.
"""
base_doc = 'Repeater'
domain = StringProperty()
url = StringProperty()
format = StringProperty()
use_basic_auth = BooleanProperty(default=False)
username = StringProperty()
password = StringProperty()
friendly_name = _("Data")
@classmethod
def get_custom_url(cls, domain):
return None
@classmethod
def available_for_domain(cls, domain):
"""Returns whether this repeater can be used by a particular domain
"""
return True
def get_pending_record_count(self):
return get_pending_repeat_record_count(self.domain, self._id)
def get_failure_record_count(self):
return get_failure_repeat_record_count(self.domain, self._id)
def get_success_record_count(self):
return get_success_repeat_record_count(self.domain, self._id)
def format_or_default_format(self):
from corehq.apps.repeaters.repeater_generators import RegisterGenerator
return self.format or RegisterGenerator.default_format_by_repeater(self.__class__)
def get_payload_generator(self, payload_format):
from corehq.apps.repeaters.repeater_generators import RegisterGenerator
gen = RegisterGenerator.generator_class_by_repeater_format(self.__class__, payload_format)
return gen(self)
def payload_doc(self, repeat_record):
raise NotImplementedError
def get_payload(self, repeat_record):
generator = self.get_payload_generator(self.format_or_default_format())
return generator.get_payload(repeat_record, self.payload_doc(repeat_record))
def register(self, payload, next_check=None):
if not self.allowed_to_forward(payload):
return
repeat_record = RepeatRecord(
repeater_id=self.get_id,
repeater_type=self.doc_type,
domain=self.domain,
next_check=next_check or datetime.utcnow(),
payload_id=payload.get_id
)
repeat_record.save()
return repeat_record
def allowed_to_forward(self, payload):
"""
        Return True/False depending on whether the payload meets forwarding criteria or not
"""
return True
def clear_caches(self):
if self.__class__ == Repeater:
cls = self.get_class_from_doc_type(self.doc_type)
else:
cls = self.__class__
# clear cls.by_domain (i.e. filtered by doc type)
Repeater.by_domain.clear(cls, self.domain)
# clear Repeater.by_domain (i.e. not filtered by doc type)
Repeater.by_domain.clear(Repeater, self.domain)
@classmethod
@quickcache(['cls.__name__', 'domain'], timeout=5 * 60, memoize_timeout=10)
def by_domain(cls, domain):
key = [domain]
if cls.__name__ in get_all_repeater_types():
key.append(cls.__name__)
elif cls.__name__ == Repeater.__name__:
# In this case the wrap function delegates to the
# appropriate sub-repeater types.
pass
else:
# Any repeater type can be posted to the API, and the installed apps
# determine whether we actually know about it.
# But if we do not know about it, then may as well return nothing now
return []
raw_docs = cls.view('receiverwrapper/repeaters',
startkey=key,
endkey=key + [{}],
include_docs=True,
reduce=False,
wrap_doc=False
)
return [cls.wrap(repeater_doc['doc']) for repeater_doc in raw_docs
if cls.get_class_from_doc_type(repeater_doc['doc']['doc_type'])]
@classmethod
def wrap(cls, data):
if cls.__name__ == Repeater.__name__:
cls_ = cls.get_class_from_doc_type(data['doc_type'])
if cls_:
return cls_.wrap(data)
else:
raise ResourceNotFound('Unknown repeater type: %s' % data)
else:
return super(Repeater, cls).wrap(data)
@staticmethod
def get_class_from_doc_type(doc_type):
doc_type = doc_type.replace(DELETED, '')
repeater_types = get_all_repeater_types()
if doc_type in repeater_types:
return repeater_types[doc_type]
else:
return None
def retire(self):
if DELETED not in self['doc_type']:
self['doc_type'] += DELETED
if DELETED not in self['base_doc']:
self['base_doc'] += DELETED
self.save()
    def get_url(self, repeat_record):
# to be overridden
return self.url
def allow_retries(self, response):
"""Whether to requeue the repeater when it fails
"""
return True
def allow_immediate_retries(self, response):
"""Whether to retry failed requests immediately a few times
"""
return True
def get_headers(self, repeat_record):
# to be overridden
generator = self.get_payload_generator(self.format_or_default_format())
headers = generator.get_headers()
if self.use_basic_auth:
user_pass = base64.encodestring(':'.join((self.username, self.password))).replace('\n', '')
headers.update({'Authorization': 'Basic ' + user_pass})
return headers
def handle_success(self, response, repeat_record):
"""handle a successful post
"""
generator = self.get_payload_generator(self.format_or_default_format())
return generator.handle_success(response, self.payload_doc(repeat_record))
def handle_failure(self, response, repeat_record):
"""handle a failed post
"""
generator = self.get_payload_generator(self.format_or_default_format())
return generator.handle_failure(response, self.payload_doc(repeat_record))
class FormRepeater(Repeater):
"""
Record that forms should be repeated to a new url
"""
include_app_id_param = BooleanProperty(default=True)
white_listed_form_xmlns = StringListProperty(default=[]) # empty value means all form xmlns are accepted
friendly_name = _("Forward Forms")
@memoized
def payload_doc(self, repeat_record):
return FormAccessors(repeat_record.domain).get_form(repeat_record.payload_id)
def allowed_to_forward(self, payload):
return (
payload.xmlns != DEVICE_LOG_XMLNS and
(not self.white_listed_form_xmlns or payload.xmlns in self.white_listed_form_xmlns)
)
def get_url(self, repeat_record):
url = super(FormRepeater, self).get_url(repeat_record)
if not self.include_app_id_param:
return url
else:
# adapted from http://stackoverflow.com/a/2506477/10840
url_parts = list(urlparse.urlparse(url))
query = urlparse.parse_qsl(url_parts[4])
query.append(("app_id", self.payload_doc(repeat_record).app_id))
url_parts[4] = urllib.urlencode(query)
return urlparse.urlunparse(url_parts)
def get_headers(self, repeat_record):
headers = super(FormRepeater, self).get_headers(repeat_record)
headers.update({
"received-on": self.payload_doc(repeat_record).received_on.isoformat()+"Z"
})
return headers
def __unicode__(self):
return "forwarding forms to: %s" % self.url
class CaseRepeater(Repeater):
"""
Record that cases should be repeated to a new url
"""
version = StringProperty(default=V2, choices=LEGAL_VERSIONS)
white_listed_case_types = StringListProperty(default=[]) # empty value means all case-types are accepted
    black_listed_users = StringListProperty(default=[])  # users whose caseblock submissions should be ignored
friendly_name = _("Forward Cases")
def allowed_to_forward(self, payload):
return self._allowed_case_type(payload) and self._allowed_user(payload)
def _allowed_case_type(self, payload):
return not self.white_listed_case_types or payload.type in self.white_listed_case_types
def _allowed_user(self, payload):
return self.payload_user_id(payload) not in self.black_listed_users
def payload_user_id(self, payload):
# get the user_id who submitted the payload, note, it's not the owner_id
return payload.actions[-1].user_id
@memoized
def payload_doc(self, repeat_record):
return CaseAccessors(repeat_record.domain).get_case(repeat_record.payload_id)
def get_headers(self, repeat_record):
headers = super(CaseRepeater, self).get_headers(repeat_record)
headers.update({
"server-modified-on": self.payload_doc(repeat_record).server_modified_on.isoformat()+"Z"
})
return headers
def __unicode__(self):
return "forwarding cases to: %s" % self.url
class ShortFormRepeater(Repeater):
"""
Record that form id & case ids should be repeated to a new url
"""
version = StringProperty(default=V2, choices=LEGAL_VERSIONS)
friendly_name = _("Forward Form Stubs")
@memoized
def payload_doc(self, repeat_record):
return FormAccessors(repeat_record.domain).get_form(repeat_record.payload_id)
def allowed_to_forward(self, payload):
return payload.xmlns != DEVICE_LOG_XMLNS
def get_headers(self, repeat_record):
headers = super(ShortFormRepeater, self).get_headers(repeat_record)
headers.update({
"received-on": self.payload_doc(repeat_record).received_on.isoformat()+"Z"
})
return headers
def __unicode__(self):
return "forwarding short form to: %s" % self.url
class AppStructureRepeater(Repeater):
friendly_name = _("Forward App Schema Changes")
def payload_doc(self, repeat_record):
return None
class RepeatRecord(Document):
"""
    A record of a particular instance of something that needs to be forwarded
with a link to the proper repeater object
"""
repeater_id = StringProperty()
repeater_type = StringProperty()
domain = StringProperty()
last_checked = DateTimeProperty()
next_check = DateTimeProperty()
succeeded = BooleanProperty(default=False)
failure_reason = StringProperty()
payload_id = StringProperty()
@property
@memoized
def repeater(self):
return Repeater.get(self.repeater_id)
@property
def url(self):
try:
return self.repeater.get_url(self)
except (XFormNotFound, ResourceNotFound):
return None
@property
def state(self):
state = RECORD_PENDING_STATE
if self.succeeded:
state = RECORD_SUCCESS_STATE
elif self.failure_reason:
state = RECORD_FAILURE_STATE
return state
@classmethod
def all(cls, domain=None, due_before=None, limit=None):
json_now = json_format_datetime(due_before or datetime.utcnow())
repeat_records = RepeatRecord.view("receiverwrapper/repeat_records_by_next_check",
startkey=[domain],
endkey=[domain, json_now, {}],
include_docs=True,
reduce=False,
limit=limit,
)
return repeat_records
@classmethod
def count(cls, domain=None):
results = RepeatRecord.view("receiverwrapper/repeat_records_by_next_check",
startkey=[domain],
endkey=[domain, {}],
reduce=True,
).one()
return results['value'] if results else 0
def set_next_try(self, reason=None):
# we use an exponential back-off to avoid submitting to bad urls
# too frequently.
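        # e.g. if the previous window was w, the next wait is roughly 1.5 * w,
        # clamped to [MIN_RETRY_WAIT, MAX_RETRY_WAIT] (see .const for the bounds).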
assert self.succeeded is False
assert self.next_check is not None
now = datetime.utcnow()
window = timedelta(minutes=0)
if self.last_checked:
window = self.next_check - self.last_checked
window += (window // 2) # window *= 1.5
if window < MIN_RETRY_WAIT:
window = MIN_RETRY_WAIT
elif window > MAX_RETRY_WAIT:
window = MAX_RETRY_WAIT
self.last_checked = now
self.next_check = self.last_checked + window
def try_now(self):
# try when we haven't succeeded and either we've
# never checked, or it's time to check again
return not self.succeeded
def get_payload(self):
try:
return self.repeater.get_payload(self)
except ResourceNotFound as e:
# this repeater is pointing at a missing document
# quarantine it and tell it to stop trying.
logging.exception(
u'Repeater {} in domain {} references a missing or deleted document!'.format(
self._id, self.domain,
))
self._payload_exception(e, reraise=False)
except IgnoreDocument:
# this repeater is pointing at a document with no payload
logging.info(u'Repeater {} in domain {} references a document with no payload'.format(
self._id, self.domain,
))
# Mark it succeeded so that we don't try again
self.update_success()
except Exception as e:
self._payload_exception(e, reraise=True)
def _payload_exception(self, exception, reraise=False):
self.doc_type = self.doc_type + '-Failed'
self.failure_reason = unicode(exception)
self.save()
if reraise:
raise exception
def fire(self, max_tries=3, force_send=False):
headers = self.repeater.get_headers(self)
if self.try_now() or force_send:
tries = 0
post_info = PostInfo(self.get_payload(), headers, force_send, max_tries)
self.post(post_info, tries=tries)
def post(self, post_info, tries=0):
tries += 1
try:
response = simple_post_with_cached_timeout(
post_info.payload,
self.url,
headers=post_info.headers,
force_send=post_info.force_send,
timeout=POST_TIMEOUT,
)
except Exception, e:
self.handle_exception(e)
else:
return self.handle_response(response, post_info, tries)
def handle_response(self, response, post_info, tries):
if 200 <= response.status_code < 300:
return self.handle_success(response)
else:
return self.handle_failure(response, post_info, tries)
def handle_success(self, response):
"""Do something with the response if the repeater succeeds
"""
self.last_checked = datetime.utcnow()
self.next_check = None
self.succeeded = True
self.repeater.handle_success(response, self)
def handle_failure(self, response, post_info, tries):
"""Do something with the response if the repeater fails
"""
if tries < post_info.max_tries and self.repeater.allow_immediate_retries(response):
return self.post(post_info, tries)
else:
self._fail(u'{}: {}'.format(response.status_code, response.reason), response)
self.repeater.handle_failure(response, self)
def handle_exception(self, exception):
"""handle internal exceptions
"""
self._fail(unicode(exception), None)
def _fail(self, reason, response):
if self.repeater.allow_retries(response):
self.set_next_try()
self.failure_reason = reason
log_counter(REPEATER_ERROR_COUNT, {
'_id': self._id,
'reason': reason,
'target_url': self.url,
})
# import signals
# Do not remove this import, it's required for the signals code to run even though not explicitly used in this file
from corehq.apps.repeaters import signals
| bsd-3-clause | -7,446,610,611,473,382,000 | 33.973244 | 114 | 0.639572 | false |
iffy/ppo | ppo/parse_plugins/sqlite_plugin.py | 1 | 2316 | # Copyright (c) The ppo team
# See LICENSE for details.
import tempfile
import os
from ppo import plugins
from structlog import get_logger
try:
from pysqlite2 import dbapi2 as sqlite
except ImportError:
import sqlite3 as sqlite
class SQLite3Parser(plugins.ParserPlugin):
"""
I parse SQLite databases (the raw database,
not the output of queries)
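    parse() returns a dict of the form
    {'tables': {<table name>: [<row as a dict>, ...], ...}}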
"""
name = 'sqlite-db'
def readProbability(self, instream):
firstpart = instream.read(20)
if firstpart.startswith(b'SQLite format 3\x00'):
return 60
else:
return 0
def parse(self, instream):
# XXX it would be nice to not have to write to a temporary file :(
log = get_logger()
log = log.bind(plugin=self.name)
tempdir = tempfile.mkdtemp()
tmpdb = os.path.join(tempdir, 'db.sqlite')
result = {'tables': {}}
try:
with open(tmpdb, 'wb') as fh:
log.msg('Copying data to temp file', filename=tmpdb)
fh.write(instream.read())
db = sqlite.connect(tmpdb)
db.row_factory = sqlite.Row
log.msg('Connected to db', filename=tmpdb)
r = db.execute(
"select tbl_name "
"from sqlite_master "
"where type='table'")
tables = [x[0] for x in r.fetchall()]
log.msg('Found these tables', tables=tables)
for table in tables:
l = log.bind(table=table)
r = db.execute(
"select * from %s" % (table,))
l.msg('Found rows', rowcount=r.rowcount)
data = [dict(x) for x in r.fetchall()]
result['tables'][table] = data
db.close()
return result
except:
raise
finally:
try:
os.remove(tmpdb)
log.msg('Removed temporary file', filename=tmpdb)
except:
log.msg('Error removing temporary file', filename=tmpdb, exc_info=True)
try:
os.rmdir(tempdir)
log.msg('Removed temporary dir', directory=tempdir)
except:
log.msg('Error removing temporary dir', directory=tempdir, exc_info=True)
| apache-2.0 | -7,123,455,095,867,599,000 | 29.473684 | 89 | 0.536269 | false |
chippey/gaffer | python/GafferUI/UIEditor.py | 1 | 57874 | ##########################################################################
#
# Copyright (c) 2014, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import weakref
import functools
import types
import re
import collections
import IECore
import Gaffer
import GafferUI
## The UIEditor class allows the user to edit the interfaces for nodes.
class UIEditor( GafferUI.NodeSetEditor ) :
def __init__( self, scriptNode, **kw ) :
self.__frame = GafferUI.Frame( borderWidth = 4, borderStyle = GafferUI.Frame.BorderStyle.None )
GafferUI.NodeSetEditor.__init__( self, self.__frame, scriptNode, **kw )
# Build the UI. Initially this isn't connected up to any node - we'll
# perform the connections in _updateFromSet().
######################################################################
self.__nodeMetadataWidgets = []
self.__plugMetadataWidgets = []
with self.__frame :
self.__tabbedContainer = GafferUI.TabbedContainer()
with self.__tabbedContainer :
# Node tab
with GafferUI.ListContainer( spacing = 4, borderWidth = 8, parenting = { "label" : "Node" } ) as self.__nodeTab :
with _Row() :
_Label( "Name" )
self.__nodeNameWidget = GafferUI.NameWidget( None )
with _Row() :
_Label( "Description", parenting = { "verticalAlignment" : GafferUI.ListContainer.VerticalAlignment.Top } )
self.__nodeMetadataWidgets.append(
_MultiLineStringMetadataWidget( key = "description" )
)
with _Row() :
_Label( "Color" )
self.__nodeMetadataWidgets.append(
_ColorSwatchMetadataWidget( key = "nodeGadget:color" )
)
# Plugs tab
with GafferUI.SplitContainer( orientation=GafferUI.SplitContainer.Orientation.Horizontal, borderWidth = 8, parenting = { "label" : "Plugs" } ) as self.__plugTab :
self.__plugListing = _PlugListing()
self.__plugListingSelectionChangedConnection = self.__plugListing.selectionChangedSignal().connect( Gaffer.WeakMethod( self.__plugListingSelectionChanged ) )
with GafferUI.TabbedContainer() as self.__plugAndSectionEditorsContainer :
self.__plugEditor = _PlugEditor()
self.__sectionEditor = _SectionEditor()
self.__sectionEditorNameChangedConnection = self.__sectionEditor.nameChangedSignal().connect( Gaffer.WeakMethod( self.__sectionEditorNameChanged ) )
self.__plugAndSectionEditorsContainer.setTabsVisible( False )
self.__plugTab.setSizes( [ 0.3, 0.7 ] )
# initialise our selection to nothing
self.__node = None
self.__selectedPlug = None
# call __updateFromSetInternal() to populate our selection and connect
# the ui to it. we pass lazy==False to avoid the early out if
# there is no currently selected node.
self.__updateFromSetInternal( lazy=False )
# Selection can be None, a Plug, or the name of a section.
def setSelection( self, selection ) :
self.__plugListing.setSelection( selection )
def getSelection( self ) :
return self.__plugListing.getSelection()
## Returns the widget layout responsible for editing the node as a whole.
def nodeEditor( self ) :
return self.__nodeTab
## Returns the widget layout responsible for editing individual plugs.
def plugEditor( self ) :
return self.__plugTab
@classmethod
def appendNodeContextMenuDefinitions( cls, nodeGraph, node, menuDefinition ) :
menuDefinition.append( "/UIEditorDivider", { "divider" : True } )
menuDefinition.append(
"/Set Color...",
{
"command" : functools.partial( cls.__setColor, node = node ),
"active" : not Gaffer.readOnly( node ),
}
)
@classmethod
def appendNodeEditorToolMenuDefinitions( cls, nodeEditor, node, menuDefinition ) :
menuDefinition.append(
"/Edit UI...",
{
"command" : functools.partial( GafferUI.UIEditor.acquire, node ),
"active" : (
( isinstance( node, Gaffer.Box ) or nodeEditor.nodeUI().plugValueWidget( node["user"] ) is not None ) and
not Gaffer.readOnly( node )
)
}
)
def _updateFromSet( self ) :
GafferUI.NodeSetEditor._updateFromSet( self )
self.__updateFromSetInternal()
def __updateFromSetInternal( self, lazy=True ) :
node = self._lastAddedNode()
if lazy and node == self.__node :
return
self.__node = node
self.__nodeNameWidget.setGraphComponent( self.__node )
self.__nodeTab.setEnabled( self.__node is not None )
if self.__node is None :
self.__plugListing.setPlugParent( None )
self.__sectionEditor.setPlugParent( None )
else :
plugParent = self.__node["user"]
if isinstance( self.__node, Gaffer.Box ) :
# For Boxes we want the user to edit the plugs directly
# parented to the Box, because that is where promoted plugs go,
# and because we want to leave the "user" plug empty so that it
# is available for use by the user on Reference nodes once a Box has
# been exported and referenced.
plugParent = self.__node
self.__plugListing.setPlugParent( plugParent )
self.__sectionEditor.setPlugParent( plugParent )
for widget in self.__nodeMetadataWidgets :
widget.setTarget( self.__node )
self.setSelection( None )
def __plugListingSelectionChanged( self, listing ) :
selection = listing.getSelection()
if selection is None or isinstance( selection, Gaffer.Plug ) :
self.__plugEditor.setPlug( selection )
self.__plugAndSectionEditorsContainer.setCurrent( self.__plugEditor )
elif isinstance( selection, basestring ) :
self.__plugEditor.setPlug( None )
self.__sectionEditor.setSection( selection )
self.__plugAndSectionEditorsContainer.setCurrent( self.__sectionEditor )
def __sectionEditorNameChanged( self, sectionEditor, oldName, newName ) :
		# When the name changes, our plug listing will have lost its
		# selection. So give it a helping hand.
self.__plugListing.setSelection( newName )
def __repr__( self ) :
return "GafferUI.UIEditor( scriptNode )"
@classmethod
def __setColor( cls, menu, node ) :
color = Gaffer.Metadata.value( node, "nodeGadget:color" ) or IECore.Color3f( 1 )
dialogue = GafferUI.ColorChooserDialogue( color = color, useDisplayTransform = False )
color = dialogue.waitForColor( parentWindow = menu.ancestor( GafferUI.Window ) )
if color is not None :
with Gaffer.UndoContext( node.ancestor( Gaffer.ScriptNode ) ) :
Gaffer.Metadata.registerValue( node, "nodeGadget:color", color )
GafferUI.EditorWidget.registerType( "UIEditor", UIEditor )
##########################################################################
# PlugValueWidget popup menu
##########################################################################
def __editPlugUI( node, plug ) :
editor = GafferUI.UIEditor.acquire( node )
editor.setSelection( plug )
editor.plugEditor().reveal()
def __plugPopupMenu( menuDefinition, plugValueWidget ) :
plug = plugValueWidget.getPlug()
node = plug.node()
if node is None :
return
if isinstance( node, Gaffer.Box ) :
if not plug.parent().isSame( node ) :
return
else :
if not plug.parent().isSame( node["user"] ) :
return
menuDefinition.append( "/EditUIDivider", { "divider" : True } )
menuDefinition.append( "/Edit UI...",
{
"command" : IECore.curry( __editPlugUI, node, plug ),
"active" : not plugValueWidget.getReadOnly() and not Gaffer.readOnly( plug )
}
)
__plugPopupMenuConnection = GafferUI.PlugValueWidget.popupMenuSignal().connect( __plugPopupMenu )
##########################################################################
# Simple fixed width label and row classes
##########################################################################
class _Label( GafferUI.Label ) :
def __init__( self, *args, **kw ) :
GafferUI.Label.__init__(
self,
horizontalAlignment = GafferUI.Label.HorizontalAlignment.Right,
*args, **kw
)
self._qtWidget().setFixedWidth( 110 )
class _Row( GafferUI.ListContainer ) :
def __init__( self, *args, **kw ) :
GafferUI.ListContainer.__init__( self, GafferUI.ListContainer.Orientation.Horizontal, spacing = 4, *args, **kw )
##########################################################################
# MetadataValueWidgets. These display metadata values, allowing the user
# to edit them.
##########################################################################
class _MetadataWidget( GafferUI.Widget ) :
def __init__( self, topLevelWidget, key, target = None, **kw ) :
GafferUI.Widget.__init__( self, topLevelWidget, **kw )
self.__key = key
self.__target = None
self.setTarget( target )
def setTarget( self, target ) :
assert( isinstance( target, ( Gaffer.Node, Gaffer.Plug, type( None ) ) ) )
self.__target = target
self.setEnabled( self.__target is not None )
if isinstance( self.__target, Gaffer.Node ) :
self.__metadataChangedConnection = Gaffer.Metadata.nodeValueChangedSignal().connect(
Gaffer.WeakMethod( self.__nodeMetadataChanged )
)
elif isinstance( self.__target, Gaffer.Plug ) :
self.__metadataChangedConnection = Gaffer.Metadata.plugValueChangedSignal().connect(
Gaffer.WeakMethod( self.__plugMetadataChanged )
)
else :
self.__metadataChangedConnection = None
self.__update()
def getTarget( self ) :
return self.__target
def setKey( self, key ) :
if key == self.__key :
return
self.__key = key
self.__update()
def getKey( self, key ) :
return self.__key
## Must be implemented in derived classes to update
# the widget from the value.
def _updateFromValue( self, value ) :
raise NotImplementedError
## Must be called by derived classes to update
# the Metadata value when the widget value changes.
def _updateFromWidget( self, value ) :
if self.__target is None :
return
with Gaffer.UndoContext( self.__target.ancestor( Gaffer.ScriptNode ) ) :
Gaffer.Metadata.registerValue( self.__target, self.__key, value )
## May be called by derived classes to deregister the
# metadata value.
def _deregisterValue( self ) :
if self.__target is None :
return
with Gaffer.UndoContext( self.__target.ancestor( Gaffer.ScriptNode ) ) :
Gaffer.Metadata.deregisterValue( self.__target, self.__key )
def __update( self ) :
if self.__target is not None :
self._updateFromValue( Gaffer.Metadata.value( self.__target, self.__key ) )
else :
self._updateFromValue( None )
def __nodeMetadataChanged( self, nodeTypeId, key, node ) :
if self.__key != key :
return
if node is not None and not node.isSame( self.__target ) :
return
if not self.__target.isInstanceOf( nodeTypeId ) :
return
self.__update()
def __plugMetadataChanged( self, nodeTypeId, plugPath, key, plug ) :
if self.__key != key :
return
if Gaffer.affectedByChange( self.__target, nodeTypeId, plugPath, plug ) :
self.__update()
class _BoolMetadataWidget( _MetadataWidget ) :
def __init__( self, key, target = None, **kw ) :
self.__boolWidget = GafferUI.BoolWidget()
_MetadataWidget.__init__( self, self.__boolWidget, key, target, **kw )
self.__stateChangedConnection = self.__boolWidget.stateChangedSignal().connect(
Gaffer.WeakMethod( self.__stateChanged )
)
def _updateFromValue( self, value ) :
self.__boolWidget.setState( value if value is not None else False )
def __stateChanged( self, *unused ) :
self._updateFromWidget( self.__boolWidget.getState() )
class _StringMetadataWidget( _MetadataWidget ) :
def __init__( self, key, target = None, acceptEmptyString = True, **kw ) :
self.__textWidget = GafferUI.TextWidget()
_MetadataWidget.__init__( self, self.__textWidget, key, target, **kw )
self.__acceptEmptyString = acceptEmptyString
self.__editingFinishedConnection = self.__textWidget.editingFinishedSignal().connect(
Gaffer.WeakMethod( self.__editingFinished )
)
def textWidget( self ) :
return self.__textWidget
def _updateFromValue( self, value ) :
self.__textWidget.setText( value if value is not None else "" )
def __editingFinished( self, *unused ) :
text = self.__textWidget.getText()
if text or self.__acceptEmptyString :
self._updateFromWidget( text )
else :
self._deregisterValue()
class _MultiLineStringMetadataWidget( _MetadataWidget ) :
def __init__( self, key, target = None, **kw ) :
self.__textWidget = GafferUI.MultiLineTextWidget()
_MetadataWidget.__init__( self, self.__textWidget, key, target, **kw )
self.__editingFinishedConnection = self.__textWidget.editingFinishedSignal().connect(
Gaffer.WeakMethod( self.__editingFinished )
)
def textWidget( self ) :
return self.__textWidget
def _updateFromValue( self, value ) :
self.__textWidget.setText( value if value is not None else "" )
def __editingFinished( self, *unused ) :
self._updateFromWidget( self.__textWidget.getText() )
class _ColorSwatchMetadataWidget( _MetadataWidget ) :
def __init__( self, key, target = None, **kw ) :
self.__swatch = GafferUI.ColorSwatch( useDisplayTransform = False )
_MetadataWidget.__init__( self, self.__swatch, key, target, **kw )
self.__swatch._qtWidget().setFixedHeight( 18 )
self.__swatch._qtWidget().setMaximumWidth( 40 )
self.__value = None
self.__buttonReleaseConnection = self.__swatch.buttonReleaseSignal().connect( Gaffer.WeakMethod( self.__buttonRelease ) )
def _updateFromValue( self, value ) :
if value is not None :
self.__swatch.setColor( value )
else :
self.__swatch.setColor( IECore.Color4f( 0, 0, 0, 0 ) )
self.__value = value
def __buttonRelease( self, swatch, event ) :
if event.button != event.Buttons.Left :
return False
color = self.__value if self.__value is not None else IECore.Color3f( 1 )
dialogue = GafferUI.ColorChooserDialogue( color = color, useDisplayTransform = False )
color = dialogue.waitForColor( parentWindow = self.ancestor( GafferUI.Window ) )
if color is not None :
self._updateFromWidget( color )
class _MenuMetadataWidget( _MetadataWidget ) :
def __init__( self, key, labelsAndValues, target = None, **kw ) :
self.__menuButton = GafferUI.MenuButton(
menu = GafferUI.Menu( Gaffer.WeakMethod( self.__menuDefinition ) )
)
self.__labelsAndValues = labelsAndValues
self.__currentValue = None
_MetadataWidget.__init__( self, self.__menuButton, key, target, **kw )
def _updateFromValue( self, value ) :
self.__currentValue = value
buttonText = str( value )
for label, value in self.__labelsAndValues :
if value == self.__currentValue :
buttonText = label
break
self.__menuButton.setText( buttonText )
def __menuDefinition( self ) :
result = IECore.MenuDefinition()
for label, value in self.__labelsAndValues :
result.append(
"/" + label,
{
"command" : functools.partial( Gaffer.WeakMethod( self.__setValue ), value = value ),
"checkBox" : value == self.__currentValue
}
)
return result
def __setValue( self, unused, value ) :
self._updateFromWidget( value )
##########################################################################
# Hierarchical representation of a plug layout, suitable for manipulating
# by the _PlugListing.
# \todo Consider sharing this data structure with the PlugLayout itself,
# rather than each using a different internal representation. If we did
# this then the data structure itself should take care of the mapping
# to/from metadata.
##########################################################################
class _LayoutItem( object ) :
def __init__( self ) :
self.__parent = None
self.__children = []
def parent( self ) :
if self.__parent is None :
return None
else :
return self.__parent()
def child( self, name ) :
for c in self.__children :
if c.name() == name :
return c
return None
def isAncestorOf( self, item ) :
while item is not None :
parent = item.parent()
if parent is self :
return True
item = parent
return False
def append( self, child ) :
self.insert( len( self ), child )
def insert( self, index, child ) :
assert( child.parent() is None )
self.__children.insert( index, child )
child.__parent = weakref.ref( self )
def remove( self, child ) :
assert( child.parent() is self )
self.__children.remove( child )
child.__parent = None
def index( self, child ) :
return self.__children.index( child )
def name( self ) :
raise NotImplementedError
def fullName( self ) :
result = ""
item = self
while item.parent() is not None :
if result :
result = item.name() + "." + result
else :
result = item.name()
item = item.parent()
return result
def __len__( self ) :
return len( self.__children )
def __getitem__( self, index ) :
return self.__children[index]
class _SectionLayoutItem( _LayoutItem ) :
def __init__( self, sectionName ) :
_LayoutItem.__init__( self )
self.__sectionName = sectionName
def name( self ) :
return self.__sectionName
class _PlugLayoutItem( _LayoutItem ) :
def __init__( self, plug ) :
_LayoutItem.__init__( self )
self.plug = plug
self.__name = plug.getName()
def name( self ) :
return self.__name
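# A small sketch (not part of the original file) of how the item classes above
# compose into a layout tree. _PlugListing builds such trees from plug metadata
# in __updatePath(); only section items are used here, so no Gaffer plugs are needed.
def __layoutItemExample() :
	root = _SectionLayoutItem( "" )
	settings = _SectionLayoutItem( "Settings" )
	advanced = _SectionLayoutItem( "Advanced" )
	root.append( settings )
	settings.append( advanced )
	# fullName() joins the names of all items below the (unnamed) root
	assert( advanced.fullName() == "Settings.Advanced" )
	assert( settings.isAncestorOf( advanced ) )
	assert( root.index( settings ) == 0 )
	return root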
##########################################################################
# _PlugListing. This is used to list the plugs in the UIEditor,
# organised into their respective sections.
##########################################################################
class _PlugListing( GafferUI.Widget ) :
class __LayoutPath( Gaffer.Path ) :
def __init__( self, rootItem, path, root="/", filter = None ) :
Gaffer.Path.__init__( self, path, root, filter )
self.__rootItem = rootItem
def rootItem( self ) :
return self.__rootItem
def item( self ) :
result = self.__rootItem
for name in self :
result = result.child( name )
if result is None :
return None
return result
def copy( self ) :
return self.__class__( self.__rootItem, self[:], self.root(), self.getFilter() )
def isLeaf( self ) :
return not isinstance( self.item(), _SectionLayoutItem )
def isValid( self ) :
return self.item() is not None
def _children( self ) :
item = self.item()
if item is None :
return []
result = [
self.__class__( self.__rootItem, self[:] + [ c.name() ], self.root(), self.getFilter() )
for c in item
]
# Add a placeholder child into empty sections, to be used as a drag target
# in __dragMove()
if len( result ) == 0 and isinstance( item, _SectionLayoutItem ) :
result.append( self.__class__( self.__rootItem, self[:] + [ " " ], self.root(), self.getFilter() ) )
return result
def __init__( self, **kw ) :
column = GafferUI.ListContainer( spacing = 4 )
GafferUI.Widget.__init__( self, column, **kw )
with column :
self.__pathListing = GafferUI.PathListingWidget(
self.__LayoutPath( _SectionLayoutItem( "" ), "/" ),
# listing displays the plug name and automatically sorts based on plug index
columns = ( GafferUI.PathListingWidget.defaultNameColumn, ),
displayMode = GafferUI.PathListingWidget.DisplayMode.Tree,
)
self.__pathListing.setDragPointer( "" )
self.__pathListing.setSortable( False )
self.__pathListing.setHeaderVisible( False )
with GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Horizontal, spacing = 4 ) :
GafferUI.MenuButton(
image = "plus.png",
hasFrame = False,
menu = GafferUI.Menu(
definition = Gaffer.WeakMethod( self.__addMenuDefinition )
)
)
self.__deleteButton = GafferUI.Button( image = "minus.png", hasFrame = False )
self.__deleteButtonClickedConnection = self.__deleteButton.clickedSignal().connect( Gaffer.WeakMethod( self.__deleteButtonClicked ) )
self.__parent = None # the parent of the plugs we're listing
self.__dragItem = None
self.__selectionChangedSignal = Gaffer.Signal1()
self.__dragEnterConnection = self.__pathListing.dragEnterSignal().connect( Gaffer.WeakMethod( self.__dragEnter ) )
self.__dragMoveConnection = self.__pathListing.dragMoveSignal().connect( Gaffer.WeakMethod( self.__dragMove ) )
self.__dragEndConnection = self.__pathListing.dragEndSignal().connect( Gaffer.WeakMethod( self.__dragEnd ) )
self.__selectionChangedConnection = self.__pathListing.selectionChangedSignal().connect( Gaffer.WeakMethod( self.__selectionChanged ) )
self.__keyPressConnection = self.keyPressSignal().connect( Gaffer.WeakMethod( self.__keyPress ) )
self.__nodeMetadataChangedConnection = Gaffer.Metadata.nodeValueChangedSignal().connect( Gaffer.WeakMethod( self.__nodeMetadataChanged ) )
self.__plugMetadataChangedConnection = Gaffer.Metadata.plugValueChangedSignal().connect( Gaffer.WeakMethod( self.__plugMetadataChanged ) )
def setPlugParent( self, parent ) :
assert( isinstance( parent, ( Gaffer.Plug, Gaffer.Node, types.NoneType ) ) )
self.__parent = parent
self.__childAddedConnection = None
self.__childRemovedConnection = None
self.__childNameChangedConnections = {}
if self.__parent is not None :
self.__childAddedConnection = self.__parent.childAddedSignal().connect( Gaffer.WeakMethod( self.__childAddedOrRemoved ) )
self.__childRemovedConnection = self.__parent.childRemovedSignal().connect( Gaffer.WeakMethod( self.__childAddedOrRemoved ) )
for child in self.__parent.children() :
self.__updateChildNameChangedConnection( child )
self.__updatePath()
def getPlugParent( self ) :
return self.__parent
# Selection can be None, a Plug, or the name of a section.
def setSelection( self, selection ) :
self.__updatePathLazily.flush( self )
def findPlugPath( path, plug ) :
item = path.item()
if isinstance( item, _PlugLayoutItem ) and item.plug.isSame( plug ) :
return path
else :
for child in path.children() :
r = findPlugPath( child, plug )
if r is not None :
return r
return None
if isinstance( selection, Gaffer.Plug ) :
path = findPlugPath( self.__pathListing.getPath(), selection )
if path is None :
self.__pathListing.setSelectedPaths( [] )
else :
self.__pathListing.setSelectedPaths( [ path ] )
elif isinstance( selection, basestring ) :
path = self.__pathListing.getPath().copy()
path[:] = selection.split( "." )
self.__pathListing.setSelectedPaths( [ path ] )
else :
assert( selection is None )
self.__pathListing.setSelectedPaths( [] )
def getSelection( self ) :
item = self.__selectedItem()
if item is None :
return None
elif isinstance( item, _PlugLayoutItem ) :
return item.plug
elif isinstance( item, _SectionLayoutItem ) :
return item.fullName()
else :
return None
def selectionChangedSignal( self ) :
return self.__selectionChangedSignal
# Updates the path we show in the listing by building a layout based
# on the metadata.
def __updatePath( self ) :
if self.__parent is None :
# we have nothing to show - early out.
self.__pathListing.setPath( self.__LayoutPath( _SectionLayoutItem( "" ), "/" ) )
return
def section( rootLayoutItem, sectionPath ) :
sectionItem = rootLayoutItem
if sectionPath != "" :
for sectionName in sectionPath.split( "." ) :
childSectionItem = sectionItem.child( sectionName )
if childSectionItem is None :
childSectionItem = _SectionLayoutItem( sectionName )
sectionItem.append( childSectionItem )
sectionItem = childSectionItem
return sectionItem
layout = _SectionLayoutItem( "" )
for sectionPath in GafferUI.PlugLayout.layoutSections( self.__parent ) :
if sectionPath == "User" and isinstance( self.__parent, Gaffer.Node ) :
continue
sectionItem = section( layout, sectionPath )
for plug in GafferUI.PlugLayout.layoutOrder( self.__parent, section = sectionPath ) :
sectionItem.append( _PlugLayoutItem( plug ) )
emptySections = Gaffer.Metadata.value( self.getPlugParent(), "uiEditor:emptySections" )
emptySectionIndices = Gaffer.Metadata.value( self.getPlugParent(), "uiEditor:emptySectionIndices" )
if emptySections and emptySectionIndices :
for sectionPath, sectionIndex in zip( emptySections, emptySectionIndices ) :
parentPath, unused, sectionName = sectionPath.rpartition( "." )
parentSection = section( layout, parentPath )
if parentSection.child( sectionName ) is None :
parentSection.insert( sectionIndex, _SectionLayoutItem( sectionName ) )
if len( layout ) == 0 and isinstance( self.__parent, Gaffer.Node ) :
layout.append( _SectionLayoutItem( "Settings" ) )
expandedPaths = self.__pathListing.getExpandedPaths()
self.__pathListing.setPath( self.__LayoutPath( layout, "/" ) )
self.__pathListing.setExpandedPaths( expandedPaths )
@GafferUI.LazyMethod()
def __updatePathLazily( self ) :
self.__updatePath()
# Updates the metadata that controls the plug layout from the layout
# we show in the listing.
def __updateMetadata( self ) :
# Because sections only really exist by virtue of being requested
# by a plug, we must store empty sections separately for ourselves.
emptySections = IECore.StringVectorData()
emptySectionIndices = IECore.IntVectorData()
def walk( layoutItem, path = "", index = 0 ) :
for childItem in layoutItem :
if isinstance( childItem, _PlugLayoutItem ) :
Gaffer.Metadata.registerValue( childItem.plug, "layout:section", path )
Gaffer.Metadata.registerValue( childItem.plug, "layout:index", index )
index += 1
elif isinstance( childItem, _SectionLayoutItem ) :
childPath = path + "." + childItem.name() if path else childItem.name()
if len( childItem ) :
index = walk( childItem, childPath, index )
else :
emptySections.append( childPath )
emptySectionIndices.append( layoutItem.index( childItem ) )
return index
with Gaffer.BlockedConnection( self.__plugMetadataChangedConnection ) :
walk( self.__pathListing.getPath().copy().setFromString( "/" ).item() )
Gaffer.Metadata.registerValue( self.getPlugParent(), "uiEditor:emptySections", emptySections )
Gaffer.Metadata.registerValue( self.getPlugParent(), "uiEditor:emptySectionIndices", emptySectionIndices )
def __childAddedOrRemoved( self, parent, child ) :
assert( parent.isSame( self.__parent ) )
self.__updateChildNameChangedConnection( child )
self.__updatePathLazily()
def __childNameChanged( self, child ) :
selection = self.getSelection()
self.__updatePath()
if isinstance( selection, Gaffer.Plug ) and child.isSame( selection ) :
			# because the plug's name has changed, the path needed to
# keep it selected is different too, so we have to manually
# restore the selection.
self.setSelection( selection )
def __updateChildNameChangedConnection( self, child ) :
if self.__parent.isSame( child.parent() ) :
if child not in self.__childNameChangedConnections :
self.__childNameChangedConnections[child] = child.nameChangedSignal().connect( Gaffer.WeakMethod( self.__childNameChanged ) )
else :
if child in self.__childNameChangedConnections :
del self.__childNameChangedConnections[child]
def __dragEnter( self, listing, event ) :
# accept the drag if it originates with us,
# so __dragMove and __drop can implement
# drag and drop reordering of plugs.
if event.sourceWidget is not self.__pathListing :
return False
if not isinstance( event.data, IECore.StringVectorData ) :
return False
dragPath = self.__pathListing.getPath().copy().setFromString( event.data[0] )
self.__dragItem = dragPath.item()
# dragging around entire open sections is a bit confusing, so don't
self.__pathListing.setPathExpanded( dragPath, False )
return True
def __dragMove( self, listing, event ) :
if self.__dragItem is None :
return False
# update our layout structure to reflect the drag
#################################################
# find newParent and newIndex - variables specifying
# the new location for the dragged item.
targetPath = self.__pathListing.pathAt( event.line.p0 )
if targetPath is not None :
targetItem = targetPath.item()
if targetItem is not None :
if isinstance( targetItem, _SectionLayoutItem ) and self.__pathListing.getPathExpanded( targetPath ) and targetItem.parent() is self.__dragItem.parent() :
newParent = targetItem
newIndex = 0
else :
newParent = targetItem.parent()
newIndex = newParent.index( targetItem )
else :
# target is a placeholder added into an empty
# section by __LayoutPath._children().
newParent = targetPath.copy().truncateUntilValid().item()
newIndex = 0
else :
# drag has gone above or below all listed items
newParent = self.__pathListing.getPath().rootItem()
newIndex = 0 if event.line.p0.y < 1 else len( newParent )
# skip any attempted circular reparenting
if newParent is self.__dragItem or self.__dragItem.isAncestorOf( newParent ) :
return True
# disallow drags that would place a plug below a section
firstNonPlugIndex = next(
( x[0] for x in enumerate( newParent ) if not isinstance( x[1], _PlugLayoutItem ) ),
len( newParent )
)
if self.__dragItem.parent() is newParent and newParent.index( self.__dragItem ) < firstNonPlugIndex :
firstNonPlugIndex -= 1
if isinstance( self.__dragItem, _PlugLayoutItem ) :
if newIndex > firstNonPlugIndex :
return True
else :
if newIndex < firstNonPlugIndex :
newIndex = max( newIndex, firstNonPlugIndex )
self.__dragItem.parent().remove( self.__dragItem )
newParent.insert( newIndex, self.__dragItem )
# let the listing know we've been monkeying behind the scenes.
# we need to update the selection, because when we reparented
# the drag item its path will have changed.
##############################################################
self.__pathListing.getPath().pathChangedSignal()( self.__pathListing.getPath() )
selection = self.__pathListing.getPath().copy()
selection[:] = self.__dragItem.fullName().split( "." )
self.__pathListing.setSelectedPaths( [ selection ], scrollToFirst = False, expandNonLeaf = False )
return True
def __dragEnd( self, listing, event ) :
if self.__dragItem is None :
return False
with Gaffer.UndoContext( self.__parent.ancestor( Gaffer.ScriptNode ) ) :
self.__updateMetadata()
self.__dragItem = None
return True
def __selectionChanged( self, pathListing ) :
self.__deleteButton.setEnabled( bool( pathListing.getSelectedPaths() ) )
self.__selectionChangedSignal( self )
def __deleteButtonClicked( self, button ) :
self.__deleteSelected()
def __nodeMetadataChanged( self, nodeTypeId, key, node ) :
if self.__parent is None :
return
if node is not None and not self.__parent.isSame( node ) :
return
if not self.__parent.isInstanceOf( nodeTypeId ) :
return
if key in ( "uiEditor:emptySections", "uiEditor:emptySectionIndices" ) :
self.__updatePathLazily()
def __plugMetadataChanged( self, nodeTypeId, plugPath, key, plug ) :
if self.__parent is None :
return
parentAffected = isinstance( self.__parent, Gaffer.Plug ) and Gaffer.affectedByChange( self.__parent, nodeTypeId, plugPath, plug )
childAffected = Gaffer.childAffectedByChange( self.__parent, nodeTypeId, plugPath, plug )
if not parentAffected and not childAffected :
return
if key in ( "layout:index", "layout:section", "uiEditor:emptySections", "uiEditor:emptySectionIndices" ) :
self.__updatePathLazily()
def __keyPress( self, widget, event ) :
assert( widget is self )
if event.key == "Backspace" or event.key == "Delete" :
self.__deleteSelected()
return True
return False
def __addMenuDefinition( self ) :
m = IECore.MenuDefinition()
m.append( "/Add Plug/Bool", { "command" : functools.partial( Gaffer.WeakMethod( self.__addPlug ), Gaffer.BoolPlug ) } )
m.append( "/Add Plug/Float", { "command" : functools.partial( Gaffer.WeakMethod( self.__addPlug ), Gaffer.FloatPlug ) } )
m.append( "/Add Plug/Int", { "command" : functools.partial( Gaffer.WeakMethod( self.__addPlug ), Gaffer.IntPlug ) } )
m.append( "/Add Plug/NumericDivider", { "divider" : True } )
m.append( "/Add Plug/String", { "command" : functools.partial( Gaffer.WeakMethod( self.__addPlug ), Gaffer.StringPlug ) } )
m.append( "/Add Plug/StringDivider", { "divider" : True } )
m.append( "/Add Plug/V2i", { "command" : functools.partial( Gaffer.WeakMethod( self.__addPlug ), Gaffer.V2iPlug ) } )
m.append( "/Add Plug/V3i", { "command" : functools.partial( Gaffer.WeakMethod( self.__addPlug ), Gaffer.V3iPlug ) } )
m.append( "/Add Plug/V2f", { "command" : functools.partial( Gaffer.WeakMethod( self.__addPlug ), Gaffer.V2fPlug ) } )
m.append( "/Add Plug/V3f", { "command" : functools.partial( Gaffer.WeakMethod( self.__addPlug ), Gaffer.V3fPlug ) } )
m.append( "/Add Plug/VectorDivider", { "divider" : True } )
m.append( "/Add Plug/Color3f", { "command" : functools.partial( Gaffer.WeakMethod( self.__addPlug ), Gaffer.Color3fPlug ) } )
m.append( "/Add Plug/Color4f", { "command" : functools.partial( Gaffer.WeakMethod( self.__addPlug ), Gaffer.Color4fPlug ) } )
m.append( "/Add Plug Divider", { "divider" : True } )
m.append( "/Add Section", { "command" : Gaffer.WeakMethod( self.__addSection ) } )
return m
def __addPlug( self, plugType ) :
plug = plugType( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
Gaffer.Metadata.registerValue( plug, "nodule:type", "" )
parentItem = self.__selectedItem()
if parentItem is not None :
while not isinstance( parentItem, _SectionLayoutItem ) :
parentItem = parentItem.parent()
else :
parentItem = self.__pathListing.getPath().rootItem()
parentItem = next(
( c for c in parentItem if isinstance( c, _SectionLayoutItem ) ),
parentItem
)
Gaffer.Metadata.registerValue( plug, "layout:section", parentItem.fullName() )
with Gaffer.UndoContext( self.__parent.ancestor( Gaffer.ScriptNode ) ) :
self.getPlugParent().addChild( plug )
self.__updatePathLazily.flush( self )
self.setSelection( plug )
def __addSection( self ) :
rootItem = self.__pathListing.getPath().rootItem()
existingSectionNames = set( c.name() for c in rootItem if isinstance( c, _SectionLayoutItem ) )
name = "New Section"
index = 1
while name in existingSectionNames :
name = "New Section %d" % index
index += 1
rootItem.append( _SectionLayoutItem( name ) )
self.__pathListing.getPath().pathChangedSignal()( self.__pathListing.getPath() )
with Gaffer.UndoContext( self.__parent.ancestor( Gaffer.ScriptNode ) ) :
self.__updateMetadata()
self.__pathListing.setSelectedPaths(
self.__pathListing.getPath().copy().setFromString( "/" + name )
)
def __selectedItem( self ) :
selectedPaths = self.__pathListing.getSelectedPaths()
if not len( selectedPaths ) :
return None
assert( len( selectedPaths ) == 1 )
return selectedPaths[0].item()
def __deleteSelected( self ) :
selectedItem = self.__selectedItem()
if selectedItem is None :
return
selectedItem.parent().remove( selectedItem )
def deletePlugsWalk( item ) :
if isinstance( item, _PlugLayoutItem ) :
item.plug.parent().removeChild( item.plug )
else :
for childItem in item :
deletePlugsWalk( childItem )
with Gaffer.UndoContext( self.__parent.ancestor( Gaffer.ScriptNode ) ) :
deletePlugsWalk( selectedItem )
self.__updateMetadata()
##########################################################################
# _PresetsEditor. This provides a ui for editing the presets for a plug.
##########################################################################
class _PresetsEditor( GafferUI.Widget ) :
def __init__( self, **kw ) :
row = GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Horizontal, spacing = 8 )
GafferUI.Widget.__init__( self, row, **kw )
with row :
with GafferUI.ListContainer( spacing = 4 ) :
self.__pathListing = GafferUI.PathListingWidget(
Gaffer.DictPath( collections.OrderedDict(), "/" ),
columns = ( GafferUI.PathListingWidget.defaultNameColumn, ),
)
self.__pathListing.setDragPointer( "" )
self.__pathListing.setSortable( False )
self.__pathListing.setHeaderVisible( False )
self.__pathListing._qtWidget().setFixedWidth( 200 )
self.__pathListing._qtWidget().setFixedHeight( 200 )
self.__pathListingSelectionChangedConnection = self.__pathListing.selectionChangedSignal().connect( Gaffer.WeakMethod( self.__selectionChanged ) )
self.__dragEnterConnection = self.__pathListing.dragEnterSignal().connect( Gaffer.WeakMethod( self.__dragEnter ) )
self.__dragMoveConnection = self.__pathListing.dragMoveSignal().connect( Gaffer.WeakMethod( self.__dragMove ) )
self.__dragEndConnection = self.__pathListing.dragEndSignal().connect( Gaffer.WeakMethod( self.__dragEnd ) )
with GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Horizontal, spacing = 4 ) :
self.__addButton = GafferUI.Button( image = "plus.png", hasFrame = False )
self.__addButtonClickedConnection = self.__addButton.clickedSignal().connect( Gaffer.WeakMethod( self.__addButtonClicked ) )
self.__deleteButton = GafferUI.Button( image = "minus.png", hasFrame = False )
self.__deleteButtonClickedConnection = self.__deleteButton.clickedSignal().connect( Gaffer.WeakMethod( self.__deleteButtonClicked ) )
with GafferUI.ListContainer( spacing = 4 ) as self.__editingColumn :
GafferUI.Label( "Name" )
self.__nameWidget = GafferUI.TextWidget()
self.__nameEditingFinishedConnection = self.__nameWidget.editingFinishedSignal().connect( Gaffer.WeakMethod( self.__nameEditingFinished ) )
GafferUI.Spacer( IECore.V2i( 4 ), maximumSize = IECore.V2i( 4 ) )
GafferUI.Label( "Value" )
# We make a UI for editing preset values by copying the plug
# onto this node and then making a PlugValueWidget for it.
self.__valueNode = Gaffer.Node( "PresetEditor" )
self.__valuePlugSetConnection = self.__valueNode.plugSetSignal().connect( Gaffer.WeakMethod( self.__valuePlugSet ) )
def setPlug( self, plug ) :
self.__plug = plug
self.__plugMetadataChangedConnection = None
del self.__editingColumn[4:]
plugValueWidget = None
if self.__plug is not None :
self.__plugMetadataChangedConnection = Gaffer.Metadata.plugValueChangedSignal().connect( Gaffer.WeakMethod( self.__plugMetadataChanged ) )
self.__valueNode["presetValue"] = plug.createCounterpart( "presetValue", plug.Direction.In )
if hasattr( self.__plug, "getValue" ) :
plugValueWidget = GafferUI.PlugValueWidget.create( self.__valueNode["presetValue"], useTypeOnly = True )
self.__editingColumn.append( plugValueWidget if plugValueWidget is not None else GafferUI.TextWidget() )
self.__editingColumn.append( GafferUI.Spacer( IECore.V2i( 0 ), parenting = { "expand" : True } ) )
self.__updatePath()
self.__addButton.setEnabled( hasattr( self.__plug, "getValue" ) )
def getPlug( self ) :
return self.__plug
def __updatePath( self ) :
d = self.__pathListing.getPath().dict()
d.clear()
if self.__plug is not None :
for name in Gaffer.Metadata.registeredValues( self.__plug, instanceOnly = True, persistentOnly = True ) :
if name.startswith( "preset:" ) :
d[name[7:]] = Gaffer.Metadata.value( self.__plug, name )
self.__pathListing.getPath().pathChangedSignal()( self.__pathListing.getPath() )
def __plugMetadataChanged( self, nodeTypeId, plugPath, key, plug ) :
if self.__plug is None or not Gaffer.affectedByChange( self.__plug, nodeTypeId, plugPath, plug ) :
return
if key.startswith( "preset:" ) :
self.__updatePath()
def __selectionChanged( self, listing ) :
selectedPaths = listing.getSelectedPaths()
self.__nameWidget.setText( selectedPaths[0][0] if selectedPaths else "" )
if selectedPaths :
with Gaffer.BlockedConnection( self.__valuePlugSetConnection ) :
self.__valueNode["presetValue"].setValue(
Gaffer.Metadata.value( self.getPlug(), "preset:" + selectedPaths[0][0] )
)
self.__editingColumn.setEnabled( bool( selectedPaths ) )
self.__deleteButton.setEnabled( bool( selectedPaths ) )
def __dragEnter( self, listing, event ) :
if event.sourceWidget is not self.__pathListing :
return False
if not isinstance( event.data, IECore.StringVectorData ) :
return False
return True
def __dragMove( self, listing, event ) :
d = self.__pathListing.getPath().dict()
srcPath = self.__pathListing.getPath().copy().setFromString( event.data[0] )
srcIndex = d.keys().index( srcPath[0] )
targetPath = self.__pathListing.pathAt( event.line.p0 )
if targetPath is not None :
targetIndex = d.keys().index( targetPath[0] )
else :
targetIndex = 0 if event.line.p0.y < 1 else len( d )
if srcIndex == targetIndex :
return True
items = d.items()
item = items[srcIndex]
del items[srcIndex]
items.insert( targetIndex, item )
d.clear()
d.update( items )
self.__pathListing.getPath().pathChangedSignal()( self.__pathListing.getPath() )
return True
def __dragEnd( self, listing, event ) :
d = self.__pathListing.getPath().dict()
with Gaffer.BlockedConnection( self.__plugMetadataChangedConnection ) :
with Gaffer.UndoContext( self.getPlug().ancestor( Gaffer.ScriptNode ) ) :
# reorder by removing everything and reregistering in the order we want
for item in d.items() :
Gaffer.Metadata.deregisterValue( self.getPlug(), "preset:" + item[0] )
for item in d.items() :
Gaffer.Metadata.registerValue( self.getPlug(), "preset:" + item[0], item[1] )
self.__updatePath()
return True
def __addButtonClicked( self, button ) :
existingNames = [ p[0] for p in self.__pathListing.getPath().children() ]
name = "New Preset"
index = 1
while name in existingNames :
name = "New Preset %d" % index
index += 1
with Gaffer.UndoContext( self.__plug.ancestor( Gaffer.ScriptNode ) ) :
Gaffer.Metadata.registerValue( self.__plug, "preset:" + name, self.__plug.getValue() )
self.__pathListing.setSelectedPaths(
self.__pathListing.getPath().copy().setFromString( "/" + name )
)
self.__nameWidget.grabFocus()
self.__nameWidget.setSelection( 0, len( name ) )
return True
def __deleteButtonClicked( self, button ) :
paths = self.__pathListing.getPath().children()
selectedPreset = self.__pathListing.getSelectedPaths()[0][0]
selectedIndex = [ p[0] for p in paths ].index( selectedPreset )
with Gaffer.UndoContext( self.__plug.ancestor( Gaffer.ScriptNode ) ) :
Gaffer.Metadata.deregisterValue( self.__plug, "preset:" + selectedPreset )
del paths[selectedIndex]
if len( paths ) :
self.__pathListing.setSelectedPaths( [ paths[min(selectedIndex,len( paths )-1)] ] )
return True
def __nameEditingFinished( self, nameWidget ) :
selectedPaths = self.__pathListing.getSelectedPaths()
if not len( selectedPaths ) :
return True
oldName = selectedPaths[0][0]
newName = nameWidget.getText()
items = self.__pathListing.getPath().dict().items()
with Gaffer.BlockedConnection( self.__plugMetadataChangedConnection ) :
with Gaffer.UndoContext( self.getPlug().ancestor( Gaffer.ScriptNode ) ) :
# retain order by removing and reregistering everything
for item in items :
Gaffer.Metadata.deregisterValue( self.getPlug(), "preset:" + item[0] )
for item in items :
Gaffer.Metadata.registerValue( self.getPlug(), "preset:" + (item[0] if item[0] != oldName else newName), item[1] )
self.__updatePath()
self.__pathListing.setSelectedPaths( [ self.__pathListing.getPath().copy().setFromString( "/" + newName ) ] )
return True
def __valuePlugSet( self, plug ) :
if not plug.isSame( self.__valueNode["presetValue"] ) :
return
selectedPaths = self.__pathListing.getSelectedPaths()
preset = selectedPaths[0][0]
with Gaffer.UndoContext( self.getPlug().ancestor( Gaffer.ScriptNode ) ) :
Gaffer.Metadata.registerValue( self.getPlug(), "preset:" + preset, plug.getValue() )
##########################################################################
# _PlugEditor. This provides a panel for editing a specific plug's name,
# description, etc.
##########################################################################
class _PlugEditor( GafferUI.Widget ) :
def __init__( self, **kw ) :
scrolledContainer = GafferUI.ScrolledContainer( horizontalMode = GafferUI.ScrolledContainer.ScrollMode.Never, borderWidth = 8 )
GafferUI.Widget.__init__( self, scrolledContainer, **kw )
self.__metadataWidgets = {}
scrolledContainer.setChild( GafferUI.ListContainer( spacing = 4 ) )
with scrolledContainer.getChild() :
with _Row() :
_Label( "Name" )
self.__nameWidget = GafferUI.NameWidget( None )
with _Row() :
_Label( "Label" )
self.__metadataWidgets["label"] = _StringMetadataWidget( key = "label", acceptEmptyString = False )
with _Row() :
_Label( "Description", parenting = { "verticalAlignment" : GafferUI.ListContainer.VerticalAlignment.Top } )
self.__metadataWidgets["description"] = _MultiLineStringMetadataWidget( key = "description" )
self.__metadataWidgets["description"].textWidget().setFixedLineHeight( 10 )
with _Row() :
_Label( "Widget" )
self.__widgetMenu = GafferUI.MenuButton(
menu = GafferUI.Menu( Gaffer.WeakMethod( self.__widgetMenuDefinition ) )
)
with GafferUI.Collapsible( "Presets", collapsed = True ) :
with _Row() :
_Label( "" )
self.__presetsEditor = _PresetsEditor()
with GafferUI.Collapsible( "Widget Settings", collapsed = True ) :
with GafferUI.ListContainer( spacing = 4 ) :
with _Row() :
_Label( "Divider" )
self.__metadataWidgets["divider"] = _BoolMetadataWidget( key = "divider" )
for m in self.__metadataDefinitions :
with _Row() :
_Label( m.label )
self.__metadataWidgets[m.key] = m.metadataWidgetType( key = m.key )
with GafferUI.Collapsible( "Node Graph", collapsed = True ) :
with GafferUI.ListContainer( spacing = 4 ) as self.__nodeGraphSection :
with _Row() :
_Label( "Gadget" )
self.__gadgetMenu = GafferUI.MenuButton(
menu = GafferUI.Menu( Gaffer.WeakMethod( self.__gadgetMenuDefinition ) )
)
with _Row() :
_Label( "Position" )
self.__metadataWidgets["nodeGadget:nodulePosition"] = _MenuMetadataWidget(
key = "nodeGadget:nodulePosition",
labelsAndValues = [
( "Default", None ),
( "Top", "top" ),
( "Bottom", "bottom" ),
( "Left", "left" ),
( "Right", "right" ),
]
)
with _Row() :
_Label( "Color" )
self.__metadataWidgets["nodule:color"] = _ColorSwatchMetadataWidget( key = "nodule:color" )
with _Row() :
_Label( "Connection Color" )
self.__metadataWidgets["connectionGadget:color"] = _ColorSwatchMetadataWidget( key = "connectionGadget:color" )
GafferUI.Spacer( IECore.V2i( 0 ), parenting = { "expand" : True } )
self.__plugMetadataChangedConnection = Gaffer.Metadata.plugValueChangedSignal().connect( Gaffer.WeakMethod( self.__plugMetadataChanged ) )
self.__plug = None
def setPlug( self, plug ) :
self.__plug = plug
self.__nameWidget.setGraphComponent( self.__plug )
for widget in self.__metadataWidgets.values() :
widget.setTarget( self.__plug )
self.__updateWidgetMenuText()
self.__updateWidgetSettings()
self.__updateGadgetMenuText()
self.__presetsEditor.setPlug( plug )
self.__nodeGraphSection.setEnabled( self.__plug is not None and self.__plug.parent().isSame( self.__plug.node() ) )
self.setEnabled( self.__plug is not None )
def getPlug( self ) :
return self.__plug
def __plugMetadataChanged( self, nodeTypeId, plugPath, key, plug ) :
if self.getPlug() is None :
return
if not Gaffer.affectedByChange( self.getPlug(), nodeTypeId, plugPath, plug ) :
return
if key == "plugValueWidget:type" :
self.__updateWidgetMenuText()
self.__updateWidgetSettings()
elif key == "nodule:type" :
self.__updateGadgetMenuText()
def __updateWidgetMenuText( self ) :
if self.getPlug() is None :
self.__widgetMenu.setText( "" )
return
metadata = Gaffer.Metadata.value( self.getPlug(), "plugValueWidget:type" )
for w in self.__widgetDefinitions :
if w.metadata == metadata :
self.__widgetMenu.setText( w.label )
return
self.__widgetMenu.setText( metadata )
def __updateWidgetSettings( self ) :
widgetType = None
if self.getPlug() is not None :
widgetType = Gaffer.Metadata.value( self.getPlug(), "plugValueWidget:type" )
for m in self.__metadataDefinitions :
widget = self.__metadataWidgets[m.key]
widget.parent().setEnabled( m.plugValueWidgetType == widgetType )
self.__metadataWidgets["connectionGadget:color"].parent().setEnabled(
self.getPlug() is not None and self.getPlug().direction() == Gaffer.Plug.Direction.In
)
def __widgetMenuDefinition( self ) :
result = IECore.MenuDefinition()
if self.getPlug() is None :
return result
metadata = Gaffer.Metadata.value( self.getPlug(), "plugValueWidget:type" )
for w in self.__widgetDefinitions :
if not isinstance( self.getPlug(), w.plugType ) :
continue
result.append(
"/" + w.label,
{
"command" : functools.partial( Gaffer.WeakMethod( self.__registerOrDeregisterMetadata ), key = "plugValueWidget:type", value = w.metadata ),
"checkBox" : metadata == w.metadata,
}
)
return result
def __updateGadgetMenuText( self ) :
if self.getPlug() is None :
self.__gadgetMenu.setText( "" )
return
metadata = Gaffer.Metadata.value( self.getPlug(), "nodule:type" )
metadata = None if metadata == "GafferUI::StandardNodule" else metadata
for g in self.__gadgetDefinitions :
if g.metadata == metadata :
self.__gadgetMenu.setText( g.label )
return
self.__gadgetMenu.setText( metadata )
def __gadgetMenuDefinition( self ) :
result = IECore.MenuDefinition()
if self.getPlug() is None :
return result
metadata = Gaffer.Metadata.value( self.getPlug(), "nodule:type" )
for g in self.__gadgetDefinitions :
if not isinstance( self.getPlug(), g.plugType ) :
continue
result.append(
"/" + g.label,
{
"command" : functools.partial( Gaffer.WeakMethod( self.__registerOrDeregisterMetadata ), key = "nodule:type", value = g.metadata ),
"checkBox" : metadata == g.metadata,
}
)
return result
def __registerOrDeregisterMetadata( self, unused, key, value ) :
with Gaffer.UndoContext( self.getPlug().ancestor( Gaffer.ScriptNode ) ) :
if value is not None :
Gaffer.Metadata.registerValue( self.getPlug(), key, value )
else :
Gaffer.Metadata.deregisterValue( self.getPlug(), key )
__WidgetDefinition = collections.namedtuple( "WidgetDefinition", ( "label", "plugType", "metadata" ) )
__widgetDefinitions = (
__WidgetDefinition( "Default", Gaffer.Plug, None ),
__WidgetDefinition( "Checkbox", Gaffer.IntPlug, "GafferUI.BoolPlugValueWidget" ),
__WidgetDefinition( "Text Region", Gaffer.StringPlug, "GafferUI.MultiLineStringPlugValueWidget" ),
__WidgetDefinition( "File Chooser", Gaffer.StringPlug, "GafferUI.FileSystemPathPlugValueWidget" ),
__WidgetDefinition( "Presets Menu", Gaffer.ValuePlug, "GafferUI.PresetsPlugValueWidget" ),
__WidgetDefinition( "Connection", Gaffer.Plug, "GafferUI.ConnectionPlugValueWidget" ),
__WidgetDefinition( "None", Gaffer.Plug, "" ),
)
__MetadataDefinition = collections.namedtuple( "MetadataDefinition", ( "key", "label", "metadataWidgetType", "plugValueWidgetType" ) )
__metadataDefinitions = (
__MetadataDefinition( "fileSystemPathPlugValueWidget:extensions", "File Extensions", _StringMetadataWidget, "GafferUI.FileSystemPathPlugValueWidget" ),
__MetadataDefinition( "pathPlugValueWidget:bookmarks", "Bookmarks Category", _StringMetadataWidget, "GafferUI.FileSystemPathPlugValueWidget" ),
__MetadataDefinition( "pathPlugValueWidget:valid", "File Must Exist", _BoolMetadataWidget, "GafferUI.FileSystemPathPlugValueWidget" ),
__MetadataDefinition( "pathPlugValueWidget:leaf", "No Directories", _BoolMetadataWidget, "GafferUI.FileSystemPathPlugValueWidget" ),
__MetadataDefinition( "fileSystemPathPlugValueWidget:includeSequences", "Allow sequences", _BoolMetadataWidget, "GafferUI.FileSystemPathPlugValueWidget" ),
# Note that includeSequenceFrameRange is primarily used by GafferCortex.
# Think twice before using it elsewhere as it may not exist in the future.
__MetadataDefinition( "fileSystemPathPlugValueWidget:includeSequenceFrameRange", "Sequences include frame range", _BoolMetadataWidget, "GafferUI.FileSystemPathPlugValueWidget" ),
)
__GadgetDefinition = collections.namedtuple( "GadgetDefinition", ( "label", "plugType", "metadata" ) )
__gadgetDefinitions = (
__GadgetDefinition( "Default", Gaffer.Plug, None ),
__GadgetDefinition( "Array", Gaffer.ArrayPlug, "GafferUI::CompoundNodule" ),
__GadgetDefinition( "None", Gaffer.Plug, "" ),
)
##########################################################################
# _SectionEditor. This provides a panel for editing the details of
# a specific section.
##########################################################################
class _SectionEditor( GafferUI.Widget ) :
def __init__( self, **kw ) :
column = GafferUI.ListContainer( spacing = 4, borderWidth = 8 )
GafferUI.Widget.__init__( self, column, **kw )
with column :
with _Row() :
_Label( "Name" )
self.__nameWidget = GafferUI.TextWidget()
self.__nameWidgetEditingFinishedConnection = self.__nameWidget.editingFinishedSignal().connect( Gaffer.WeakMethod( self.__nameWidgetEditingFinished ) )
with _Row() :
_Label( "Summary", parenting = { "verticalAlignment" : GafferUI.ListContainer.VerticalAlignment.Top } )
self.__summaryMetadataWidget = _MultiLineStringMetadataWidget( key = "" )
self.__section = ""
self.__plugParent = None
self.__nameChangedSignal = Gaffer.Signal3()
def setPlugParent( self, plugParent ) :
self.__plugParent = plugParent
self.__summaryMetadataWidget.setTarget( self.__plugParent )
def getPlugParent( self ) :
return self.__plugParent
def setSection( self, section ) :
assert( isinstance( section, basestring ) )
self.__section = section
self.__nameWidget.setText( section.rpartition( "." )[-1] )
self.__summaryMetadataWidget.setKey( "layout:section:" + self.__section + ":summary" )
def getSection( self ) :
return self.__section
def nameChangedSignal( self ) :
return self.__nameChangedSignal
def __nameWidgetEditingFinished( self, nameWidget ) :
if nameWidget.getText() == "" :
# Can't rename to the empty string - abandon the edit.
self.setSection( self.__section )
return
oldSectionPath = self.__section.split( "." )
newSectionPath = oldSectionPath[:]
newSectionPath[-1] = nameWidget.getText().replace( ".", "" )
if oldSectionPath == newSectionPath :
return
def newSection( oldSection ) :
s = oldSection.split( "." )
if s[:len(oldSectionPath)] == oldSectionPath :
s[:len(oldSectionPath)] = newSectionPath
return ".".join( s )
else :
return oldSection
with Gaffer.UndoContext( self.__plugParent.ancestor( Gaffer.ScriptNode ) ) :
for plug in self.__plugParent.children( Gaffer.Plug ) :
s = Gaffer.Metadata.value( plug, "layout:section" )
if s is not None :
Gaffer.Metadata.registerValue( plug, "layout:section", newSection( s ) )
emptySections = Gaffer.Metadata.value( self.getPlugParent(), "uiEditor:emptySections" )
if emptySections :
for i in range( 0, len( emptySections ) ) :
emptySections[i] = newSection( emptySections[i] )
Gaffer.Metadata.registerValue( self.getPlugParent(), "uiEditor:emptySections", emptySections )
for name in Gaffer.Metadata.registeredValues( self.getPlugParent(), instanceOnly = True, persistentOnly = True ) :
m = re.match( "(layout:section:)(.*)(:.*)", name )
if m :
if newSection( m.group( 2 ) ) != m.group( 2 ) :
Gaffer.Metadata.registerValue(
self.getPlugParent(),
m.group( 1 ) + newSection( m.group( 2 ) ) + m.group( 3 ),
Gaffer.Metadata.value( self.getPlugParent(), name )
)
Gaffer.Metadata.deregisterValue( self.getPlugParent(), name )
self.setSection( ".".join( newSectionPath ) )
self.nameChangedSignal()( self, ".".join( oldSectionPath ), ".".join( newSectionPath ) )
| bsd-3-clause | 1,587,227,539,915,710,200 | 31.422409 | 180 | 0.67431 | false |
clessor88/trigrams | trigrams.py | 1 | 2164 | """Trigram analysis text generator."""
import sys
from random import randint
import io
def create_word_bank(word_list):
"""Create a dictionary with keys of 2 word pairs."""
word_bank = {}
for i, word in enumerate(word_list):
key = word + ' ' + word_list[i + 1]
if i < len(word_list) - 2:
word_bank.setdefault(key, []).append(word_list[i + 2])
else:
break
return word_bank
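# Worked example (illustrative only, not used by the program): for the input
# words "the quick brown fox the quick red fox", create_word_bank returns
#
#     {'the quick': ['brown', 'red'],
#      'quick brown': ['fox'],
#      'brown fox': ['the'],
#      'fox the': ['quick'],
#      'quick red': ['fox']}
#
# create_text then repeatedly looks up the last two emitted words in this
# table and appends one of their recorded successors at random.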
def create_text(num_words, word_list):
"""Generate the block of computer generated text."""
word_bank = create_word_bank(word_list)
rand = randint(0, len(word_list) - 3)
text_list = ['...', word_list[rand], word_list[rand + 1]]
for i in range(num_words - 2):
key = text_list[-2] + ' ' + text_list[-1]
try:
text_list.append(word_bank[key][randint(0,
len(word_bank[key]) - 1)])
except KeyError:
rand = randint(0, len(word_list) - 3)
text_list.extend([word_list[rand], word_list[rand + 1]])
return ' '.join(text_list) + ' ...'
def generate_trigrams(filename, num_words):
"""Generate trigram output."""
try:
f = io.open(filename, encoding='utf-8')
text = f.read()
f.close()
except FileNotFoundError:
print('File was not found. Ensure that the file exists.')
sys.exit(1)
except IOError:
print('There was a problem opening the file.')
sys.exit(1)
else:
word_list = text.split()
text = create_text(num_words, word_list)
print(text)
return text
def main():
"""Take in filename and run generate_bigrams."""
if len(sys.argv) != 3:
print(u'usage: ./trigrams.py file num_words')
sys.exit(1)
try:
num_words = int(sys.argv[2])
except ValueError:
        print(u'num_words must be a number.')
        sys.exit(1)
else:
if num_words > 1000 or num_words < 1:
print(u'num_words must be between 1 and 1000.')
sys.exit(1)
else:
filename = sys.argv[1]
generate_trigrams(filename, num_words)
if __name__ == '__main__':
main()
| mit | 2,701,385,325,316,454,000 | 28.643836 | 68 | 0.554067 | false |
peeyush-tm/check_mk | modules/notify.py | 1 | 82627 | #!/usr/bin/python
# -*- encoding: utf-8; py-indent-offset: 4 -*-
# +------------------------------------------------------------------+
# | ____ _ _ __ __ _ __ |
# | / ___| |__ ___ ___| | __ | \/ | |/ / |
# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
# | | |___| | | | __/ (__| < | | | | . \ |
# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
# | |
# | Copyright Mathias Kettner 2014 [email protected] |
# +------------------------------------------------------------------+
#
# This file is part of Check_MK.
# The official homepage is at http://mathias-kettner.de/check_mk.
#
# check_mk is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation in version 2. check_mk is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more de-
# ails. You should have received a copy of the GNU General Public
# License along with GNU Make; see the file COPYING. If not, write
# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA.
import pprint, urllib, select, subprocess, socket
# Please have a look at doc/Notifications.png:
#
# There are two types of contexts:
# 1. Raw contexts (purple)
# -> These come out from the monitoring core. They are not yet
#       assigned to a certain plugin. In case of rule based notifications
#       they are not even assigned to a certain contact.
#
# 2. Plugin contexts (cyan)
# -> These already bear all information about the contact, the plugin
# to call and its parameters.
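#
# Illustrative sketch with hypothetical values (not taken from a real system):
# a raw context is essentially a dict of macros as delivered by the core, e.g.
#     { "HOSTNAME": "srv01", "SERVICEDESC": "CPU load", "NOTIFICATIONTYPE": "PROBLEM", ... }
# while a plugin context additionally carries contact and parameter information, e.g.
#     { ..., "CONTACTNAME": "hh", "CONTACTEMAIL": "[email protected]", "PARAMETER_1": "somearg" }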
# .--Configuration-------------------------------------------------------.
# | ____ __ _ _ _ |
# | / ___|___ _ __ / _(_) __ _ _ _ _ __ __ _| |_(_) ___ _ __ |
# | | | / _ \| '_ \| |_| |/ _` | | | | '__/ _` | __| |/ _ \| '_ \ |
# | | |__| (_) | | | | _| | (_| | |_| | | | (_| | |_| | (_) | | | | |
# | \____\___/|_| |_|_| |_|\__, |\__,_|_| \__,_|\__|_|\___/|_| |_| |
# | |___/ |
# +----------------------------------------------------------------------+
# | Default values of global configuration variables. |
# '----------------------------------------------------------------------'
# Default settings
notification_logdir = var_dir + "/notify"
notification_spooldir = var_dir + "/notify/spool"
notification_bulkdir = var_dir + "/notify/bulk"
notification_core_log = var_dir + "/notify/nagios.log" # Fallback for history if no CMC running
notification_log = log_dir + "/notify.log"
notification_logging = 1
notification_backlog = 10 # keep the last 10 notification contexts for reference
# Settings for new rule based notifications
enable_rulebased_notifications = False
notification_fallback_email = ""
notification_rules = []
notification_bulk_interval = 10 # Check every 10 seconds for ripe bulks
# Notification Spooling
notification_spooling = False
notification_spool_to = None
notification_log_template = \
u"$CONTACTNAME$ - $NOTIFICATIONTYPE$ - " \
u"$HOSTNAME$ $HOSTSTATE$ - " \
u"$SERVICEDESC$ $SERVICESTATE$ "
notification_mail_command = u"mail -s '$SUBJECT$' '$CONTACTEMAIL$'"
notification_host_subject = u"Check_MK: $HOSTNAME$ - $NOTIFICATIONTYPE$"
notification_service_subject = u"Check_MK: $HOSTNAME$/$SERVICEDESC$ $NOTIFICATIONTYPE$"
notification_common_body = u"""Host: $HOSTNAME$
Alias: $HOSTALIAS$
Address: $HOSTADDRESS$
"""
notification_host_body = u"""State: $LASTHOSTSTATE$ -> $HOSTSTATE$ ($NOTIFICATIONTYPE$)
Command: $HOSTCHECKCOMMAND$
Output: $HOSTOUTPUT$
Perfdata: $HOSTPERFDATA$
$LONGHOSTOUTPUT$
"""
notification_service_body = u"""Service: $SERVICEDESC$
State: $LASTSERVICESTATE$ -> $SERVICESTATE$ ($NOTIFICATIONTYPE$)
Command: $SERVICECHECKCOMMAND$
Output: $SERVICEOUTPUT$
Perfdata: $SERVICEPERFDATA$
$LONGSERVICEOUTPUT$
"""
#.
# .--Main----------------------------------------------------------------.
# | __ __ _ |
# | | \/ | __ _(_)_ __ |
# | | |\/| |/ _` | | '_ \ |
# | | | | | (_| | | | | | |
# | |_| |_|\__,_|_|_| |_| |
# | |
# +----------------------------------------------------------------------+
# | Main code entry point. |
# '----------------------------------------------------------------------'
def notify_usage():
sys.stderr.write("""Usage: check_mk --notify [--keepalive]
check_mk --notify spoolfile <filename>
Normally the notify module is called without arguments to send real
notifications. But there are situations where this module is called with
COMMANDS to e.g. support development of notification plugins.
Available commands:
spoolfile <filename> Reads the given spoolfile and creates a
notification out of its data
stdin Read one notification context from stdin instead
of taking variables from environment
replay N Uses the N'th recent notification from the backlog
and sends it again, counting from 0.
send-bulks Send out ripe bulk notifications
""")
# Main function called by cmk --notify. It either starts the
# keepalive mode (used by CMC), sends out one notifications from
# several possible sources or sends out all ripe bulk notifications.
def do_notify(args):
global notify_mode, notification_logging
if notification_logging == 0:
notification_logging = 1 # transform deprecated value 0 to 1
try:
if not os.path.exists(notification_logdir):
os.makedirs(notification_logdir)
if not os.path.exists(notification_spooldir):
os.makedirs(notification_spooldir)
notify_mode = 'notify'
if args:
notify_mode = args[0]
if notify_mode not in [ 'stdin', 'spoolfile', 'replay', 'send-bulks' ]:
sys.stderr.write("ERROR: Invalid call to check_mk --notify.\n\n")
notify_usage()
sys.exit(1)
if len(args) != 2 and notify_mode not in [ "stdin", "replay", "send-bulks" ]:
sys.stderr.write("ERROR: need an argument to --notify %s.\n\n" % notify_mode)
sys.exit(1)
elif notify_mode == 'spoolfile':
filename = args[1]
elif notify_mode == 'replay':
try:
replay_nr = int(args[1])
except:
replay_nr = 0
# If the notify_mode is set to 'spoolfile' we try to parse the given spoolfile
# This spoolfile contains a python dictionary
# { context: { Dictionary of environment variables }, plugin: "Plugin name" }
# Any problems while reading the spoolfile results in returning 2
# -> mknotifyd deletes this file
if notify_mode == "spoolfile":
return handle_spoolfile(filename)
elif opt_keepalive:
notify_keepalive()
elif notify_mode == 'replay':
raw_context = raw_context_from_backlog(replay_nr)
notify_notify(raw_context)
elif notify_mode == 'stdin':
notify_notify(raw_context_from_stdin())
elif notify_mode == "send-bulks":
send_ripe_bulks()
else:
notify_notify(raw_context_from_env())
except Exception, e:
crash_dir = var_dir + "/notify"
if not os.path.exists(crash_dir):
os.makedirs(crash_dir)
file(crash_dir + "/crash.log", "a").write("CRASH (%s):\n%s\n" %
(time.strftime("%Y-%m-%d %H:%M:%S"), format_exception()))
# This function processes one raw notification and decides whether it
# should be spooled or not. In the latter case a local delivery
# is being done.
def notify_notify(raw_context, analyse=False):
if not analyse:
store_notification_backlog(raw_context)
notify_log("----------------------------------------------------------------------")
if analyse:
notify_log("Analysing notification context with %s variables" % len(raw_context))
else:
notify_log("Got raw notification context with %s variables" % len(raw_context))
# Add some further variable for the conveniance of the plugins
if notification_logging >= 2:
encoded_context = dict(raw_context.items())
convert_context_to_unicode(encoded_context)
notify_log("Raw notification context:\n"
+ "\n".join([" %s=%s" % v for v in sorted(encoded_context.items())]))
raw_keys = list(raw_context.keys())
try:
complete_raw_context(raw_context)
except Exception, e:
notify_log("Error on completing raw context: %s" % e)
if notification_logging >= 2:
notify_log("Computed variables:\n"
+ "\n".join(sorted([" %s=%s" % (k, raw_context[k]) for k in raw_context if k not in raw_keys])))
# Spool notification to remote host, if this is enabled
if notification_spool_to:
remote_host, tcp_port, also_local = notification_spool_to
target_site = "%s:%s" % (remote_host, tcp_port)
create_spoolfile({"context": raw_context, "forward": target_site})
if not also_local:
return
return locally_deliver_raw_context(raw_context, analyse=analyse)
# Here we decide which notification implementation we are using.
# Hopefully we can drop a couple of them some day
# 1. Rule Based Notifiations (since 1.2.5i1)
# 2. Flexible Notifications (since 1.2.2)
# 3. Plain email notification (refer to git log if you are really interested)
def locally_deliver_raw_context(raw_context, analyse=False):
contactname = raw_context.get("CONTACTNAME")
try:
# If rule based notifications are enabled then the Micro Core does not set the
# variable CONTACTNAME. In the other cores the CONTACTNAME is being set to
# check-mk-notify.
        # Why do we not simply check the config variable enable_rulebased_notifications?
        # -> Because the core needs a restart in order to reflect this, while the
        #    notification mode of Check_MK does not. There are thus situations where the
# setting of the core is different from our global variable. The core must
# have precedence in this situation!
if not contactname or contactname == "check-mk-notify":
            # 1. RULE BASED NOTIFICATIONS
notify_log("Preparing rule based notifications")
return notify_rulebased(raw_context, analyse=analyse)
if analyse:
return # Analysis only possible when rule based notifications are enabled
# Now fetch all configuration about that contact (it needs to be configure via
# Check_MK for that purpose). If we do not know that contact then we cannot use
# flexible notifications even if they are enabled.
contact = contacts.get(contactname)
if contact.get("disable_notifications", False):
notify_log("Notifications for %s are disabled in personal settings. Skipping." % contactname)
return
# Get notification settings for the contact in question - if available.
if contact:
method = contact.get("notification_method", "email")
else:
method = "email"
if type(method) == tuple and method[0] == 'flexible':
# 2. FLEXIBLE NOTIFICATIONS
notify_log("Preparing flexible notifications for %s" % contactname)
notify_flexible(raw_context, method[1])
else:
# 3. PLAIN EMAIL NOTIFICATION
notify_log("Preparing plain email notifications for %s" % contactname)
notify_plain_email(raw_context)
except Exception, e:
if opt_debug:
raise
notify_log("ERROR: %s\n%s" % (e, format_exception()))
def notification_replay_backlog(nr):
global notify_mode
notify_mode = "replay"
raw_context = raw_context_from_backlog(nr)
notify_notify(raw_context)
def notification_analyse_backlog(nr):
global notify_mode
notify_mode = "replay"
raw_context = raw_context_from_backlog(nr)
return notify_notify(raw_context, analyse=True)
#.
# .--Keepalive-Mode (Used by CMC)----------------------------------------.
# | _ __ _ _ |
# | | |/ /___ ___ _ __ __ _| (_)_ _____ |
# | | ' // _ \/ _ \ '_ \ / _` | | \ \ / / _ \ |
# | | . \ __/ __/ |_) | (_| | | |\ V / __/ |
# | |_|\_\___|\___| .__/ \__,_|_|_| \_/ \___| |
# | |_| |
# +----------------------------------------------------------------------+
# | Implementation of cmk --notify --keepalive, which is being used |
# | by the Micro Core. |
# '----------------------------------------------------------------------'
def notify_keepalive():
last_config_timestamp = config_timestamp()
# Send signal that we are ready to receive the next notification, but
# not after a config-reload-restart (see below)
if os.getenv("CMK_NOTIFY_RESTART") != "1":
notify_log("Starting in keepalive mode with PID %d" % os.getpid())
sys.stdout.write("*")
sys.stdout.flush()
else:
notify_log("We are back after a restart.")
while True:
try:
# Invalidate timeperiod cache
global g_inactive_timerperiods
g_inactive_timerperiods = None
# If the configuration has changed, we do a restart. But we do
# this check just before the next notification arrives. We must
# *not* read data from stdin, just peek! There is still one
# problem: when restarting we must *not* send the initial '*'
            # byte, because that must be sent no sooner than the notification
# has been sent. We do this by setting the environment variable
# CMK_NOTIFY_RESTART=1
if notify_data_available():
if last_config_timestamp != config_timestamp():
notify_log("Configuration has changed. Restarting myself.")
os.putenv("CMK_NOTIFY_RESTART", "1")
os.execvp("cmk", sys.argv)
data = ""
while not data.endswith("\n\n"):
try:
new_data = ""
new_data = os.read(0, 32768)
except IOError, e:
new_data = ""
except Exception, e:
if opt_debug:
raise
notify_log("Cannot read data from CMC: %s" % e)
if not new_data:
notify_log("CMC has closed the connection. Shutting down.")
sys.exit(0) # closed stdin, this is
data += new_data
try:
context = raw_context_from_string(data.rstrip('\n'))
notify_notify(context)
except Exception, e:
if opt_debug:
raise
notify_log("ERROR %s\n%s" % (e, format_exception()))
# Signal that we are ready for the next notification
sys.stdout.write("*")
sys.stdout.flush()
        # Fix for Python 2.4:
except SystemExit, e:
sys.exit(e)
except Exception, e:
if opt_debug:
raise
notify_log("ERROR %s\n%s" % (e, format_exception()))
send_ripe_bulks()
def notify_data_available():
readable, writeable, exceptionable = select.select([0], [], [], notification_bulk_interval)
return not not readable
#.
# .--Rule-Based-Notifications--------------------------------------------.
# | ____ _ _ _ |
# | | _ \ _ _| | ___| |__ __ _ ___ ___ __| | |
# | | |_) | | | | |/ _ \ '_ \ / _` / __|/ _ \/ _` | |
# | | _ <| |_| | | __/ |_) | (_| \__ \ __/ (_| | |
# | |_| \_\\__,_|_|\___|_.__/ \__,_|___/\___|\__,_| |
# | |
# +----------------------------------------------------------------------+
# | Logic for rule based notifications |
# '----------------------------------------------------------------------'
def notify_rulebased(raw_context, analyse=False):
# First step: go through all rules and construct our table of
# notification plugins to call. This is a dict from (user, plugin) to
# a pair if (locked, parameters). If locked is True, then a user
# cannot cancel this notification via his personal notification rules.
# Example:
# notifications = {
# ( "hh", "email" ) : ( False, [] ),
# ( "hh", "sms" ) : ( True, [ "0171737337", "bar" ] ),
# }
notifications = {}
num_rule_matches = 0
rule_info = []
for rule in notification_rules + user_notification_rules():
if "contact" in rule:
notify_log("User %s's rule '%s'..." % (rule["contact"], rule["description"]))
else:
notify_log("Global rule '%s'..." % rule["description"])
why_not = rbn_match_rule(rule, raw_context) # also checks disabling
if why_not:
notify_log(" -> does not match: %s" % why_not)
rule_info.append(("miss", rule, why_not))
else:
notify_log(" -> matches!")
num_rule_matches += 1
contacts = rbn_rule_contacts(rule, raw_context)
# Handle old-style and new-style rules
if "notify_method" in rule: # old-style
plugin = rule["notify_plugin"]
plugin_parameters = rule["notify_method"] # None: do cancel, [ str ]: plugin parameters
else:
plugin, plugin_parameters = rule["notify_plugin"]
bulk = rule.get("bulk")
if plugin_parameters == None: # cancelling
for contact in contacts:
key = contact, plugin
if key in notifications:
locked, plugin_parameters, bulk = notifications[key]
if locked and "contact" in rule:
notify_log(" - cannot cancel notification of %s via %s: it is locked" % key)
else:
notify_log(" - cancelling notification of %s via %s" % key)
del notifications[key]
else:
final_parameters = rbn_finalize_plugin_parameters(raw_context["HOSTNAME"], plugin, plugin_parameters)
for contact in contacts:
key = contact, plugin
plugintxt = plugin or "plain email"
if key in notifications:
locked, previous_parameters, old_bulk = notifications[key]
if locked and "contact" in rule:
notify_log(" - cannot modify notification of %s via %s: it is locked" % (contact, plugintxt))
continue
notify_log(" - modifying notification of %s via %s" % (contact, plugintxt))
else:
notify_log(" - adding notification of %s via %s" % (contact, plugintxt))
notifications[key] = ( not rule.get("allow_disable"), final_parameters, bulk )
rule_info.append(("match", rule, ""))
plugin_info = []
if not notifications:
if num_rule_matches:
notify_log("%d rules matched, but no notification has been created." % num_rule_matches)
else:
if notification_fallback_email and not analyse:
notify_log("No rule matched, falling back to email to %s" % notification_fallback_email)
plugin_context = create_plugin_context(raw_context, [])
contact = rbn_fake_email_contact(notification_fallback_email)
rbn_add_contact_information(plugin_context, contact)
notify_via_email(plugin_context)
else:
# Now do the actual notifications
notify_log("Executing %d notifications:" % len(notifications))
entries = notifications.items()
entries.sort()
for (contact, plugin), (locked, params, bulk) in entries:
if analyse:
verb = "would notify"
else:
verb = "notifying"
notify_log(" * %s %s via %s, parameters: %s, bulk: %s" % (
verb, contact, (plugin or "plain email"), params and ", ".join(params) or "(no parameters)",
bulk and "yes" or "no"))
plugin_info.append((contact, plugin, params, bulk)) # for analysis
try:
plugin_context = create_plugin_context(raw_context, params)
rbn_add_contact_information(plugin_context, contact)
if not analyse:
if bulk:
do_bulk_notify(contact, plugin, params, plugin_context, bulk)
elif notification_spooling:
create_spoolfile({"context": plugin_context, "plugin": plugin})
else:
call_notification_script(plugin, plugin_context)
except Exception, e:
if opt_debug:
raise
fe = format_exception()
notify_log(" ERROR: %s" % e)
notify_log(fe)
analysis_info = rule_info, plugin_info
return analysis_info
def rbn_finalize_plugin_parameters(hostname, plugin, rule_parameters):
# Right now we are only able to finalize notification plugins with dict parameters..
if type(rule_parameters) == dict:
parameters = host_extra_conf_merged(hostname, notification_parameters.get(plugin, []))
parameters.update(rule_parameters)
return parameters
else:
return rule_parameters
def add_rulebased_macros(raw_context):
# For the rule based notifications we need the list of contacts
# an object has. The CMC does send this in the macro "CONTACTS"
if "CONTACTS" not in raw_context:
raw_context["CONTACTS"] = livestatus_fetch_contacts(raw_context["HOSTNAME"], raw_context.get("SERVICEDESC"))
# Add a pseudo contact name. This is needed for the correct creation
    # of spool files. Spool files are created on a per-contact basis, as in classical
# notifications the core sends out one individual notification per contact.
# In the case of rule based notifications we do not make distinctions between
# the various contacts.
raw_context["CONTACTNAME"] = "check-mk-notify"
# Create a table of all user specific notification rules. Important:
# create deterministic order, so that rule analyses can depend on
# rule indices
def user_notification_rules():
user_rules = []
contactnames = contacts.keys()
contactnames.sort()
for contactname in contactnames:
contact = contacts[contactname]
for rule in contact.get("notification_rules", []):
# Save the owner of the rule for later debugging
rule["contact"] = contactname
# We assume that the "contact_..." entries in the
# rule are allowed and only contain one entry of the
# type "contact_users" : [ contactname ]. This
# is handled by WATO. Contact specific rules are a
# WATO-only feature anyway...
user_rules.append(rule)
notify_log("Found %d user specific rules" % len(user_rules))
return user_rules
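# Illustrative sketch of a single rule as consumed below (hypothetical values):
#     { "description"         : "Mail to admins on service problems",
#       "contact_groups"      : [ "admins" ],
#       "notify_plugin"       : ( "mail", {} ),
#       "match_service_event" : [ "wc", "cr" ] }
# Global rules come from notification_rules, user specific ones from the contacts.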
def rbn_fake_email_contact(email):
    return {
        "name"  : email,
        "alias" : "Explicit email address " + email,
"email" : email,
"pager" : "",
}
def rbn_add_contact_information(plugin_context, contact):
if type(contact) == dict:
for what in [ "name", "alias", "email", "pager" ]:
plugin_context["CONTACT" + what.upper()] = contact.get(what, "")
for key in contact.keys():
if key[0] == '_':
plugin_context["CONTACT" + key.upper()] = unicode(contact[key])
else:
if contact.startswith("mailto:"): # Fake contact
contact_dict = {
"name" : contact[7:],
"alias" : "Email address " + contact,
"email" : contact[7:],
"pager" : "" }
else:
contact_dict = contacts.get(contact, { "alias" : contact })
contact_dict["name"] = contact
rbn_add_contact_information(plugin_context, contact_dict)
def livestatus_fetch_contacts(host, service):
try:
if service:
query = "GET services\nFilter: host_name = %s\nFilter: service_description = %s\nColumns: contacts\n" % (
host, service)
else:
query = "GET hosts\nFilter: host_name = %s\nColumns: contacts\n" % host
commasepped = livestatus_fetch_query(query).strip()
aslist = commasepped.split(",")
        if "check-mk-notify" in aslist: # Remove artificial contact used for rule based notifications
aslist.remove("check-mk-notify")
return ",".join(aslist)
except:
if opt_debug:
raise
return "" # We must allow notifications without Livestatus access
def rbn_match_rule(rule, context):
if rule.get("disabled"):
return "This rule is disabled"
return \
rbn_match_folder(rule, context) or \
rbn_match_hosttags(rule, context) or \
rbn_match_hostgroups(rule, context) or \
rbn_match_servicegroups(rule, context) or \
rbn_match_contactgroups(rule, context) or \
rbn_match_hosts(rule, context) or \
rbn_match_exclude_hosts(rule, context) or \
rbn_match_services(rule, context) or \
rbn_match_exclude_services(rule, context) or \
rbn_match_plugin_output(rule, context) or \
rbn_match_checktype(rule, context) or \
rbn_match_timeperiod(rule) or \
rbn_match_escalation(rule, context) or \
rbn_match_escalation_throtte(rule, context) or \
rbn_match_servicelevel(rule, context) or \
rbn_match_host_event(rule, context) or \
rbn_match_service_event(rule, context) or \
rbn_match_event_console(rule, context)
def rbn_match_folder(rule, context):
if "match_folder" in rule:
mustfolder = rule["match_folder"]
mustpath = mustfolder.split("/")
hasfolder = None
for tag in context.get("HOSTTAGS", "").split():
if tag.startswith("/wato/"):
hasfolder = tag[6:].rstrip("/")
haspath = hasfolder.split("/")
if mustpath == ["",]:
return # Match is on main folder, always OK
while mustpath:
if not haspath or mustpath[0] != haspath[0]:
return "The rule requires WATO folder '%s', but the host is in '%s'" % (
mustfolder, hasfolder)
mustpath = mustpath[1:]
haspath = haspath[1:]
if hasfolder == None:
return "The host is not managed via WATO, but the rule requires a WATO folder"
def rbn_match_hosttags(rule, context):
required = rule.get("match_hosttags")
if required:
tags = context.get("HOSTTAGS", "").split()
if not hosttags_match_taglist(tags, required):
return "The host's tags %s do not match the required tags %s" % (
"|".join(tags), "|".join(required))
def rbn_match_servicegroups(rule, context):
if context["WHAT"] != "SERVICE":
return
required_groups = rule.get("match_servicegroups")
if required_groups != None:
sgn = context.get("SERVICEGROUPNAMES")
if sgn == None:
return "No information about service groups is in the context, but service " \
"must be in group %s" % ( " or ".join(required_groups))
if sgn:
servicegroups = sgn.split(",")
else:
return "The service is in no group, but %s is required" % (
" or ".join(required_groups))
for group in required_groups:
if group in servicegroups:
return
return "The service is only in the groups %s, but %s is required" % (
sgn, " or ".join(required_groups))
def rbn_match_contactgroups(rule, context):
required_groups = rule.get("match_contactgroups")
if context["WHAT"] == "SERVICE":
cgn = context.get("SERVICECONTACTGROUPNAMES")
else:
cgn = context.get("HOSTCONTACTGROUPNAMES")
if required_groups != None:
if cgn == None:
            notify_log("Warning: No information about contact groups in the context. " \
                       "Seems that you don't use the Check_MK Microcore. ")
return
if cgn:
contactgroups = cgn.split(",")
else:
return "The object is in no group, but %s is required" % (
" or ".join(required_groups))
for group in required_groups:
if group in contactgroups:
return
return "The object is only in the groups %s, but %s is required" % (
cgn, " or ".join(required_groups))
def rbn_match_hostgroups(rule, context):
required_groups = rule.get("match_hostgroups")
if required_groups != None:
hgn = context.get("HOSTGROUPNAMES")
if hgn == None:
return "No information about host groups is in the context, but host " \
"must be in group %s" % ( " or ".join(required_groups))
if hgn:
hostgroups = hgn.split(",")
else:
return "The host is in no group, but %s is required" % (
" or ".join(required_groups))
for group in required_groups:
if group in hostgroups:
return
return "The host is only in the groups %s, but %s is required" % (
hgn, " or ".join(required_groups))
def rbn_match_hosts(rule, context):
if "match_hosts" in rule:
hostlist = rule["match_hosts"]
if context["HOSTNAME"] not in hostlist:
return "The host's name '%s' is not on the list of allowed hosts (%s)" % (
context["HOSTNAME"], ", ".join(hostlist))
def rbn_match_exclude_hosts(rule, context):
if context["HOSTNAME"] in rule.get("match_exclude_hosts", []):
return "The host's name '%s' is on the list of excluded hosts" % context["HOSTNAME"]
def rbn_match_services(rule, context):
if "match_services" in rule:
if context["WHAT"] != "SERVICE":
return "The rule specifies a list of services, but this is a host notification."
servicelist = rule["match_services"]
service = context["SERVICEDESC"]
if not in_extraconf_servicelist(servicelist, service):
            return "The service's description '%s' does not match the list of " \
"allowed services (%s)" % (service, ", ".join(servicelist))
def rbn_match_exclude_services(rule, context):
if context["WHAT"] != "SERVICE":
return
excludelist = rule.get("match_exclude_services", [])
service = context["SERVICEDESC"]
if in_extraconf_servicelist(excludelist, service):
return "The service's description '%s' matches the list of excluded services" \
% context["SERVICEDESC"]
def rbn_match_plugin_output(rule, context):
if "match_plugin_output" in rule:
r = regex(rule["match_plugin_output"])
if context["WHAT"] == "SERVICE":
output = context["SERVICEOUTPUT"]
else:
output = context["HOSTOUTPUT"]
if not r.search(output):
return "The expression '%s' cannot be found in the plugin output '%s'" % \
(rule["match_plugin_output"], output)
def rbn_match_checktype(rule, context):
if "match_checktype" in rule:
if context["WHAT"] != "SERVICE":
return "The rule specifies a list of Check_MK plugins, but this is a host notification."
command = context["SERVICECHECKCOMMAND"]
if not command.startswith("check_mk-"):
            return "The rule specifies a list of Check_MK plugins, but this is not a Check_MK service."
plugin = command[9:]
allowed = rule["match_checktype"]
if plugin not in allowed:
return "The Check_MK plugin '%s' is not on the list of allowed plugins (%s)" % \
(plugin, ", ".join(allowed))
def rbn_match_timeperiod(rule):
if "match_timeperiod" in rule:
timeperiod = rule["match_timeperiod"]
if timeperiod != "24X7" and not check_timeperiod(timeperiod):
return "The timeperiod '%s' is currently not active." % timeperiod
def rbn_match_escalation(rule, context):
if "match_escalation" in rule:
from_number, to_number = rule["match_escalation"]
if context["WHAT"] == "HOST":
notification_number = int(context.get("HOSTNOTIFICATIONNUMBER", 1))
else:
notification_number = int(context.get("SERVICENOTIFICATIONNUMBER", 1))
if notification_number < from_number or notification_number > to_number:
return "The notification number %d does not lie in range %d ... %d" % (
notification_number, from_number, to_number)
def rbn_match_escalation_throtte(rule, context):
if "match_escalation_throttle" in rule:
from_number, rate = rule["match_escalation_throttle"]
if context["WHAT"] == "HOST":
notification_number = int(context.get("HOSTNOTIFICATIONNUMBER", 1))
else:
notification_number = int(context.get("SERVICENOTIFICATIONNUMBER", 1))
if notification_number <= from_number:
return
if (notification_number - from_number) % rate != 0:
return "This notification is being skipped due to throttling. The next number will be %d" % \
(notification_number + rate - ((notification_number - from_number) % rate))
def rbn_match_servicelevel(rule, context):
if "match_sl" in rule:
from_sl, to_sl = rule["match_sl"]
if context['WHAT'] == "SERVICE" and context.get('SVC_SL','').isdigit():
sl = saveint(context.get('SVC_SL'))
else:
sl = saveint(context.get('HOST_SL'))
if sl < from_sl or sl > to_sl:
return "The service level %d is not between %d and %d." % (sl, from_sl, to_sl)
def rbn_match_host_event(rule, context):
if "match_host_event" in rule:
if context["WHAT"] != "HOST":
if "match_service_event" not in rule:
return "This is a service notification, but the rule just matches host events"
else:
return # Let this be handled by match_service_event
allowed_events = rule["match_host_event"]
state = context["HOSTSTATE"]
last_state = context["PREVIOUSHOSTHARDSTATE"]
events = { "UP" : 'r', "DOWN" : 'd', "UNREACHABLE" : 'u' }
return rbn_match_event(context, state, last_state, events, allowed_events)
def rbn_match_service_event(rule, context):
if "match_service_event" in rule:
if context["WHAT"] != "SERVICE":
if "match_host_event" not in rule:
return "This is a host notification, but the rule just matches service events"
else:
return # Let this be handled by match_host_event
allowed_events = rule["match_service_event"]
state = context["SERVICESTATE"]
last_state = context["PREVIOUSSERVICEHARDSTATE"]
events = { "OK" : 'r', "WARNING" : 'w', "CRITICAL" : 'c', "UNKNOWN" : 'u' }
return rbn_match_event(context, state, last_state, events, allowed_events)
def rbn_match_event(context, state, last_state, events, allowed_events):
notification_type = context["NOTIFICATIONTYPE"]
if notification_type == "RECOVERY":
event = events.get(last_state, '?') + 'r'
elif notification_type in [ "FLAPPINGSTART", "FLAPPINGSTOP", "FLAPPINGDISABLED" ]:
event = 'f'
elif notification_type in [ "DOWNTIMESTART", "DOWNTIMEEND", "DOWNTIMECANCELLED"]:
event = 's'
elif notification_type == "ACKNOWLEDGEMENT":
event = 'x'
else:
event = events.get(last_state, '?') + events.get(state, '?')
notify_log("Event type is %s" % event)
    # Now go through the allowed events. Handle '?' as matching all types!
for allowed in allowed_events:
if event == allowed or \
event[0] == '?' and len(allowed) > 1 and event[1] == allowed[1]:
return
return "Event type '%s' not handled by this rule. Allowed are: %s" % (
event, ", ".join(allowed_events))
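# Illustrative sketch of the two-character event codes used above (hypothetical
# transitions, based on the state maps passed in by the callers):
#     WARNING  -> CRITICAL on a PROBLEM   => "wc"
#     CRITICAL -> OK       on a RECOVERY  => "cr"
# Flapping, downtime and acknowledgement notifications map to 'f', 's' and 'x',
# and a leading '?' acts as a wildcard for an unknown previous hard state.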
def rbn_rule_contacts(rule, context):
the_contacts = set([])
if rule.get("contact_object"):
the_contacts.update(rbn_object_contacts(context))
if rule.get("contact_all"):
the_contacts.update(rbn_all_contacts())
if rule.get("contact_all_with_email"):
the_contacts.update(rbn_all_contacts(with_email=True))
if "contact_users" in rule:
the_contacts.update(rule["contact_users"])
if "contact_groups" in rule:
the_contacts.update(rbn_groups_contacts(rule["contact_groups"]))
if "contact_emails" in rule:
the_contacts.update(rbn_emails_contacts(rule["contact_emails"]))
all_enabled = []
for contactname in the_contacts:
contact = contacts.get(contactname)
if contact and contact.get("disable_notifications", False):
notify_log(" - skipping contact %s: he/she has disabled notifications" % contactname)
else:
all_enabled.append(contactname)
return all_enabled
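# Illustrative sketch (hypothetical rule): with
#     { "contact_object": True, "contact_users": [ "hh" ], "contact_groups": [ "admins" ] }
# the resulting set is the union of the object's own contacts, the listed users and
# the members of the "admins" contact group, minus anyone who has disabled notifications.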
def rbn_match_event_console(rule, context):
if "match_ec" in rule:
match_ec = rule["match_ec"]
is_ec_notification = "EC_ID" in context
if match_ec == False and is_ec_notification:
return "Notification has been created by the Event Console."
elif match_ec != False and not is_ec_notification:
return "Notification has not been created by the Event Console."
if match_ec != False:
# Match Event Console rule ID
if "match_rule_id" in match_ec and context["EC_RULE_ID"] != match_ec["match_rule_id"]:
return "EC Event has rule ID '%s', but '%s' is required" % (
context["EC_RULE_ID"], match_ec["match_rule_id"])
# Match syslog priority of event
if "match_priority" in match_ec:
prio_from, prio_to = match_ec["match_priority"]
if prio_from > prio_to:
prio_to, prio_from = prio_from, prio_to
p = int(context["EC_PRIORITY"])
if p < prio_from or p > prio_to:
return "Event has priority %s, but matched range is %s .. %s" % (
p, prio_from, prio_to)
# Match syslog facility of event
if "match_facility" in match_ec:
if match_ec["match_facility"] != int(context["EC_FACILITY"]):
return "Wrong syslog facility %s, required is %s" % (context["EC_FACILITY"], match_ec["match_facility"])
# Match event comment
if "match_comment" in match_ec:
r = regex(match_ec["match_comment"])
if not r.search(context["EC_COMMENT"]):
return "The event comment '%s' does not match the regular expression '%s'" % (
context["EC_COMMENT"], match_ec["match_comment"])
def rbn_object_contacts(context):
commasepped = context.get("CONTACTS")
if commasepped:
return commasepped.split(",")
else:
return []
def rbn_all_contacts(with_email=None):
if not with_email:
return contacts.keys() # We have that via our main.mk contact definitions!
else:
return [
contact_id
for (contact_id, contact)
in contacts.items()
if contact.get("email")]
def rbn_groups_contacts(groups):
if not groups:
return {}
contacts = set([])
query = "GET contactgroups\nColumns: members\n"
for group in groups:
query += "Filter: name = %s\n" % group
query += "Or: %d\n" % len(groups)
response = livestatus_fetch_query(query)
for line in response.splitlines():
line = line.strip()
if line:
contacts.update(line.split(","))
return contacts
def rbn_emails_contacts(emails):
return [ "mailto:" + e for e in emails ]
#.
# .--Flexible-Notifications----------------------------------------------.
# | _____ _ _ _ _ |
# | | ___| | _____ _(_) |__ | | ___ |
# | | |_ | |/ _ \ \/ / | '_ \| |/ _ \ |
# | | _| | | __/> <| | |_) | | __/ |
# | |_| |_|\___/_/\_\_|_.__/|_|\___| |
# | |
# +----------------------------------------------------------------------+
# | Implementation of the pre 1.2.5, hopelessly outdated flexible |
# | notifications. |
# '----------------------------------------------------------------------'
def notify_flexible(raw_context, notification_table):
for entry in notification_table:
plugin = entry["plugin"]
notify_log(" Notification channel with plugin %s" % (plugin or "plain email"))
if not should_notify(raw_context, entry):
continue
plugin_context = create_plugin_context(raw_context, entry.get("parameters", []))
if notification_spooling:
create_spoolfile({"context": plugin_context, "plugin": plugin})
else:
call_notification_script(plugin, plugin_context)
# may return
# 0 : everything fine -> proceed
# 1 : currently not OK -> try to process later on
# >=2: invalid -> discard
def should_notify(context, entry):
# Check disabling
if entry.get("disabled"):
notify_log(" - Skipping: it is disabled for this user")
return False
# Check host, if configured
if entry.get("only_hosts"):
hostname = context.get("HOSTNAME")
skip = True
regex = False
negate = False
for h in entry["only_hosts"]:
if h.startswith("!"): # negate
negate = True
h = h[1:]
elif h.startswith('~'):
regex = True
h = h[1:]
if not regex and hostname == h:
skip = negate
break
elif regex and re.match(h, hostname):
skip = negate
break
if skip:
notify_log(" - Skipping: host '%s' matches none of %s" % (hostname, ", ".join(entry["only_hosts"])))
return False
# Check if the host has to be in a special service_level
if "match_sl" in entry:
from_sl, to_sl = entry['match_sl']
if context['WHAT'] == "SERVICE" and context.get('SVC_SL','').isdigit():
sl = saveint(context.get('SVC_SL'))
else:
sl = saveint(context.get('HOST_SL'))
if sl < from_sl or sl > to_sl:
notify_log(" - Skipping: service level %d not between %d and %d" % (sl, from_sl, to_sl))
return False
    # Skip blacklisted services
if entry.get("service_blacklist"):
servicedesc = context.get("SERVICEDESC")
if not servicedesc:
notify_log(" - Proceed: blacklist certain services, but this is a host notification")
else:
for s in entry["service_blacklist"]:
if re.match(s, servicedesc):
notify_log(" - Skipping: service '%s' matches blacklist (%s)" % (
servicedesc, ", ".join(entry["service_blacklist"])))
return False
# Check service, if configured
if entry.get("only_services"):
servicedesc = context.get("SERVICEDESC")
if not servicedesc:
notify_log(" - Proceed: limited to certain services, but this is a host notification")
else:
# Example
# only_services = [ "!LOG foo", "LOG", BAR" ]
# -> notify all services beginning with LOG or BAR, but not "LOG foo..."
skip = True
for s in entry["only_services"]:
if s.startswith("!"): # negate
negate = True
s = s[1:]
else:
negate = False
if re.match(s, servicedesc):
skip = negate
break
if skip:
notify_log(" - Skipping: service '%s' matches none of %s" % (
servicedesc, ", ".join(entry["only_services"])))
return False
# Check notification type
event, allowed_events = check_notification_type(context, entry["host_events"], entry["service_events"])
if event not in allowed_events:
notify_log(" - Skipping: wrong notification type %s (%s), only %s are allowed" %
(event, context["NOTIFICATIONTYPE"], ",".join(allowed_events)) )
return False
# Check notification number (in case of repeated notifications/escalations)
if "escalation" in entry:
from_number, to_number = entry["escalation"]
if context["WHAT"] == "HOST":
notification_number = int(context.get("HOSTNOTIFICATIONNUMBER", 1))
else:
notification_number = int(context.get("SERVICENOTIFICATIONNUMBER", 1))
if notification_number < from_number or notification_number > to_number:
notify_log(" - Skipping: notification number %d does not lie in range %d ... %d" %
(notification_number, from_number, to_number))
return False
if "timeperiod" in entry:
timeperiod = entry["timeperiod"]
if timeperiod and timeperiod != "24X7":
if not check_timeperiod(timeperiod):
notify_log(" - Skipping: time period %s is currently not active" % timeperiod)
return False
return True
def check_notification_type(context, host_events, service_events):
notification_type = context["NOTIFICATIONTYPE"]
if context["WHAT"] == "HOST":
allowed_events = host_events
state = context["HOSTSTATE"]
events = { "UP" : 'r', "DOWN" : 'd', "UNREACHABLE" : 'u' }
else:
allowed_events = service_events
state = context["SERVICESTATE"]
events = { "OK" : 'r', "WARNING" : 'w', "CRITICAL" : 'c', "UNKNOWN" : 'u' }
if notification_type == "RECOVERY":
event = 'r'
elif notification_type in [ "FLAPPINGSTART", "FLAPPINGSTOP", "FLAPPINGDISABLED" ]:
event = 'f'
elif notification_type in [ "DOWNTIMESTART", "DOWNTIMEEND", "DOWNTIMECANCELLED"]:
event = 's'
elif notification_type == "ACKNOWLEDGEMENT":
event = 'x'
else:
event = events.get(state, '?')
return event, allowed_events
#.
# .--Plain Email---------------------------------------------------------.
# | ____ _ _ _____ _ _ |
# | | _ \| | __ _(_)_ __ | ____|_ __ ___ __ _(_) | |
# | | |_) | |/ _` | | '_ \ | _| | '_ ` _ \ / _` | | | |
# | | __/| | (_| | | | | | | |___| | | | | | (_| | | | |
# | |_| |_|\__,_|_|_| |_| |_____|_| |_| |_|\__,_|_|_| |
# | |
# +----------------------------------------------------------------------+
# | Plain Email notification, inline implemented. This is also being |
# | used as a pseudo-plugin by Flexible Notification and RBN. |
# '----------------------------------------------------------------------'
def notify_plain_email(raw_context):
plugin_context = create_plugin_context(raw_context, [])
if notification_spooling:
create_spoolfile({"context": plugin_context, "plugin" : None})
else:
notify_log("Sending plain email to %s" % plugin_context["CONTACTNAME"])
notify_via_email(plugin_context)
def notify_via_email(plugin_context):
notify_log(substitute_context(notification_log_template, plugin_context))
if plugin_context["WHAT"] == "SERVICE":
subject_t = notification_service_subject
body_t = notification_service_body
else:
subject_t = notification_host_subject
body_t = notification_host_body
subject = substitute_context(subject_t, plugin_context)
plugin_context["SUBJECT"] = subject
body = substitute_context(notification_common_body + body_t, plugin_context)
command = substitute_context(notification_mail_command, plugin_context)
command_utf8 = command.encode("utf-8")
# Make sure that mail(x) is using UTF-8. Otherwise we cannot send notifications
# with non-ASCII characters. Unfortunately we do not know whether C.UTF-8 is
# available. If e.g. nail detects a non-Ascii character in the mail body and
# the specified encoding is not available, it will silently not send the mail!
    # Our resolution in future: use /usr/sbin/sendmail directly.
    # Our resolution in the present: look with locale -a for an existing UTF encoding
# and use that.
old_lang = os.getenv("LANG", "")
for encoding in os.popen("locale -a 2>/dev/null"):
l = encoding.lower()
if "utf8" in l or "utf-8" in l or "utf.8" in l:
encoding = encoding.strip()
os.putenv("LANG", encoding)
if notification_logging >= 2:
notify_log("Setting locale for mail to %s." % encoding)
break
else:
notify_log("No UTF-8 encoding found in your locale -a! Please provide C.UTF-8 encoding.")
# Important: we must not output anything on stdout or stderr. Data of stdout
# goes back into the socket to the CMC in keepalive mode and garbles the
# handshake signal.
if notification_logging >= 2:
notify_log("Executing command: %s" % command)
p = subprocess.Popen(command_utf8, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
stdout_txt, stderr_txt = p.communicate(body.encode("utf-8"))
exitcode = p.returncode
os.putenv("LANG", old_lang) # Important: do not destroy our environment
if exitcode != 0:
notify_log("ERROR: could not deliver mail. Exit code of command is %r" % exitcode)
for line in (stdout_txt + stderr_txt).splitlines():
notify_log("mail: %s" % line.rstrip())
return 2
return 0
#.
# .--Plugins-------------------------------------------------------------.
# | ____ _ _ |
# | | _ \| |_ _ __ _(_)_ __ ___ |
# | | |_) | | | | |/ _` | | '_ \/ __| |
# | | __/| | |_| | (_| | | | | \__ \ |
# | |_| |_|\__,_|\__, |_|_| |_|___/ |
# | |___/ |
# +----------------------------------------------------------------------+
# | Code for the actuall calling of notification plugins (scripts). |
# '----------------------------------------------------------------------'
# Exit codes for plugins and also for our functions that call the plugins:
# 0: Notification successfully sent
# 1: Could not send now, please retry later
# 2: Cannot send, retry does not make sense
# Add the plugin parameters to the environment. We have two types of parameters:
# - list, the legacy style. This will lead to PARAMETERS_1, ...
# - dict, the new style for scripts with WATO rule. This will lead to
# PARAMETER_FOO_BAR for a dict key named "foo_bar".
def create_plugin_context(raw_context, params):
plugin_context = {}
plugin_context.update(raw_context) # Make a real copy
if type(params) == list:
plugin_context["PARAMETERS"] = " ".join(params)
for nr, param in enumerate(params):
plugin_context["PARAMETER_%d" % (nr + 1)] = param
else:
for key, value in params.items():
plugin_context["PARAMETER_" + key.upper()] = plugin_param_to_string(value)
return plugin_context
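# Illustrative sketch (hypothetical parameters, not from a real rule):
# list-style parameters [ "foo", "bar" ] yield
#     PARAMETERS="foo bar", PARAMETER_1="foo", PARAMETER_2="bar"
# while dict-style parameters { "from_address": "[email protected]" } yield
#     PARAMETER_FROM_ADDRESS="[email protected]"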
def create_bulk_parameter_context(params):
dict_context = create_plugin_context({}, params)
return [ "%s=%s\n" % (varname, value.replace("\r", "").replace("\n", "\1"))
for (varname, value) in dict_context.items() ]
def plugin_param_to_string(value):
if type(value) in ( str, unicode ):
return value
elif type(value) in ( int, float ):
return str(value)
elif value == None:
return ""
elif value == True:
return "yes"
elif value == False:
return ""
elif type(value) in ( tuple, list ):
return "\t".join(value)
else:
return repr(value) # Should never happen
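# For example (hypothetical values): True -> "yes", False and None -> "",
# 42 -> "42", [ "a", "b" ] -> "a\tb" (tab separated), so that every parameter
# can be exported as a flat environment string.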
def path_to_notification_script(plugin):
# Call actual script without any arguments
if local_notifications_dir:
path = local_notifications_dir + "/" + plugin
if not os.path.exists(path):
path = notifications_dir + "/" + plugin
else:
path = notifications_dir + "/" + plugin
if not os.path.exists(path):
notify_log("Notification plugin '%s' not found" % plugin)
notify_log(" not in %s" % notifications_dir)
if local_notifications_dir:
notify_log(" and not in %s" % local_notifications_dir)
return None
else:
return path
# This is the function that finally sends the actual notification.
# It does this by calling an external script or by creating a
# plain email and calling bin/mail.
#
# It also does the central logging of the notifications
# that are actually sent out.
#
# Note: this function is *not* being called for bulk notification.
def call_notification_script(plugin, plugin_context):
core_notification_log(plugin, plugin_context)
# The "Pseudo"-Plugin None means builtin plain email
if not plugin:
return notify_via_email(plugin_context)
# Call actual script without any arguments
path = path_to_notification_script(plugin)
if not path:
return 2
# Export complete context to have all vars in environment.
# Existing vars are replaced, some already existing might remain
for key in plugin_context:
if type(plugin_context[key]) == bool:
notify_log("INTERNAL ERROR: %s=%s is of type bool" % (key, plugin_context[key]))
os.putenv('NOTIFY_' + key, plugin_context[key].encode('utf-8'))
notify_log(" executing %s" % path)
out = os.popen(path + " 2>&1 </dev/null")
for line in out:
notify_log("Output: %s" % line.rstrip().decode('utf-8'))
exitcode = out.close()
if exitcode:
notify_log("Plugin exited with code %d" % (exitcode >> 8))
else:
exitcode = 0
    # Clear environment again. TODO: We could use subprocess.Popen and specify
# the environment without destroying it?
for key in plugin_context:
os.unsetenv('NOTIFY_' + key)
return exitcode
#.
# .--Spooling------------------------------------------------------------.
# | ____ _ _ |
# | / ___| _ __ ___ ___ | (_)_ __ __ _ |
# | \___ \| '_ \ / _ \ / _ \| | | '_ \ / _` | |
# | ___) | |_) | (_) | (_) | | | | | | (_| | |
# | |____/| .__/ \___/ \___/|_|_|_| |_|\__, | |
# | |_| |___/ |
# +----------------------------------------------------------------------+
# | Some functions dealing with the spooling of notifications. |
# '----------------------------------------------------------------------'
def create_spoolfile(data):
if not os.path.exists(notification_spooldir):
os.makedirs(notification_spooldir)
file_path = "%s/%s" % (notification_spooldir, fresh_uuid())
notify_log("Creating spoolfile: %s" % file_path)
file(file_path,"w").write(pprint.pformat(data))
# There are three types of spool files:
# 1. Notifications to be forwarded. Contain key "forward"
# 2. Notifications for async local delivery. Contain key "plugin"
# 3. Notifications that *got* forwarded. Contain neither of the two keys.
# Spool files of type 1 are not handled here!
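#
# Illustrative sketch of a type 2 spool file as written by create_spoolfile()
# via pprint (hypothetical values):
#     { "context": { "HOSTNAME": "srv01", "CONTACTNAME": "hh", ... },
#       "plugin": "mail" }
# A type 1 file instead carries { "context": { ... }, "forward": "10.0.0.1:6555" }.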
def handle_spoolfile(spoolfile):
try:
data = eval(file(spoolfile).read())
if "plugin" in data:
plugin_context = data["context"]
plugin = data["plugin"]
notify_log("Got spool file for local delivery via %s" % (
plugin or "plain mail"))
return call_notification_script(plugin, plugin_context)
else:
# We received a forwarded raw notification. We need to process
# this with our local notification rules in order to call one,
# several or no actual plugins.
notify_log("Got spool file from remote host for local delivery.")
raw_context = data["context"]
locally_deliver_raw_context(data["context"])
return 0 # No error handling for async delivery
except Exception, e:
notify_log("ERROR %s\n%s" % (e, format_exception()))
return 2
#.
# .--Bulk-Notifications--------------------------------------------------.
# | ____ _ _ |
# | | __ ) _ _| | | __ |
# | | _ \| | | | | |/ / |
# | | |_) | |_| | | < |
# | |____/ \__,_|_|_|\_\ |
# | |
# +----------------------------------------------------------------------+
# | Store postponed bulk notifications for later delivery. Deliver such |
# | notifications on cmk --notify bulk. |
# '----------------------------------------------------------------------'
def do_bulk_notify(contact, plugin, params, plugin_context, bulk):
# First identify the bulk. The following elements identify it:
# 1. contact
# 2. plugin
# 3. time horizon (interval) in seconds
# 4. max bulked notifications
# 5. elements specified in bulk["groupby"] and bulk["groupby_custom"]
# We first create a bulk path constructed as a tuple of strings.
# Later we convert that to a unique directory name.
# Note: if you have separate bulk rules with exactly the same
# bulking options, then they will use the same bulk.
what = plugin_context["WHAT"]
bulk_path = (contact, plugin, str(bulk["interval"]), str(bulk["count"]))
bulkby = bulk["groupby"]
if "host" in bulkby:
bulk_path += ("host", plugin_context["HOSTNAME"])
elif "folder" in bulkby:
bulk_path += ("folder", find_wato_folder(plugin_context))
if "service" in bulkby:
bulk_path += ("service", plugin_context.get("SERVICEDESC", ""))
if "sl" in bulkby:
sl = plugin_context.get(what + "_SL", "")
bulk_path += ("sl", sl)
if "check_type" in bulkby:
command = plugin_context.get(what + "CHECKCOMMAND", "").split("!")[0]
bulk_path += ("check_type", command)
if "state" in bulkby:
state = plugin_context.get(what + "STATE", "")
bulk_path += ("state", state)
# User might have specified _FOO instead of FOO
bulkby_custom = bulk.get("groupby_custom", [])
for macroname in bulkby_custom:
macroname = macroname.lstrip("_").upper()
value = plugin_context.get(what + "_" + macroname, "")
bulk_path += (macroname.lower(), value)
notify_log(" --> storing for bulk notification %s" % "|".join(bulk_path))
bulk_dirname = create_bulk_dirname(bulk_path)
uuid = fresh_uuid()
filename = bulk_dirname + "/" + uuid
file(filename + ".new", "w").write("%r\n" % ((params, plugin_context),))
os.rename(filename + ".new", filename) # We need an atomic creation!
notify_log(" - stored in %s" % filename)
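# Illustrative sketch (hypothetical values): bulking by "host" with interval 60
# and count 10 produces a bulk path like
#     ("hh", "mail", "60", "10", "host", "srv01")
# which create_bulk_dirname() below turns into a directory such as
#     notification_bulkdir + "/hh/mail/60,10,host,srv01"
# find_bulks() later parses the interval and count back out of that last component.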
def find_wato_folder(context):
for tag in context.get("HOSTTAGS", "").split():
if tag.startswith("/wato/"):
return tag[6:].rstrip("/")
return ""
def create_bulk_dirname(bulk_path):
dirname = notification_bulkdir + "/" + bulk_path[0] + "/" + bulk_path[1] + "/"
dirname += ",".join([b.replace("/", "\\") for b in bulk_path[2:]])
# Remove non-Ascii-characters by special %02x-syntax
try:
str(dirname)
except:
new_dirname = ""
for char in dirname:
if ord(char) <= 0 or ord(char) > 127:
new_dirname += "%%%04x" % ord(char)
else:
new_dirname += char
dirname = new_dirname
if not os.path.exists(dirname):
os.makedirs(dirname)
notify_log(" - created bulk directory %s" % dirname)
return dirname
def find_bulks(only_ripe):
if not os.path.exists(notification_bulkdir):
return []
now = time.time()
bulks = []
dir_1 = notification_bulkdir
for contact in os.listdir(dir_1):
if contact.startswith("."):
continue
dir_2 = dir_1 + "/" + contact
for method in os.listdir(dir_2):
if method.startswith("."):
continue
dir_3 = dir_2 + "/" + method
for bulk in os.listdir(dir_3):
parts = bulk.split(',') # e.g. 60,10,host,localhost
try:
interval = int(parts[0])
count = int(parts[1])
except:
notify_log("Skipping invalid bulk directory %s" % dir_3)
continue
dir_4 = dir_3 + "/" + bulk
uuids = []
oldest = time.time()
for uuid in os.listdir(dir_4): # 4ded0fa2-f0cd-4b6a-9812-54374a04069f
if uuid.startswith(".") or uuid.endswith(".new"):
continue
if len(uuid) != 36:
notify_log("Skipping invalid notification file %s/%s" % (dir_4, uuid))
continue
mtime = os.stat(dir_4 + "/" + uuid).st_mtime
uuids.append((mtime, uuid))
oldest = min(oldest, mtime)
uuids.sort()
if not uuids:
dirage = now - os.stat(dir_4).st_mtime
if dirage > 60:
notify_log("Warning: removing orphaned empty bulk directory %s" % dir_4)
try:
os.rmdir(dir_4)
except Exception, e:
notify_log(" -> Error removing it: %s" % e)
continue
age = now - oldest
if age >= interval:
notify_log("Bulk %s is ripe: age %d >= %d" % (dir_4, age, interval))
elif len(uuids) >= count:
notify_log("Bulk %s is ripe: count %d >= %d" % (dir_4, len(uuids), count))
else:
notify_log("Bulk %s is not ripe yet (age: %d, count: %d)!" % (dir_4, age, len(uuids)))
if only_ripe:
continue
bulks.append((dir_4, age, interval, count, uuids))
return bulks
def send_ripe_bulks():
ripe = find_bulks(True)
if ripe:
notify_log("Sending out %d ripe bulk notifications" % len(ripe))
for bulk in ripe:
try:
notify_bulk(bulk[0], bulk[-1])
except Exception, e:
if opt_debug:
raise
notify_log("Error sending bulk %s: %s" % (bulk[0], format_exception()))
def notify_bulk(dirname, uuids):
parts = dirname.split("/")
contact = parts[-3]
plugin = parts[-2]
notify_log(" -> %s/%s %s" % (contact, plugin, dirname))
# If new entries are created in this directory while we are working
# on it, nothing bad happens. It's just that we cannot remove
# the directory after our work. It will be the starting point for
# the next bulk with the same ID, which is completely OK.
bulk_context = []
old_params = None
unhandled_uuids = []
for mtime, uuid in uuids:
try:
params, context = eval(file(dirname + "/" + uuid).read())
except Exception, e:
if opt_debug:
raise
notify_log(" Deleting corrupted or empty bulk file %s/%s: %s" % (dirname, uuid, e))
continue
if old_params == None:
old_params = params
elif params != old_params:
notify_log(" Parameters are different from previous, postponing into separate bulk")
unhandled_uuids.append((mtime, uuid))
continue
bulk_context.append("\n")
for varname, value in context.items():
bulk_context.append("%s=%s\n" % (varname, value.replace("\r", "").replace("\n", "\1")))
# Do not forget to add this to the monitoring log. We create
# a single entry for each notification contained in the bulk.
# It is important later to have this precise information.
plugin_name = "bulk " + (plugin or "plain email")
core_notification_log(plugin_name, context)
if bulk_context: # otherwise: only corrupted files
parameter_context = create_bulk_parameter_context(old_params)
context_text = "".join(parameter_context + bulk_context)
call_bulk_notification_script(plugin, context_text)
else:
notify_log("No valid notification file left. Skipping this bulk.")
# Remove sent notifications
for mtime, uuid in uuids:
if (mtime, uuid) not in unhandled_uuids:
path = dirname + "/" + uuid
try:
os.remove(path)
except Exception, e:
notify_log("Cannot remove %s: %s" % (path, e))
# Repeat with unhandled uuids (due to different parameters)
if unhandled_uuids:
notify_bulk(dirname, unhandled_uuids)
    # Remove the directory. This only succeeds if it is empty.
try:
os.rmdir(dirname)
except Exception, e:
if not unhandled_uuids:
notify_log("Warning: cannot remove directory %s: %s" % (dirname, e))
def call_bulk_notification_script(plugin, context_text):
path = path_to_notification_script(plugin)
if not path:
raise MKGeneralException("Notification plugin %s not found" % plugin)
# Protocol: The script gets the context on standard input and
# read until that is closed. It is being called with the parameter
# --bulk.
p = subprocess.Popen([path, "--bulk"], shell=False,
stdout = subprocess.PIPE, stderr = subprocess.PIPE, stdin = subprocess.PIPE)
stdout_txt, stderr_txt = p.communicate(context_text.encode("utf-8"))
exitcode = p.returncode
if exitcode:
notify_log("ERROR: script %s --bulk returned with exit code %s" % (path, exitcode))
for line in (stdout_txt + stderr_txt).splitlines():
notify_log("%s: %s" % (plugin, line.rstrip()))
#.
# .--Contexts------------------------------------------------------------.
# | ____ _ _ |
# | / ___|___ _ __ | |_ _____ _| |_ ___ |
# | | | / _ \| '_ \| __/ _ \ \/ / __/ __| |
# | | |__| (_) | | | | || __/> <| |_\__ \ |
# | \____\___/|_| |_|\__\___/_/\_\\__|___/ |
# | |
# +----------------------------------------------------------------------+
# | Functions dealing with loading, storing and converting contexts. |
# '----------------------------------------------------------------------'
# Add a few further helper variables that are useful in notification plugins
def complete_raw_context(raw_context):
raw_context["WHAT"] = raw_context.get("SERVICEDESC") and "SERVICE" or "HOST"
raw_context["MONITORING_HOST"] = socket.gethostname()
raw_context["LOGDIR"] = notification_logdir
if omd_root:
raw_context["OMD_ROOT"] = omd_root
raw_context["OMD_SITE"] = os.getenv("OMD_SITE", "")
raw_context["MAIL_COMMAND"] = notification_mail_command
# The Check_MK Micro Core sends the MICROTIME and no other time stamps. We add
# a few Nagios-like variants in order to be compatible
if "MICROTIME" in raw_context:
microtime = int(raw_context["MICROTIME"])
timestamp = float(microtime) / 1000000.0
broken = time.localtime(timestamp)
raw_context["DATE"] = time.strftime("%Y-%m-%d", broken)
raw_context["SHORTDATETIME"] = time.strftime("%Y-%m-%d %H:%M:%S", broken)
raw_context["LONGDATETIME"] = time.strftime("%a %b %d %H:%M:%S %Z %Y", broken)
raw_context['HOSTURL'] = '/check_mk/index.py?start_url=%s' % \
urlencode('view.py?view_name=hoststatus&host=%s' % raw_context['HOSTNAME'])
if raw_context['WHAT'] == 'SERVICE':
raw_context['SERVICEURL'] = '/check_mk/index.py?start_url=%s' % \
urlencode('view.py?view_name=service&host=%s&service=%s' %
(raw_context['HOSTNAME'], raw_context['SERVICEDESC']))
# Relative Timestamps for several macros
for macro in [ 'LASTHOSTSTATECHANGE', 'LASTSERVICESTATECHANGE', 'LASTHOSTUP', 'LASTSERVICEOK' ]:
if macro in raw_context:
raw_context[macro + '_REL'] = get_readable_rel_date(raw_context[macro])
# Rule based notifications enabled? We might need to complete a few macros
contact = raw_context.get("CONTACTNAME")
if not contact or contact == "check-mk-notify":
add_rulebased_macros(raw_context)
# For custom notifications the number is set to 0 by the core (Nagios and CMC). We force at least
# number 1 here, so that rules with conditions on numbers do not fail (the minimum is 1 here)
for what in [ "HOST", "SERVICE" ]:
key = what + "NOTIFICATIONNUMBER"
if key in raw_context and raw_context[key] == "0":
raw_context[key] = "1"
    # Add the previous hard state. This is necessary for notification rules that depend on certain transitions,
    # like OK -> WARN (but not CRIT -> WARN). The CMC sends PREVIOUSHOSTHARDSTATE and PREVIOUSSERVICEHARDSTATE.
    # Nagios does not have this information and we try to deduce it.
if "PREVIOUSHOSTHARDSTATE" not in raw_context and "LASTHOSTSTATE" in raw_context:
prev_state = raw_context["LASTHOSTSTATE"]
# When the attempts are > 1 then the last state could be identical with
# the current one, e.g. both critical. In that case we assume the
# previous hard state to be OK.
if prev_state == raw_context["HOSTSTATE"]:
prev_state = "UP"
elif "HOSTATTEMPT" not in raw_context or \
("HOSTATTEMPT" in raw_context and raw_context["HOSTATTEMPT"] != "1"):
# Here we do not know. The transition might be OK -> WARN -> CRIT and
# the initial OK is completely lost. We use the artificial state "?"
# here, which matches all states and makes sure that when in doubt a
# notification is being sent out. But when the new state is UP, then
# we know that the previous state was a hard state (otherwise there
# would not have been any notification)
if raw_context["HOSTSTATE"] != "UP":
prev_state = "?"
notify_log("Previous host hard state not known. Allowing all states.")
raw_context["PREVIOUSHOSTHARDSTATE"] = prev_state
# Same for services
if raw_context["WHAT"] == "SERVICE" and "PREVIOUSSERVICEHARDSTATE" not in raw_context:
prev_state = raw_context["LASTSERVICESTATE"]
if prev_state == raw_context["SERVICESTATE"]:
prev_state = "OK"
elif "SERVICEATTEMPT" not in raw_context or \
("SERVICEATTEMPT" in raw_context and raw_context["SERVICEATTEMPT"] != "1"):
if raw_context["SERVICESTATE"] != "OK":
prev_state = "?"
notify_log("Previous service hard state not known. Allowing all states.")
raw_context["PREVIOUSSERVICEHARDSTATE"] = prev_state
# Add short variants for state names (at most 4 characters)
for key, value in raw_context.items():
if key.endswith("STATE"):
raw_context[key[:-5] + "SHORTSTATE"] = value[:4]
if raw_context["WHAT"] == "SERVICE":
raw_context['SERVICEFORURL'] = urllib.quote(raw_context['SERVICEDESC'])
raw_context['HOSTFORURL'] = urllib.quote(raw_context['HOSTNAME'])
convert_context_to_unicode(raw_context)
# Be aware: The backlog.mk contains the raw context which has not been decoded
# to unicode yet. It contains raw encoded strings e.g. the plugin output provided
# by third party plugins which might be UTF-8 encoded but can also be encoded in
# other ways. Currently the context is converted later by both this module
# and the GUI. TODO Maybe we should centralize the encoding here and save the
# backlog already encoded.
def store_notification_backlog(raw_context):
path = notification_logdir + "/backlog.mk"
if not notification_backlog:
if os.path.exists(path):
os.remove(path)
return
try:
backlog = eval(file(path).read())[:notification_backlog-1]
except:
backlog = []
backlog = [ raw_context ] + backlog
file(path, "w").write("%r\n" % backlog)
def raw_context_from_backlog(nr):
try:
backlog = eval(file(notification_logdir + "/backlog.mk").read())
except:
backlog = []
if nr < 0 or nr >= len(backlog):
sys.stderr.write("No notification number %d in backlog.\n" % nr)
sys.exit(2)
notify_log("Replaying notification %d from backlog...\n" % nr)
return backlog[nr]
def raw_context_from_env():
# Information about the notification is expected in the
# environment in variables with the prefix NOTIFY_
return dict([
(var[7:], value)
for (var, value)
in os.environ.items()
if var.startswith("NOTIFY_")
and not dead_nagios_variable(value) ])
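# Example (illustrative): with NOTIFY_HOSTNAME=db1 and NOTIFY_SERVICEDESC=CPU load
# in the environment this returns {"HOSTNAME": "db1", "SERVICEDESC": "CPU load"};
# variables whose value still looks like an unexpanded $MACRO$ are skipped.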
def expand_backslashes(value):
# We cannot do the following:
# value.replace(r"\n", "\n").replace("\\\\", "\\")
# \\n would be expanded to \<LF> instead of \n. This was a bug
# in previous versions.
return value.replace("\\\\", "\0").replace("\\n", "\n").replace("\0", "\\")
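# Example (illustrative): expand_backslashes(r"foo\nbar") yields "foo" + a real
# newline + "bar", while expand_backslashes(r"foo\\nbar") keeps a literal
# backslash followed by "nbar" instead of turning it into a newline.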
def raw_context_from_stdin():
context = {}
for line in sys.stdin:
varname, value = line.strip().split("=", 1)
context[varname] = expand_backslashes(value)
return context
def raw_context_from_string(data):
# Context is given line-by-line in the "data" string
context = {}
try:
for line in data.split('\n'):
varname, value = line.strip().split("=", 1)
context[varname] = expand_backslashes(value)
except Exception, e: # a line without '=' is ignored (or re-raised in debug mode)
if opt_debug:
raise
return context
def convert_context_to_unicode(context):
# Convert all values to unicode
for key, value in context.iteritems():
if type(value) == str:
try:
value_unicode = value.decode("utf-8")
except:
try:
value_unicode = value.decode("latin-1")
except:
value_unicode = u"(Invalid byte sequence)"
context[key] = value_unicode
def substitute_context(template, context):
# First replace all known variables
for varname, value in context.items():
template = template.replace('$'+varname+'$', value)
# Remove the rest of the variables and make them empty
template = re.sub("\$[A-Z]+\$", "", template)
return template
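# Example (illustrative): substitute_context("Host $HOSTNAME$ is $HOSTSTATE$ - $FOO$",
# {"HOSTNAME": "db1", "HOSTSTATE": "UP"}) returns "Host db1 is UP - " because the
# unknown macro $FOO$ is replaced by an empty string.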
#.
# .--Helpers-------------------------------------------------------------.
# | _ _ _ |
# | | | | | ___| |_ __ ___ _ __ ___ |
# | | |_| |/ _ \ | '_ \ / _ \ '__/ __| |
# | | _ | __/ | |_) | __/ | \__ \ |
# | |_| |_|\___|_| .__/ \___|_| |___/ |
# | |_| |
# +----------------------------------------------------------------------+
# | Some generic helper functions |
# '----------------------------------------------------------------------'
def livestatus_fetch_query(query):
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(livestatus_unix_socket)
sock.send(query)
sock.shutdown(socket.SHUT_WR)
response = sock.recv(10000000)
sock.close()
return response
def livestatus_send_command(command):
try:
message = "COMMAND [%d] %s\n" % (time.time(), command)
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(livestatus_unix_socket)
sock.send(message)
sock.close()
except Exception, e:
if opt_debug:
raise
notify_log("WARNING: cannot send livestatus command: %s" % e)
notify_log("Command was: %s" % command)
def format_exception():
import traceback, StringIO, sys
txt = StringIO.StringIO()
t, v, tb = sys.exc_info()
traceback.print_exception(t, v, tb, None, txt)
return txt.getvalue()
def dead_nagios_variable(value):
if len(value) < 3:
return False
if value[0] != '$' or value[-1] != '$':
return False
for c in value[1:-1]:
if not c.isupper() and c != '_':
return False
return True
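# Example (illustrative): dead_nagios_variable("$SERVICEACKAUTHOR$") is True
# (an unexpanded Nagios macro), while dead_nagios_variable("db1") is False.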
def notify_log(message):
if notification_logging >= 1:
formatted = u"%s %s\n" % (time.strftime("%F %T", time.localtime()), message)
file(notification_log, "a").write(formatted.encode("utf-8"))
def get_readable_rel_date(timestamp):
try:
change = int(timestamp)
except:
change = 0
rel_time = time.time() - change
seconds = rel_time % 60
rem = rel_time / 60
minutes = rem % 60
hours = (rem % 1440) / 60
days = rem / 1440
return '%dd %02d:%02d:%02d' % (days, hours, minutes, seconds)
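# Example (illustrative): for a timestamp lying 90061 seconds in the past
# (1 day, 1 hour, 1 minute, 1 second) this returns roughly '1d 01:01:01'.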
def urlencode(s):
return urllib.quote(s)
def fresh_uuid():
try:
return file('/proc/sys/kernel/random/uuid').read().strip()
except IOError:
# On platforms where the above file does not exist we try to
# use the python uuid module which seems to be a good fallback
# for those systems. Well, if you've got Python < 2.5 you are lost for now.
import uuid
return str(uuid.uuid4())
def core_notification_log(plugin, plugin_context):
what = plugin_context["WHAT"]
contact = plugin_context["CONTACTNAME"]
spec = plugin_context["HOSTNAME"]
if what == "HOST":
state = plugin_context["HOSTSTATE"]
output = plugin_context["HOSTOUTPUT"]
if what == "SERVICE":
spec += ";" + plugin_context["SERVICEDESC"]
state = plugin_context["SERVICESTATE"]
output = plugin_context["SERVICEOUTPUT"]
log_message = "%s NOTIFICATION: %s;%s;%s;%s;%s" % (
what, contact, spec, state, plugin or "plain email", output)
if monitoring_core == "cmc":
livestatus_send_command("LOG;" + log_message.encode("utf-8"))
else:
# Nagios and friends do not support logging via an
# external command. We write the entries into a helper file
# in var/check_mk/notify. If the user likes, he can
# replace that file with a symbolic link to the nagios
# log file. But note: Nagios logging might not be atomic.
file(notification_core_log, "a").write("[%d] %s\n" % (time.time(), log_message.encode("utf-8")))
| gpl-2.0 | 4,618,191,767,830,784,000 | 40.583795 | 134 | 0.534765 | false |
SeismicPi/SeismicPi | GUI/configParse.py | 1 | 1112 | import re
def fileToDic(configFile):
def isAlphaNumUnder(s):
return s.isalpha() or s == '_' or s.isdigit()
keyValueDict = {}
for line in configFile:
if(line[0] == '#'):
pass
else:
foundKey = False
foundSeparator = False
foundValue = False
lineKey = ""
lineValue = ""
for currentChar in line:
if(not foundKey):
if(not isAlphaNumUnder(currentChar)):
pass
else:
foundKey = True
lineKey = lineKey + currentChar
else:
if(not foundSeparator):
if(not isAlphaNumUnder(currentChar)):
foundSeparator = True
else:
lineKey = lineKey + currentChar
else:
if(not foundValue):
if(not isAlphaNumUnder(currentChar)):
pass
else:
foundValue = True
lineValue = lineValue + currentChar
else:
if(not currentChar == '\n' and not currentChar == '\r'):
lineValue = lineValue + currentChar
if(foundKey and foundSeparator and foundValue):
keyValueDict[lineKey] = lineValue
configFile.close()
return keyValueDict
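# Example (illustrative, assuming a config line like "baud_rate = 9600"):
# the first run of word characters becomes the key, anything up to the next
# word character is treated as the separator, and the rest of the line is the
# value, so the resulting dictionary would contain {'baud_rate': '9600'}.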
configFile = open('config.txt', 'r');
| mit | -5,345,107,037,231,477,000 | 19.218182 | 63 | 0.619604 | false |
intip/da-apps | applications/da_horoscopo/site.py | 1 | 3784 | # -*- encoding: LATIN1 -*-
#
# Copyright 2009 Prima Tech.
#
# Licensed under the Environ License, Version 1.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.intip.com.br/licenses/ENVIRON-LICENSE-1.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Public methods module.
"""
from publica.utils.decorators import serialize, dbconnectionapp, jsoncallback
from publica.core.portal import Portal
import datetime
SIGNOS = (("Aqu\xe1rio", 120, 218),
("Peixes", 219, 319),
("\xc1ries", 320, 419),
("Touro", 420, 520),
("G\xeameos", 521, 619),
("C\xe2ncer", 620, 721),
("Le\xe3o", 722, 821),
("Virgem", 822, 921),
("Libra", 922, 1021),
("Escorpi\xe3o", 1022, 1121),
("Sagit\xe1rio", 1122, 1220),
("Capric\xf3rnio", 1221, 119))
FILE = "/home/chacras/horoscopo_files/hor{0}.txt"
class Site:
"""
After seeing everything that is needed dynamically, I define the methods here..
or not =D
"""
@jsoncallback
@dbconnectionapp
def getAscendente(self, data, offset):
"""
Computes the ascendant (zodiac sign) for a given day of the year
"""
i = 0
data = int(data)
for signo in SIGNOS:
if data >= signo[1] and data <= signo[2] or i > 10:
break
i+=1
pos = i + int(offset)
if pos > 11:
pos -= 12
url = SIGNOS[pos][0]
for i in self.execSql("select_por_titulo", titulo=url):
portal = Portal(id_site=self.id_site, request=self.request)
url = portal.getUrlByApp(env_site=self.id_site,
schema=self.schema,
id_conteudo=i["id_conteudo"],
exportar=1,
admin=1,
mkattr=1)
return {"url":url, "nome":SIGNOS[pos][0]}
@jsoncallback
@dbconnectionapp
def getPar(self, bom=[], ruim=[]):
lista = set()
for i in bom:
for j in self.execSql("select_nome", id_caracteristica=i):
lista.add(j["id_conteudo"])
for i in ruim:
for j in self.execSql("select_nome", id_caracteristica=i):
if j["id_conteudo"] in lista and len(lista) > 1:
lista.remove(j["id_conteudo"])
for i in lista:
portal = Portal(id_site=self.id_site, request=self.request)
url = portal.getUrlByApp(env_site=self.id_site,
schema=self.schema,
id_conteudo=i,
exportar=1,
admin=1,
mkattr=1)
for i in self.execSql("select_titulo", id_conteudo=i):
return {"url":url, "nome":i["titulo"]}
return "404"
def _getPrevisao(self, signo):
"""
"""
arq = FILE.format(str(datetime.datetime.now().day).zfill(2))
with open(arq) as f:
lines = f.readlines()
i = 0
for line in lines:
if line[:len(signo)] == signo.upper():
return lines[i+5]
i += 1
| gpl-2.0 | -6,958,432,567,843,381,000 | 30.272727 | 78 | 0.496564 | false |
BackupTheBerlios/espressopp | testsuite/pickle_potential/testwarmup.py | 1 | 1313 | import espresso
from espresso import Real3D
d = 0.85
Nchains = 10
Mmonomers = 10
N = Nchains * Mmonomers
L = pow(N/d, 1.0/3)
system, integrator = espresso.standard_system.PolymerMelt(Nchains, Mmonomers,(10,10,10), dt = 0.005, temperature=1.0)
print "starting warmup"
org_dt = integrator.dt
pot = system.getInteraction(0).getPotential(0,0)
print pot
print "Nint = ", system.getNumberOfInteractions()
final_sigma = pot.sigma
final_epsilon = pot.epsilon
print "sigma=",pot.sigma, "epsilon=",pot.epsilon
maxParticleID = int(espresso.analysis.MaxPID(system).compute())
N = 1
number = 50
for k in range(number):
if k < 10:
continue
force_capping = espresso.integrator.CapForce(system, 1000000.0/number*k)
integrator.addExtension(force_capping)
pot.sigma = final_sigma/number*k
pot.epsilon = final_epsilon/number*k
integrator.dt = 0.0001
espresso.tools.analyse.info(system, integrator)
integrator.run(N)
espresso.tools.analyse.info(system, integrator)
integrator.dt = org_dt
pot.sigma = final_sigma
pot.epsilon = final_epsilon
force_capping.disconnect()
for k in range(10):
integrator.run(70)
espresso.tools.analyse.info(system, integrator)
integrator.step = 0
print "warmup finished"
for k in range(10):
integrator.run(100)
espresso.tools.analyse.info(system, integrator)
| gpl-3.0 | 5,970,155,635,582,008,000 | 22.446429 | 117 | 0.740289 | false |
muffato/pyEnsemblRest | read_config.py | 1 | 9452 | #!/usr/bin/env python3
import re
import sys
import glob
import os.path
import xml.etree.ElementTree as ET
# Command-line arguments
config_root = ET.parse(sys.argv[1]).getroot()
rest_checkout = sys.argv[2]
main_namespace = sys.argv[3]
# Will keep the content of all the files we're generating
files = {}
# The template modules that must be present
for module_name in ['_pyrest_core', '_pyrest_server', '__init__']:
with open('template/%s.py' % module_name, 'r') as f:
files[module_name] = f.read()
def replace_placeholder_in_template(filename, key, content_list, sep=''):
files[filename] = files[filename].replace('#'+key, sep.join(content_list))
## Generate all the modules with basic object definition
template_init_import_module = 'from . import %s'
template_module_object = """
class {0}({2}):
\"\"\"{1}\"\"\"
"""
template_construction_rules = """
%s._construction_rules = {%s}
"""
#{0} = property(lambda self : getattr(self, "_{0}"), lambda self, val : setattr(self, "_{0}", val), None, \"\"\"{1}\"\"\")
template_property = """
{0} = property(_pyrest_core.fget("_{0}"), None, None, \"\"\"{1}\"\"\")
"""
#{0} = property({2}, lambda self, val : setattr(self, "_{0}", val), None, \"\"\"{1}\"\"\")
template_property_with_special_getter = """
{0} = property({2}, None, None, \"\"\"{1}\"\"\")
"""
init_imports = []
# All the module names
for config_python_module in config_root.find('objects'):
# config_python_module is a <namespace> element
module_name = config_python_module.get('name')
if module_name is None:
raise SyntaxError("Namespace without a name")
init_imports.append(template_init_import_module % module_name)
# All the objects in this module
module_code = [ ]
ns_to_import = set( ['_pyrest_core'] )
for config_python_object in config_python_module:
# config_python_object is a <object> element
if config_python_object.get('base_class'):
ns_to_import.update( re.findall( '(\w+)\.' , config_python_object.get('base_class')) )
module_code.append( template_module_object.format(config_python_object.get('name'), config_python_object.get('description', ''), config_python_object.get('base_class', '_pyrest_core.BaseObject') ) )
construction_rules = {}
for prop in config_python_object:
# prop is a <property> element
t = template_property_with_special_getter if prop.get('getter') else template_property
module_code.append( t.format( prop.get('name'), prop.get('description', 'No documentation'), prop.get('getter') ) )
if prop.get('object'):
construction_rules[ prop.get('name') ] = prop.get('object')
ns_to_import.update( re.findall( '(\w+)\.' , prop.get('object') ) )
if construction_rules:
module_code.append( template_construction_rules % (config_python_object.get('name'), ', '.join('"%s":%s' % x for x in sorted(construction_rules.items()))) )
for n in sorted(ns_to_import):
module_code.insert(0, "\n" )
module_code.insert(0, template_init_import_module % n)
files[module_name] = "".join(module_code)
# Adds the extra methods we want on those objects
filename = 'template/%s.py' % module_name
if os.path.isfile(filename):
with open(filename, 'r') as f:
c = files[module_name]
files[module_name] = f.read()
replace_placeholder_in_template(module_name, '__GENERATED_OBJECTS__', c)
replace_placeholder_in_template('__init__', '__MODULE_IMPORTS__', init_imports, sep='\n')
endpoints = {}
for f in glob.glob('%s/root/documentation/*.conf' % rest_checkout):
try:
for e in ET.parse(f).getroot():
endpoints[e.tag] = e
except ET.ParseError as ex:
raise SyntaxError("Cannot parse " + f) from ex
# Decodes a text that is a bunch of "key=value" lines
# The keys listed in "mul" may appear multiple times
def decode_config(t, mul=[]):
pairs = []
for l in t.splitlines():
part = l.partition('=')
if part[1] == '=':
pairs.append( (part[0].strip(), part[2].strip()) )
#l = [tuple(_.strip() for _ in l.partition('=')) for l in t.splitlines() if '=' in l]
d = dict(pairs)
for k in mul:
if k in d:
d[k] = [x[1] for x in pairs if x[0] == k]
return d
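# Example (illustrative): decode_config("a = 1\nb = 2\nb = 3", mul=["b"])
# returns {'a': '1', 'b': ['2', '3']} - keys listed in "mul" collect every
# occurrence instead of keeping only the last one.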
template_endpoint = '''
def {0}(self, {1}, **kwargs):
"""{3}
Return type: {13}
Valid formats: {6}
HTTP endpoint: {7}
{8}
{9}"""
return self.__build_rest_answer({4}, {5}, {10}, {11}, '{2}'.format({12}), kwargs)
'''
template_endpoint_no_args = '''
def {0}(self, **kwargs):
"""{3}
Return type: {13}
Valid formats: {6}
HTTP endpoint: {7}
{8}
{9}"""
return self.__build_rest_answer({4}, {5}, {10}, {11}, '{2}', kwargs)
'''
def parameter_docstring(param_name, parameter_details):
return "- %s (%s)\n %s\n" % (param_name, parameter_details['type'], parameter_details['description'])
def allparams_docstring(title, allparams, parameter_details):
if len(allparams) == 0:
return ''
return ('%s:\n' % title) + "".join(parameter_docstring(p, parameter_details[p]) for p in allparams)
def get_code_for_endpoint(e):
endpoint_config = endpoints[e.get('id')]
d = decode_config(endpoint_config.text, ['output'])
try:
d['endpoint'] = d['endpoint'].replace('"', '')
except KeyError:
raise SyntaxError("No 'endpoint' parameter in the endpoint id '{0}'".format(endpoint_config.tag))
ordered_parameters = []
for p in (endpoint_config.find('params') or []):
t = p.text
if list(p):
print("Warning, there are some HTML tags inside the description of '{0}'. Trying to sort it out ...".format(d['endpoint']), file=sys.stderr)
for x in p:
t = t + ET.tostring(x, encoding="unicode", method="html")
print("Please check the reconstructed string:", t, file=sys.stderr )
ordered_parameters.append( (p.tag,decode_config(t)) )
parameter_details = dict(ordered_parameters)
endpoint_url_segments = []
required_params = []
for url_segment in d['endpoint'].split('/'):
if url_segment.startswith(':'):
endpoint_url_segments.append('{%d}' % len(required_params))
p = url_segment[1:]
dp = parameter_details[p]
required_params.append(p)
if dp.get('required') != '1':
print("'required' should be set to 1 for '%s' in '%s'" % (p, d['endpoint']), file=sys.stderr)
else:
endpoint_url_segments.append(url_segment)
if len(required_params) != len([x for x in parameter_details.values() if x.get('required') == '1']):
print("Discrepancy in the list of required parameters for", d['endpoint'])
optional_params = [p for (p,dp) in ordered_parameters if (p not in required_params) and ('deprecated' not in dp)]
if e.get('object'):
if 'dict_wrapper' in e.get('object'):
full_object_name = 'Dictionary of String -> %s.%s' % (main_namespace, re.findall('\((.*)\)', e.get('object'))[0])
else:
full_object_name = main_namespace + "." + e.get('object')
else:
full_object_name = "None"
return (template_endpoint if len(required_params) else template_endpoint_no_args).format(
e.get('name'),
", ".join(required_params),
'/'.join(endpoint_url_segments),
d['description'],
e.get('object') if e.get('object') is not None else "None",
d['output'],
", ".join(d['output']),
d['endpoint'],
allparams_docstring('Required parameters', required_params, parameter_details),
allparams_docstring('Optional parameters', optional_params, parameter_details),
optional_params,
None if e.get('accessor') is None else '"%s"' % e.get('accessor'),
", ".join("urllib.parse.quote(str({0}))".format(_) for _ in required_params),
full_object_name,
)
## Read all the other configurations and update _pyrest_server
def build_and_replace(template_anchor, config_tag_name, expected_tag_name, callback, sep=",\n", filename="_pyrest_server"):
data = []
for config_entry in config_root.find(config_tag_name):
assert config_entry.tag == expected_tag_name
data.append( callback(config_entry) )
replace_placeholder_in_template(filename, template_anchor, data, sep=sep)
# endpoint accessors
build_and_replace('__ENDPOINTS_METHODS__', 'endpoints', 'endpoint', get_code_for_endpoint, sep="\n")
# content_types
build_and_replace('__CONTENT_TYPES__', 'content_types', 'content_type',
lambda c: '"%s": "%s"' % (c.get('alias'), c.get('mime'))
)
# instances
template_rest_instance = """
{0} = _pyrest_server.RestServer(server_url = "{1}")
__all__.append({0})
"""
build_and_replace('__REST_INSTANCES__', 'instances', 'instance',
lambda c: template_rest_instance.format(c.get('name'), c.get('url')),
sep="", filename="__init__"
)
# response codes
build_and_replace('__RESPONSE_CODES__', 'response_codes', 'response_code',
lambda c: '%s: ("%s", "%s")' % (c.get('code'), c.get('title'), c.get('description'))
)
## Write down all the files to the disk
for (filename,content) in files.items():
with open('%s/%s.py' % (main_namespace,filename), 'w') as f:
print(content, file=f)
| apache-2.0 | -7,438,585,885,973,545,000 | 34.939163 | 206 | 0.603999 | false |
avain/claripy | claripy/__init__.py | 1 | 2002 | #!/usr/bin/env python
# pylint: disable=F0401,W0401,W0603,
import os
import sys
import socket
import logging
l = logging.getLogger("claripy")
l.addHandler(logging.NullHandler())
_all_backends = [ ]
_eager_backends = [ ]
_model_backends = [ ]
from .errors import *
from . import operations
from . import ops as _all_operations
from . import backends as _backends
backend_vsa = _backends.BackendVSA()
if not os.environ.get('WORKER', False) and os.environ.get('REMOTE', False):
try:
backend_z3 = _backends.backendremote.BackendRemote()
except socket.error:
raise ImportError("can't connect to backend")
else:
backend_z3 = _backends.BackendZ3()
backend_concrete = _backends.BackendConcrete()
_eager_backends[:] = [ backend_concrete ]
_model_backends[:] = [ backend_concrete, backend_vsa ]
_all_backends[:] = [ backend_concrete, backend_vsa, backend_z3 ]
_backends = { 'BackendVSA': backend_vsa, 'BackendZ3': backend_z3, 'BackendConcrete': backend_concrete }
#
# connect to ANA
#
import ana
if os.environ.get('REMOTE', False):
ana.set_dl(mongo_args=())
#
# Some other misguided setup
#
_recurse = 15000
l.warning("Claripy is setting the recursion limit to %d. If Python segfaults, I am sorry.", _recurse)
sys.setrecursionlimit(_recurse)
#
# Below are some exposed interfaces for general use.
#
def downsize():
backend_vsa.downsize()
backend_concrete.downsize()
backend_z3.downsize()
#
# solvers
#
from .frontend import Frontend as _Frontend
from .frontends import LightFrontend, FullFrontend, CompositeFrontend
def Solver():
return FullFrontend(backend_z3)
from .result import Result
#
# backend objects
#
from . import bv
from . import fp
from . import vsa
#
# Operations
#
from .ast.base import *
from .ast.bv import *
from .ast.fp import *
from .ast.bool import *
from . import ast
ast._import()
#
# and some aliases
#
BVV = BitVecVal
BV = BitVec
VS = ValueSet
SI = StridedInterval
TSI = TopStridedInterval
ESI = EmptyStridedInterval
| bsd-2-clause | 3,187,415,920,999,431,700 | 18.821782 | 103 | 0.713786 | false |
chrisxue815/leetcode_python | problems/test_0127_two_end_bfs.py | 1 | 1561 | import collections
import unittest
from typing import List
import utils
# O(n) time. O(n) space. Two-end BFS.
class Solution:
def ladderLength(self, beginWord: str, endWord: str, wordList: List[str]) -> int:
if endWord not in wordList:
return 0
graph = collections.defaultdict(set)
for word in wordList:
for i in range(len(word)):
wildcard = word[:i] + '.' + word[i + 1:]
graph[wildcard].add(word)
result = 2
visited = {beginWord, endWord}
begin = {beginWord}
end = {endWord}
while begin and end:
if len(begin) > len(end):
begin, end = end, begin
next_set = set()
for word in begin:
for i in range(len(word)):
wildcard = word[:i] + '.' + word[i + 1:]
for nxt in graph[wildcard]:
if nxt in end:
return result
if nxt not in visited:
visited.add(nxt)
next_set.add(nxt)
begin = next_set
result += 1
return 0
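# Example (illustrative): beginWord="hit", endWord="cog",
# wordList=["hot","dot","dog","lot","log","cog"] returns 5, i.e. the length of
# the shortest chain hit -> hot -> dot -> dog -> cog.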
class Test(unittest.TestCase):
def test(self):
cases = utils.load_test_json(__file__).test_cases
for case in cases:
args = str(case.args)
actual = Solution().ladderLength(**case.args.__dict__)
self.assertEqual(case.expected, actual, msg=args)
if __name__ == '__main__':
unittest.main()
| unlicense | 5,822,229,832,422,557,000 | 25.913793 | 85 | 0.48943 | false |
sveetch/DjangoSveetchies | settings.py | 1 | 11061 | # -*- coding: utf-8 -*-
"""
Django settings for DjangoSveetchies development
This is the version for development environnment only. For production usage you should
create a new settings file like "prod_settings.py" where you import these settings and
overwrite the required ones like PROJECT_DIR, ADMINS, DATABASES, SECRET_KEY (important),
EMAIL, etc..
"""
import os
from django.utils.translation import ugettext_lazy
#####
#
# 1. To edit for each new install
#
#####
DEBUG = True
TEMPLATE_DEBUG = DEBUG
TEMPLATE_DEBUG = ASSETS_DEBUG = DEBUG
CRISPY_FAIL_SILENTLY = not DEBUG
INTERNAL_IPS = ( '192.168.0.112', )
ADMINS = (
('Sveetch', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'NAME': 'djangosveetchies',
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'USER': 'django',
'PASSWORD': 'dj4ng0',
}
}
# Define the webapp absolute path
# In production this must be defined manually
PROJECT_DIR = os.path.abspath(os.path.dirname(__file__))
# SMTP Settings to send Applications emails, configured for debug purpose only
# $> python -m smtpd -n -c DebuggingServer localhost:1025
EMAIL_HOST = 'localhost'
EMAIL_PORT = 1025
EMAIL_SUBJECT_PREFIX = '[DjangoSveetchies Dev] '
SERVER_EMAIL = 'DjangoSveetchies errors <[email protected]>'
DEFAULT_FROM_EMAIL = 'DjangoSveetchies <[email protected]>'
# Site id for django.contrib.site
SITE_ID = 1
# Make this unique, and don't share it with anybody.
SECRET_KEY = '$emzo7-p1^j-$s^zqo797e1-_@*hf6qxjz@93*iwr30((_ok3='
# Available cache backends
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'sveetchies-demo',
'TIMEOUT': 60,
'KEY_PREFIX': 'dev',
'OPTIONS': {
'MAX_ENTRIES': 1000
}
}
}
# Apps to display on homepage with their datas
PUBLISHED_APPS = (
#(KEYNAME[, TITLE[, DESC[, KWARGS]]])
('autobreadcrumbs', None, None, {'github':True, 'pypi':True}),
('sveeaccounts', None, None, {'github':True, 'pypi':True}),
('rstview', None, None, {'github':True, 'pypi':True}),
('sveedocuments', None, None, {'github':True, 'pypi':True}),
('djangocodemirror', None, None, {'github':True, 'pypi':True, 'demo_link': ('djangocodemirror-sample-view', [])}),
('djangotribune', None, None, {'github':True, 'pypi':True, 'demo_link': ('tribune-board', [])}),
('DjangoSveetchies', None, None, {'github':True, 'doc_link': ('documents-page-details', ['djangosveetchies'])}),
('crispy_forms_foundation', 'crispy-forms-foundation', None, {'github':True, 'pypi':True, 'doc_link': ('documents-page-details', ['crispy-forms-foundation']), 'demo_link': ('crispy-foundation-sample-view', []) }),
)
# The docutils writer to use, can be html4 or html5, html5 writer is internal code of
# sveedocuments
RSTVIEW_PARSER_WRITER = "html5"
#####
#
# 2. Optionnal
#
#####
# Medias directory name
MEDIA_DIRNAME = 'medias'
# Static directory name
STATIC_DIRNAME = 'static'
# URL that handles the media served from ``MEDIA_ROOT``. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
# If you use a URL for this option, you will then have to hard-code the
# value of ``MEDIA_ROOT`` manually
MEDIA_URL = '/{0}/'.format(MEDIA_DIRNAME)
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = os.path.join(PROJECT_DIR, MEDIA_DIRNAME)+"/"
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/{0}/'.format(STATIC_DIRNAME)
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(PROJECT_DIR, STATIC_DIRNAME)+"/"
# Additional locations of static files
STATICFILES_DIRS = (
os.path.join(PROJECT_DIR, 'webapp_statics/'),
)
ASSETS_ROOT = os.path.join(PROJECT_DIR, 'webapp_statics/')
# Absolute paths to your template directories
TEMPLATE_DIRS = (
os.path.join(PROJECT_DIR, 'templates/'),
)
LOCALE_PATHS = (
os.path.join(PROJECT_DIR, 'locale'),
)
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Europe/Paris'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'fr'
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Default URL to redirect to just after successful login
LOGIN_REDIRECT_URL = "/"
# Days until a waiting registration is closed
ACCOUNT_ACTIVATION_DAYS = 3
# Default layout to use with "crispy_forms"
CRISPY_TEMPLATE_PACK = 'foundation'
# Add some addtional templates
# NOTE: Usage of ugettext_lazy in settings should be avoided
DOCUMENTS_PAGE_TEMPLATES = {
'homepage': ('sveedocuments/homepage_with_flat_menu.html', ugettext_lazy('Home page with automatic app list')),
}
# Custom cache keys to remove with clearcache command option
DOCUMENTS_CACHE_KEYS_TO_CLEAN = ["applications_toc_on_homepage"]
# Forbidden words for slug values in documents to avoid clashes in urls
DOCUMENTS_PAGE_RESERVED_SLUGS = (
'add', 'admin', 'board', 'preview', 'inserts', 'documents-help', 'sitemap', # for sveedocuments
'djangocodemirror-sample', # for djangocodemirror sample
'accounts', 'captcha', # for sveeaccounts
'tribune', # for djangotribune
)
# Cookie name used to store and retrieve user settings for the editor
DJANGOCODEMIRROR_USER_SETTINGS_COOKIE_NAME = "djangocodemirror_user_settings"
# Additional Django-CodeMirror settings for sveedocuments
CODEMIRROR_SETTINGS = {
'sveetchies-documents-page': {
'mode': 'rst',
'csrf': 'CSRFpass',
'preview_url': ('documents-preview',),
'quicksave_url': ('documents-page-quicksave',),
'quicksave_datas': 'DJANGOCODEMIRROR_OBJECT',
'lineWrapping': False,
'lineNumbers': True,
'search_enabled': True,
'settings_cookie': DJANGOCODEMIRROR_USER_SETTINGS_COOKIE_NAME,
'help_link': ('documents-help',),
'settings_url': ('documents-editor-settings', [], {}),
},
'sveetchies-documents-insert': {
'mode': 'rst',
'csrf': 'CSRFpass',
'preview_url': ('documents-preview',),
'quicksave_url': ('documents-insert-quicksave',),
'quicksave_datas': 'DJANGOCODEMIRROR_OBJECT',
'lineWrapping': False,
'lineNumbers': True,
'search_enabled': True,
'settings_cookie': DJANGOCODEMIRROR_USER_SETTINGS_COOKIE_NAME,
'help_link': ('documents-help',),
'settings_url': ('documents-editor-settings', [], {}),
},
}
#####
#
# 3. Don't touch this
#
#####
# Assets bundles module
ASSETS_MODULES = (
'DjangoSveetchies.assets',
)
# For debug_toolbar
DEBUG_TOOLBAR_CONFIG = {
'INTERCEPT_REDIRECTS': False,
}
DEBUG_TOOLBAR_PANELS = (
#'debug_toolbar_user_panel.panels.UserPanel',
#'inserdiag_webapp.utils.debugtoolbar_filter.InserdiagVersionDebugPanel',
#'debug_toolbar.panels.timer.TimerDebugPanel',
'debug_toolbar.panels.settings_vars.SettingsVarsDebugPanel',
'debug_toolbar.panels.headers.HeaderDebugPanel',
'debug_toolbar.panels.request_vars.RequestVarsDebugPanel',
'debug_toolbar.panels.template.TemplateDebugPanel',
'debug_toolbar.panels.sql.SQLDebugPanel',
#'debug_toolbar.panels.signals.SignalDebugPanel',
#'debug_toolbar.panels.logger.LoggingPanel',
)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.request',
'django.contrib.messages.context_processors.messages',
'DjangoSveetchies.utils.site_metas',
'autobreadcrumbs.context_processors.AutoBreadcrumbsContext',
'sveedocuments.context_processors.SveedocumentsContext',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
#'debug_toolbar.middleware.DebugToolbarMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'DjangoSveetchies.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'DjangoSveetchies.wsgi.application'
INSTALLED_APPS = (
'django_assets',
'captcha',
'crispy_forms',
'crispy_forms_foundation',
#'debug_toolbar',
'mptt',
'registration',
'rstview',
'autobreadcrumbs',
'djangocodemirror',
'sveeaccounts',
'sveedocuments',
'djangotribune',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.markup',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
| mit | 6,801,798,735,232,977,000 | 32.515152 | 217 | 0.689421 | false |
CS4098/GroupProject | src/test/uitest/uitest.py | 1 | 3665 | import unittest
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
class pmlFile(unittest.TestCase):
def setUp(self):
self.driver = webdriver.Remote("http://localhost:4444/wd/hub", webdriver.DesiredCapabilities.HTMLUNIT.copy())
def testUI(self):
driver = self.driver
driver.get("http://localhost/GroupProject/")
fileUpload = driver.find_element_by_name("pmlfile")
fileUpload.send_keys("src/test/uitest/test.pml")
fileUpload.submit()
pmlFile = driver.find_element_by_id("pml")
assert pmlFile.text != ""
def tearDown(self):
self.driver.quit()
class promelaGenerator(unittest.TestCase):
def setUp(self):
self.driver = webdriver.Remote("http://localhost:4444/wd/hub", webdriver.DesiredCapabilities.HTMLUNIT.copy())
def testUI(self):
driver = self.driver
driver.get("http://localhost/GroupProject/")
fileUpload = driver.find_element_by_name("pmlfile")
fileUpload.send_keys("src/test/uitest/test.pml")
fileUpload.submit()
promela = driver.find_element_by_id("promela")
assert promela.text != ""
def tearDown(self):
self.driver.quit()
class spinOutput(unittest.TestCase):
def setUp(self):
self.driver = webdriver.Remote("http://localhost:4444/wd/hub", webdriver.DesiredCapabilities.HTMLUNIT.copy())
def testUI(self):
driver = self.driver
driver.get("http://localhost/GroupProject/")
fileUpload = driver.find_element_by_name("pmlfile")
fileUpload.send_keys("src/test/uitest/test.pml")
fileUpload.submit()
resourceValue = driver.find_element_by_name("resourcefile")
resourceValue.submit()
spin = driver.find_element_by_id("spin")
assert spin.text != ""
def tearDown(self):
self.driver.quit()
class spinTrailOutput(unittest.TestCase):
def setUp(self):
self.driver = webdriver.Remote("http://localhost:4444/wd/hub", webdriver.DesiredCapabilities.HTMLUNIT.copy())
def testUI(self):
driver = self.driver
driver.get("http://localhost/GroupProject/")
fileUpload = driver.find_element_by_name("pmlfile")
fileUpload.send_keys("src/test/uitest/test.pml")
fileUpload.submit()
resourceValue = driver.find_element_by_name("resourcefile")
resourceValue.submit()
spin = driver.find_element_by_id("spintrail")
assert spin.text != ""
def tearDown(self):
self.driver.quit()
class predicate(unittest.TestCase):
def setUp(self):
self.driver = webdriver.Remote("http://localhost:4444/wd/hub", webdriver.DesiredCapabilities.HTMLUNIT.copy())
def testUI(self):
driver = self.driver
driver.get("http://localhost/GroupProject/")
fileUpload = driver.find_element_by_name("pmlfile")
fileUpload.send_keys("src/test/uitest/test.pml")
fileUpload.submit()
button = driver.find_element_by_id("predicate")
button.click()
resource = driver.find_element_by_id("resource").text
button.submit()
predicate = driver.find_element_by_id("predicate").text
value = predicate.encode("ascii").split("<> ")[1].split(")")[0]
assert resource == value
def tearDown(self):
self.driver.quit()
if __name__ == "__main__":
unittest.main()
| mit | -7,975,876,089,453,846,000 | 31.149123 | 117 | 0.660846 | false |
InUrSys/PescArt2.0 | src/srcPlus/GenericCalculoEstim.py | 1 | 8058 | '''
Created on 24/11/2017
@author: chernomirdinmacuvele
'''
from PyQt5.Qt import QDialog, QStandardItemModel, QStandardItem, QComboBox,\
QModelIndex
import mixedModel
import QT_tblViewUtility
import CustomWidgets
import FuncSQL
import searchSimple
import pprint
class GenericCalculoEstim(QDialog):
def CBTextHint(self, Combox=None):
mdel = QStandardItemModel(Combox.model())
firstIndex = mdel.index(0, Combox.modelColumn(), Combox.rootModelIndex())
firstItem = QStandardItem(mdel.itemFromIndex(firstIndex))
firstItem.setSelectable(False)
def setCB(self):
lstquer = ["select null as id, '-ANO-' as nome UNION all (select distinct data_ano, cast (data_ano as text) as d_ano from view_ano_mes order by data_ano)",
"select null as id, '-ANO-' as nome UNION all (select distinct data_ano, cast (data_ano as text) as d_ano from view_ano_mes order by data_ano)",
"select null as id, '-Agrupamento-' as nome UNION all (select mes_num, nome from prc_agrupamentos order by mes_num)",
"select null as id, '-Tipo de Dia-' as nome union all select id, nome from ref_table where id_grupo = 'TPD'",
"select null as id, '-Periodo do Dia-' as nome union all SELECT id, nome FROM public.prc_periodo_dia;"]
lstWdg = [self.CBAno_inicio, self.CBAno_Fim, self.CBAgrupamento, self.CBTipDia, self.CBPeriodoDia]
for idx, val in enumerate (lstWdg):
model = mixedModel.setQueryModel(query = lstquer[idx])
val.setModel(model)
val.setModelColumn(1)
def openToSearch(self):
dictlstwdg= {
'TBEspecies':[self.CBEspecie, "select nome from ref_especies ", "select id from ref_especies where nome = '{nome}'"],
'TBSexo': [self.CBSexo, "select nome from ref_table where id_grupo = 'SEX'", "select id from ref_table where id_grupo = 'SEX' and nome = '{nome}'"]
}
val = dictlstwdg.get(self.sender().objectName())
if val is not None:
bOK, rsModel = FuncSQL.multLineSelect(scpt= val[1])
model = CustomWidgets.modelWithCheck_search(lstResult=rsModel )
if val[0].count() > 0:
lstIn = self.getALLItemFromCB(cb=val[0])
popSearch = searchSimple.simpleSearch(lastSelected= lstIn ,model=model)
else:
popSearch = searchSimple.simpleSearch(model=model)
popSearch.exec_()
if popSearch.close():
bOK, lstIn = popSearch.bOK
if bOK:
lstID = []
val[0].clear()
val[0].addItems(lstIn)
for nome in lstIn:
bok, Id = FuncSQL.anySelectScript(scpt= val[2].format(nome = nome))
if bok:
lstID.append(Id)
def getALLItemFromCB(self, cb=None):
rows = cb.count()
lstOut = []
for i in range(rows):
val = cb.itemText(i)
lstOut.append(val)
return lstOut
def setAgruToDefault(self):
self.CBAgrupamento.setCurrentIndex(0)
def setTView(self):
for idx, val in enumerate(self.dictTblView ['lstquer']):
View= self.dictTblView ['lstView'][idx]
Size= self.dictTblView ['size'][idx]
Hide= self.dictTblView ['toHide'][idx]
model = mixedModel.setQueryModel(query= val)
QT_tblViewUtility.setViewCustom(tblView= View, lstSizeCol=Size)
QT_tblViewUtility.setModelInView(tblView= View, ViewModel = model, toHide=Hide)
def setCBMes(self, val=-99):
quer = "select null, '-MES-' UNION all select cast (data_mes as text), mes_nome from view_ano_mes where data_ano = {inicioAno}".format(inicioAno= val)
model = mixedModel.setQueryModel(query = quer)
dictlstWdg= {self.CBAno_inicio.objectName(): self.CBMes_inicio,
self.CBAno_Fim.objectName(): self.CBMes_Fim
}
val = dictlstWdg.get(self.sender().objectName())
if val is not None:
val.setModel(model)
val.setModelColumn(1)
def getVal_Combox(self):
val = mixedModel.getDataCombox(widg=self.sender())
self.setCBMes(val= val)
def getAllToCalculo(self):
lstAno = self.getAno() # start and end year
lstMes_Agru_tipDia = self.getMes_Agru_tipDia() # start month, end month, grouping, type of day
lstPeriodo = self.getPeriodo() # period of the day
lstEspe_Sexo = self.getEspe_Sexo() # list of species, list of sexes
lstArte_Catego = self.getArtes_Categ()
pprint.pprint(lstAno)
pprint.pprint(lstMes_Agru_tipDia)
pprint.pprint(lstPeriodo)
pprint.pprint(lstEspe_Sexo)
pprint.pprint(lstArte_Catego)
def getAno(self):
lstWdg = [self.CBAno_inicio, self.CBAno_Fim]
lstOut = []
for val in (lstWdg):
if val.currentIndex() != 0:
ano = val.currentText()
lstOut.append(ano)
return lstOut
def getMes_Agru_tipDia(self):
lstWdg = [self.CBMes_inicio, self.CBMes_Fim, self.CBAgrupamento, self.CBTipDia]
lstOut = []
for idx, val in enumerate (lstWdg):
if val.currentIndex() != 0:
txtOut = mixedModel.getDataCombox(widg= val)
tpOut = [txtOut,val.currentText()]
lstOut.append(tpOut)
return lstOut
def getPeriodo(self):
wdg = self.CBPeriodoDia
quer = "SELECT id, nome, inicio, fim FROM public.prc_periodo_dia where nome like '{nome}';"
lstOut =None
if wdg.currentIndex() != 0:
nome = wdg.currentText()
bOK,lstIn = FuncSQL.anySelectScript(scpt=quer.format(nome = nome))
if bOK:
lstOut = lstIn
return lstOut
def getEspe_Sexo(self):
dDict = {
'widget':[self.CBEspecie, self.CBSexo],
'query':["SELECT id, nome FROM public.ref_especies where nome like '{nome}';",
"SELECT id, nome FROM public.ref_table where id_grupo like 'SEX' and nome like '{nome}';"]
}
lstQuer = dDict['query']
lstWdg = dDict['widget']
lstOut = [[],[]]
for idx, val in enumerate (lstWdg):
bOK=False
if val == self.CBEspecie:
if self.RBTodos.isChecked():
quer = "SELECT id, nome FROM public.ref_especies;"
bOK, lstIn = FuncSQL.multLineSelect(scpt= quer)
else:
rows = val.count()
if rows > 0:
for i in range(rows):
txtIn = val.itemText(i)
bOK, lstIn = FuncSQL.anySelectScript(scpt= lstQuer[idx].format(nome=txtIn))
else:
rows = val.count()
if rows > 0:
for i in range(rows):
txtIn = val.itemText(i)
bOK, lstIn = FuncSQL.anySelectScript(scpt= lstQuer[idx].format(nome=txtIn))
if bOK:
lstOut[idx].append(lstIn)
return lstOut
def getArtes_Categ(self):
lstClicked = [self.arteClicked, self.categoriaClicked]
lstOut=[]
for val in lstClicked:
if val is not None:
row = val.row()
model = val.model()
valOut = [model.record(row).value(0), model.record(row).value(1)]
lstOut.append(valOut)
return lstOut
| gpl-3.0 | 2,110,455,335,392,600,800 | 39.094527 | 167 | 0.542194 | false |
wmaciel/crowd-sketch-filter | src/splitter.py | 1 | 1854 | import sys
import os
import shutil
from PIL import Image
def generate_1d_box_limits(dim, num_divs):
'''
Generates the limits of a crop box in a single dimension
Example, if dim is 512 pixels and num_divs is 5, it should return:
[(0, 103), (103, 206), (206, 308), (308, 410), (410, 512)]
'''
assert dim >= num_divs
div_size = dim // num_divs # integer division keeps the box limits as ints
missing_pixels = dim % num_divs
boxes = []
d_0 = 0
while d_0 < dim:
d_1 = d_0 + div_size
if missing_pixels > 0:
d_1 += 1
missing_pixels -= 1
boxes.append((d_0, d_1))
d_0 = d_1
return boxes
def split_image(file_path, n_x, n_y, out_folder):
'''
Splits the image from file_path into n_x * n_y pieces and saves them inside out_folder
'''
# Overwrites out_folder if it already exists!
if os.path.isdir(out_folder):
shutil.rmtree(out_folder)
os.makedirs(out_folder)
img_path_list = []
with Image.open(file_path) as original:
original_width, original_height = original.size
boxes_x = generate_1d_box_limits(original_width, n_x)
boxes_y = generate_1d_box_limits(original_height, n_y)
# Merge 1d boxes into 2d boxes
boxes = []
for box_y in boxes_y:
for box_x in boxes_x:
box = (box_x[0], box_y[0], box_x[1], box_y[1])
boxes.append(box)
for box in boxes:
region = original.crop(box)
filename = str(box[0]) + '_' + str(box[1]) + '_' + str(box[2]) + '_' + str(box[3]) + '.bmp'
out_path = os.path.join(out_folder, filename)
region.save(out_path)
img_path_list.append(out_path)
return img_path_list
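# Example (illustrative): split_image('photo.bmp', 2, 2, 'tiles') would write four
# crops named '<left>_<top>_<right>_<bottom>.bmp' into the (re-created) 'tiles'
# folder and return the list of their paths.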
if __name__ == '__main__':
split_image(sys.argv[1], int(sys.argv[2]), int(sys.argv[3]), sys.argv[4])
| mit | -5,730,984,335,753,049,000 | 26.671642 | 103 | 0.552319 | false |
jrbainter/netana | netana/equations.py | 1 | 1464 | from griddialog import *
from wequ import wequ
import pickle
from tkinter.messagebox import showerror
class Equations():
def cleanup(self,dig):
if dig.status == "Save":
self.Mat = dig.get()
self.SaveEquations()
if dig.top: dig.top.destroy()
def SaveEquations(self):
# Save Mat Equations to file
if len(self.Mat) > 1: # if it has been defined
pickle.dump(self.Mat, open(self.EquFileName ,'wb'))
def RestoreEquations(self):
if os.path.exists(self.EquFileName):
self.Mat=pickle.load(open(self.EquFileName,'rb'))
def getequ(self):
# Build Grid Table and get Equations
if os.path.exists(self.EquFileName):
self.RestoreEquations()
dig = GridDialog(self.parent,self.Mat,collab=self.AnalType)
self.cleanup(dig)
elif os.path.exists(self.NetFileName):
self.Mat = wequ(self.NetFileName)
dig = GridDialog(self.parent,self.Mat,collab=self.AnalType)
self.cleanup(dig)
else:
dig = GridDialog(self.parent,size=self.Nodes,collab=self.AnalType)
self.cleanup(dig)
if __name__ == "__main__":
import os,pickle
from tkinter import *
root = Tk()
os.chdir('/home/jim/test')
eq = Equations(root)
eq.Mat = [[1,2,3,4],
[5,6,7,8],
[9,10,11,12],
[13,14,15,16]]
eq.AnalType = "Node"
eq.Nodes=4
eq.EquFileName = "/home/jim/test/Wein_Bridge.equ"
#eq.NetFileName = "/home/jim/test/BainterFilter.net"
eq.EquFileName=""
#eq.NetFileName=""
eq.getequ()
print('Mat = {}'.format(eq.Mat))
root.mainloop()
| gpl-3.0 | 7,628,883,418,903,884,000 | 21.181818 | 69 | 0.68306 | false |