repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated
---|---|---|---|---|---|---|---|---|---|---|
adamfisk/littleshoot-client | server/appengine/littleshoot/jsonpickle/__init__.py | 1 | 2991 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2008 John Paulett (john -at- 7oars.com)
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
"""Python library for serializing any arbitrary object graph into JSON
>>> import jsonpickle
>>> from jsonpickle.tests.classes import Thing
Create an object.
>>> obj = Thing('A String')
>>> print obj.name
A String
Use jsonpickle to transform the object into a JSON string.
>>> pickled = jsonpickle.dumps(obj)
>>> print pickled
{"classname__": "Thing", "child": null, "name": "A String", "classmodule__": "jsonpickle.tests.classes"}
Use jsonpickle to recreate a Python object from a JSON string
>>> unpickled = jsonpickle.loads(pickled)
>>> print unpickled.name
A String
The new object has the same type and data, but essentially is now a copy of the original.
>>> obj == unpickled
False
>>> obj.name == unpickled.name
True
>>> type(obj) == type(unpickled)
True
If you will never need to load (regenerate the Python class from JSON), you can
pass in the keyword unpicklable=False to prevent extra information from being
added to JSON.
>>> oneway = jsonpickle.dumps(obj, unpicklable=False)
>>> print oneway
{"name": "A String", "child": null}
"""
__version__ = '0.0.5'
__all__ = [
'dump', 'dumps', 'load', 'loads'
]
from django.utils import simplejson as json
#import simplejson as json
from pickler import Pickler
from unpickler import Unpickler
def dump(value, file, **kwargs):
"""Saves a JSON formatted representation of value into file.
"""
j = Pickler(unpicklable=__isunpicklable(kwargs))
json.dump(j.flatten(value), file)
def dumps(value, **kwargs):
"""Returns a JSON formatted representation of value, a Python object.
Optionally takes a keyword argument unpicklable. If set to False,
the output does not contain the information necessary to
recreate the Python object from the JSON (one-way serialization only).
>>> dumps('my string')
'"my string"'
>>> dumps(36)
'36'
"""
j = Pickler(unpicklable=__isunpicklable(kwargs))
return json.dumps(j.flatten(value))
def load(file):
"""Converts the JSON string in file into a Python object
"""
j = Unpickler()
return j.restore(json.load(file))
def loads(string):
"""Converts the JSON string into a Python object.
>>> loads('"my string"')
u'my string'
>>> loads('36')
36
"""
j = Unpickler()
return j.restore(json.loads(string))
def __isunpicklable(kw):
"""Utility function for finding keyword unpicklable and returning value.
Default is assumed to be True.
>>> __isunpicklable({})
True
>>> __isunpicklable({'unpicklable':True})
True
>>> __isunpicklable({'unpicklable':False})
False
"""
if 'unpicklable' in kw and not kw['unpicklable']:
return False
return True
| gpl-2.0 | -2,917,782,080,440,255,000 | 24.945946 | 104 | 0.646606 | false |
CS2014/USM | usm/communications/migrations/0001_initial.py | 1 | 8519 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'TextMessage'
db.create_table(u'communications_textmessage', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('society', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['main.Society'])),
('recipients', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['societymembers.SocietyMember'])),
('name', self.gf('django.db.models.fields.CharField')(max_length=40)),
('tag', self.gf('django.db.models.fields.CharField')(max_length=30)),
('content', self.gf('django.db.models.fields.CharField')(max_length=160)),
('source_phone_number', self.gf('django.db.models.fields.CharField')(max_length=25)),
))
db.send_create_signal(u'communications', ['TextMessage'])
# Adding model 'Email'
db.create_table(u'communications_email', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('society', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['main.Society'])),
('recipients', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['societymembers.SocietyMember'])),
('name', self.gf('django.db.models.fields.CharField')(max_length=40)),
('tag', self.gf('django.db.models.fields.CharField')(max_length=30)),
('content', self.gf('django.db.models.fields.CharField')(max_length=160)),
('sending_address', self.gf('django.db.models.fields.CharField')(max_length=60)),
))
db.send_create_signal(u'communications', ['Email'])
def backwards(self, orm):
# Deleting model 'TextMessage'
db.delete_table(u'communications_textmessage')
# Deleting model 'Email'
db.delete_table(u'communications_email')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'communications.email': {
'Meta': {'object_name': 'Email'},
'content': ('django.db.models.fields.CharField', [], {'max_length': '160'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'recipients': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['societymembers.SocietyMember']"}),
'sending_address': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'society': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['main.Society']"}),
'tag': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
u'communications.textmessage': {
'Meta': {'object_name': 'TextMessage'},
'content': ('django.db.models.fields.CharField', [], {'max_length': '160'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'recipients': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['societymembers.SocietyMember']"}),
'society': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['main.Society']"}),
'source_phone_number': ('django.db.models.fields.CharField', [], {'max_length': '25'}),
'tag': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'main.society': {
'Meta': {'object_name': 'Society'},
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.User']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'societymembers.societymember': {
'Meta': {'object_name': 'SocietyMember'},
'email_address': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'join_date': ('django.db.models.fields.DateTimeField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'phone_number': ('django.db.models.fields.CharField', [], {'max_length': '25'}),
'society': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['main.Society']"}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['societymembers.Tag']", 'symmetrical': 'False'})
},
u'societymembers.tag': {
'Meta': {'object_name': 'Tag'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'society': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['main.Society']"})
}
}
complete_apps = ['communications'] | mit | 5,974,675,831,424,042,000 | 66.619048 | 195 | 0.568963 | false |
RussianOtter/networking | RoAIRC.py | 1 | 2810 | import sys, time, threading, RoA, string, random, socket
from random import randint
from binascii import hexlify
import DH
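# Wire format (as implemented below; descriptive notes, not original comments): every UDP
# datagram broadcast to 255.255.255.255:50000 is header() + "\x0f" + payload, where the
# header is the string form of the tuple (ip, b, c) and the payload is the RoA-encrypted
# tuple produced by encrypt(). Direct messages first run a Diffie-Hellman exchange
# (DH.hellman / DH.hellman_client) to derive a per-peer key, which encryption_handler()
# masks inside the packet before it is broadcast.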
Ro = RoA.RoA(False)
port = 50000
key = "savsecro"*4
user = "[%s] " %raw_input("Username: ")
try:
socket.setdefaulttimeout(2)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(("8.8.8.8", 53))
ip = sock.getsockname()[0]
sock.close()
except Exception as e:
ip = "127.0.0.1"
socket.setdefaulttimeout(1000)
def header(b="",c=""):
head = ip,b,c
return str(head) + "\x0f"
def encrypt(msg,key):
roa_out = Ro.encrypt(msg,key)
return str(roa_out)
def decrypt(roa_in):
try:
roa_in = eval(roa_in)
decrypted = Ro.decrypt(roa_in[0],roa_in[1],roa_in[2])
return decrypted
except Exception as e:
pass
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind(("", port))
s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
def encryption_handler(packet,key):
p = packet.replace(key,"!!!key!!!")
p = p.replace(key[16:],"!!!sub!!!")
return p
def daemon_hound():
while 1:
data = s.recv(5000)
if eval(data.split("\x0f")[0])[1] == "direct" and ip in data:
try:
DH.hellman_client(eval(data.split("\x0f")[0])[2])
v = "h_"+eval(data.split("\x0f")[0])[0].replace(".","")
time.sleep(1)
exec "globals()['%s'] = DH.%s" % (v,v)
while 1:
globals()[v] = int(globals()[v])*32+6**2
if len(str(globals()[v])) >= 32:
break
globals()[v] = str(globals()[v])[:32]
except:
pass
globals()["stop"] = True
elif eval(data.split("\x0f")[0])[2] == "dm" and eval(data.split("\x0f")[0])[1] == ip:
print
print decrypt(data.replace("!!!key!!!",globals()[v]).replace("!!!sub!!!",globals()[v][16:]).split("\x0f")[1])
else:
data = data.split("\x0f")
d = decrypt(data[1])
if d is not None:
print "\n"+d
daemon = threading.Thread(target=daemon_hound)
daemon.daemon = True
daemon.start()
print "[DAEMON] SNIFFING"
time.sleep(0.5)
DH.hellman()
time.sleep(0.5)
stop = False
while 1:
try:
msg = raw_input("\n=> ")
except:
break
if len(msg) == 0:
msg = "0"
data = header() + encrypt(user+msg,key)
d_host = raw_input("HOST => ")
direct = header("direct",d_host)
if len(d_host) > 7:
data = direct
if len(msg) > 0:
s.sendto(data, ("255.255.255.255", port))
if len(d_host) > 7:
while not stop:
time.sleep(0.5)
if len(d_host) > 7:
v = str("h_"+d_host.replace(".",""))
exec "globals()['%s'] = DH.%s" % (v,v)
while 1:
globals()[v] = int(globals()[v])*32+6**2
if len(str(globals()[v])) >= 32:
break
globals()[v] = str(globals()[v])[:32]
msg = raw_input("[%s] => " %d_host)
pack = encrypt("[Direct]" + user+msg,globals()[v])
pack = encryption_handler(pack,globals()[v])
s.sendto(header(d_host,"dm")+pack,("255.255.255.255", port))
time.sleep(1)
stop = False
| gpl-3.0 | 7,521,344,470,537,331,000 | 23.867257 | 112 | 0.6 | false |
gvanderheide/discreteMarkovChain | setup.py | 1 | 1495 | """A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='discreteMarkovChain',
version='0.22',
description='Solve Markov chains with a discrete state space.',
long_description=long_description,
url='https://github.com/gvanderheide/discreteMarkovChain',
author='Gerlach van der Heide',
author_email='[email protected]',
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Mathematics',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Operating System :: OS Independent',
],
keywords='Markov chain stochastic stationary steady state',
packages=find_packages(),
install_requires=['numpy','scipy'],
test_suite='tests',
)
| mit | 773,850,345,115,588,600 | 31.977273 | 67 | 0.637458 | false |
weaselkeeper/utils | AWS-utils/instance-report.py | 1 | 8003 | #!/usr/bin/env python
# vim: set expandtab:
# with thanks to https://github.com/bpennypacker/ec2-check-reserved-instances.git
# For the initial idea, and an idea of how to do this.
PROJECTNAME = 'ec2-check-reserved-instances'
import os
import sys
import ConfigParser
import logging
import boto
import boto.ec2
from pprint import pprint
from collections import defaultdict
# Setup logging
logging.basicConfig(level=logging.WARN,
format='%(asctime)s %(levelname)s - %(message)s',
datefmt='%y.%m.%d %H:%M:%S')
# Setup logging to console.
console = logging.StreamHandler(sys.stderr)
console.setLevel(logging.WARN)
logging.getLogger(PROJECTNAME).addHandler(console)
log = logging.getLogger(PROJECTNAME)
AWS_REGIONS = [ 'us-east-1',
'us-east-2',
'us-west-1',
'us-west-2' ]
INSTANCE_TYPES = [ 'c3.large',
'c4.xlarge',
'm1.small',
'm3.large',
'm3.medium',
'm3.xlarge',
'm4.4xlarge',
'm4.large',
'r3.2xlarge',
'r3.large',
'r3.xlarge',
't2.medium',
't2.nano',
't2.small', ]
def get_options():
""" Parse the command line options"""
import argparse
parser = argparse.ArgumentParser(
description='Instance-report reports on your ec2 reserved vs on demand instances')
parser.add_argument('-d', '--debug', action='store_true',
help='Enable debugging during execution.',
default=None)
parser.add_argument('-p', '--profile', action='store', help='Which AWS profile to use.')
parser.add_argument('-r', '--region', action='store', default='us-east-1',
help='Must be valid region in AWS_REGIONS list, if empty, defaults to us-east-1')
parser.add_argument('-N', '--names', help="Include names or instance IDs of instances that fit non-reservations", required=False, action='store_true')
parser.add_argument('-t', '--type', action='store', help='Specific instance type')
parser.add_argument('-R', '--report', action='store_true',help='instance report', default=False)
_args = parser.parse_args()
_args.usage = PROJECTNAME + ".py [options]"
return _args
def get_args():
""" we only run if called from main """
_args = get_options()
if _args.debug:
log.setLevel(logging.DEBUG)
else:
log.setLevel(logging.WARN)
if _args.type:
if _args.type not in INSTANCE_TYPES:
sys.exit('invalid instance type %s:' % _args.type)
return _args
def get_connection(args):
if args.profile:
if args.region:
ec2_conn = boto.ec2.connect_to_region( args.region, profile_name=args.profile )
else:
ec2_conn = boto.connect_ec2( profile_name=args.profile )
else:
if args.region:
ec2_conn = boto.ec2.connect_to_region( args.region )
else:
ec2_conn = boto.connect_ec2()
return ec2_conn
def get_report(conn,args):
reservations = conn.get_all_instances()
running_instances = {}
instance_ids = defaultdict(list)
for reservation in reservations:
for instance in reservation.instances:
# ignore spot instances and non running instances.
if instance.state != "running":
log.info("Non-running instance %s: Ignoring\n" % ( instance.id ) )
elif instance.spot_instance_request_id:
log.info("Spot instance %s: Ignoring\n" % ( instance.id ) )
else:
az = instance.placement
instance_type = instance.instance_type
if args.type is None or args.type == instance_type:
running_instances[ (instance_type, az ) ] = running_instances.get( (instance_type, az ) , 0 ) + 1
if "Name" in instance.tags and len(instance.tags['Name']) > 0:
instance_ids[ (instance_type, az ) ].append(instance.tags['Name'])
else:
instance_ids[ (instance_type, az ) ].append(instance.id)
reserved_instances = {}
for reserved_instance in conn.get_all_reserved_instances():
if reserved_instance.state != "active":
log.info( "Inactive reserved instance %s: \n" % ( reserved_instance.id ) )
else:
az = reserved_instance.availability_zone
instance_type = reserved_instance.instance_type
if args.type is None or args.type == instance_type:
reserved_instances[( instance_type, az) ] = reserved_instances.get ( (instance_type, az ), 0 ) + reserved_instance.instance_count
""" value is neg for on demand, pos for unused reservations. """
instance_diff = dict([(x, reserved_instances[x] - running_instances.get(x, 0 )) for x in reserved_instances])
for key in running_instances:
if key not in reserved_instances:
instance_diff[key] = -running_instances[key]
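    # Worked example of the sign convention: with reserved = {('m3.large', 'us-east-1a'): 3}
    # and running = {('m3.large', 'us-east-1a'): 5}, instance_diff ends up as
    # {('m3.large', 'us-east-1a'): -2}, i.e. two on-demand instances not covered by reservations.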
unused_reservations = dict((key,value) for key, value in instance_diff.iteritems() if value > 0)
if unused_reservations == {}:
if args.type:
print ("Congratulations, you have no unused reservations of type %s:" % args.type)
else:
print "Congratulations, you have no unused reservations"
else:
for unused_reservation in unused_reservations:
print "UNUSED RESERVATION!\t(%s)\t%s\t%s" % ( unused_reservations[ unused_reservation ], unused_reservation[0], unused_reservation[1] )
print ""
unreserved_instances = dict((key,-value) for key, value in instance_diff.iteritems() if value < 0)
if unreserved_instances == {}:
if args.type:
print ("Congratulations, you have no unreserved instances of type %s:" % args.type)
else:
print "Congratulations, you have no unreserved instances"
else:
ids=""
for unreserved_instance in unreserved_instances:
if args.names:
ids = ', '.join(sorted(instance_ids[unreserved_instance]))
print "Non-reserved:\t%s\t%s\t%s\t%s" % ( unreserved_instances[ unreserved_instance ], unreserved_instance[0], unreserved_instance[1], ids )
if running_instances.values():
qty_running_instances = reduce( lambda x, y: x+y, running_instances.values() )
else:
qty_running_instances = 0
if reserved_instances.values():
qty_reserved_instances = reduce( lambda x, y: x+y, reserved_instances.values() )
else:
qty_reserved_instances = 0
print "\n(%s) running on-demand instances\n(%s) reservations" % ( qty_running_instances, qty_reserved_instances )
if args.report:
all_keys = {}
for key in reserved_instances.keys():
all_keys[key] = 0
for key in running_instances.keys():
all_keys[key] = 0
for key in all_keys.keys():
try:
running = running_instances[key]
except KeyError:
running = 0
try:
reserved = reserved_instances[key]
except KeyError:
reserved = 0
all_keys[key] = (running,reserved)
print"type AZ running reserved over/under"
for key in all_keys.keys():
_type = key[0]
AZ = key[1]
running, reserved = all_keys[key]
miss = reserved - running
print ("%s %s %s %s %s" % (_type, AZ, running, reserved, miss ) )
# Here we start if called directly (the usual case.)
if __name__ == "__main__":
# This is where we will begin when called from CLI. No need for argparse
# unless being called interactively, so import it here
args = get_args()
# get the instance report
conn = get_connection(args)
sys.exit(get_report(conn,args))
| gpl-2.0 | 5,559,435,648,501,981,000 | 35.711009 | 154 | 0.591403 | false |
itissid/gv | pygerrit/gerrit_api.py | 1 | 4985 | import os
import urlparse
import json
import vim
import requests
from requests.auth import HTTPDigestAuth
from pygerrit.salting import check_hash, AuthObject
# Until I can get the kerberos auth figured out,
# I made a SSH Tunnel and just do all the things I want to.
XSSI_STRING = ")]}'"
PROJECT_JSON_KEY = 'project'
BRANCH_JSON_KEY = 'branch'
SUBJECT_JSON_KEY = 'subject'
CHANGE_ID_KEY = 'change_id'
def auth_and_get_result(query_string=None):
"""
Does some boilerplate management for authorization and
getting data based on the query_string.
TODO(sid): Implement different types of auth schemes
based on variables set in the vim configuration (e.g. g:gv_auth_method).
"""
plugin_path = vim.eval("g:gv_plugin_root")
auth_method = vim.eval("g:gv_auth_method")
url_end_point = vim.eval("g:gv_root_url")
password_auth_method_name = vim.eval("g:gv_password_auth_scheme_name")
def _inner_auth_and_get_result(f):
def _inner_(*args, **kwargs):
#TODO(Sid): Instead of branches try calling a
# method that returns an instance of the AuthObject that you
# can use to make the REST request?
if auth_method == password_auth_method_name:
# TODO(Sid): Remove me to another method
config = json.load(open(os.path.join(
plugin_path, 'config/config.json')))
auth_object = AuthObject(
password=config['password'],
username=config['username'])
basic_auth = (auth_object.username, auth_object.password)
digest_auth = HTTPDigestAuth(*basic_auth)
authentication_url = urlparse.urljoin(url_end_point, '/a')
# In case the query string was parameterized
if 'format_params' in kwargs:
_query_string = query_string.format(**kwargs['format_params'])
else:
_query_string = query_string
print _query_string
auth_response = requests.get(
authentication_url + _query_string,
auth=digest_auth)
if auth_response.status_code == 200:
kwargs['response'] = json.loads(
auth_response.text.replace(XSSI_STRING, ''))
else:
raise ValueError(
'Received non 200 response %d. Text: %s'
% (auth_response.status_code, auth_response.text))
else:
# Here will go the future auth schemes that gerrit requires.
raise ValueError('Auth scheme not recognized.')
return f(*args, **kwargs)
return _inner_
return _inner_auth_and_get_result
@auth_and_get_result(
query_string='/changes/?q=is:open+owner:self&o=CURRENT_REVISION')
def gerrit_status(response=None):
response.sort(key=lambda x: (
x[PROJECT_JSON_KEY],
x[BRANCH_JSON_KEY],
x[SUBJECT_JSON_KEY]))
# Create a buffer
gerrit_status_buffer_name = vim.eval('g:gerrit_status_buffer_name')
vim.command(":badd %s" % gerrit_status_buffer_name)
vim.command("vert sb %s" % gerrit_status_buffer_name)
vim.command(":setlocal buftype=nofile")
vim.command(":setlocal bufhidden=wipe")
vim.command(":setlocal modifiable")
vim.command("let g:gerrit_change_id_lookup={}")
gerrit_status_buffer = None
# Now write to the buffer
# TODO(Sid): Figure out the encoding issues
# Figure out a display format
for b in vim.buffers:
if b.name and b.name.find(gerrit_status_buffer_name) >= 0:
gerrit_status_buffer = b
gerrit_status_buffer[:] = None
gerrit_status_buffer.append(20 * "-")
for change in response:
project_name = change[PROJECT_JSON_KEY].encode('utf-8')
branch_name = change[BRANCH_JSON_KEY].encode('utf-8')
subject = change[SUBJECT_JSON_KEY].encode('utf-8')
change_id = change[CHANGE_ID_KEY].encode('utf-8')
partial_change_id = change_id[0:10]
revision_lookup_id = "%s~%s~%s" % (
project_name, branch_name,
change[CHANGE_ID_KEY].encode('utf-8'))
gerrit_status_buffer.append("P| %s" % project_name)
gerrit_status_buffer.append((" B| %s" % branch_name))
gerrit_status_buffer.append(
(" S| (%s..) %s" % (partial_change_id, subject)))
store_command = ":let g:gerrit_change_id_lookup[\"%s\"] = \"%s\"" % (
partial_change_id, revision_lookup_id)
vim.command(store_command)
gerrit_status_buffer.append(20 * "-")
vim.command(":setlocal noma")
@auth_and_get_result(
query_string='/changes/{change_id}/revisions/{revision_id}/files/')
def display_change_contents(
response=None, format_params={}):
"""
Given an item selected from the gerrit status
change window, invoking the right key bindings shows
the changes that are part of the latest patch.
You must call this function with non-null values for the change_id and the
revision_id.
Here is the documentation for this:
https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#list-files
format_params should have the following =
{'change_id': None, 'revision_id': None}
"""
print response
@auth_and_get_result(
query_string='/changes/{change_id}/revisions/{revision_id}/files/{file_id}/contents')
def display_file_contents(
response=None, format_params={}):
"""
format_params should have the following = {
'change_id': ..., 'revision_id': ..., 'file_id': ...
}
"""
print response
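# A hypothetical call (not in the original plugin) illustrating the expected format_params;
# the change id would normally come from g:gerrit_change_id_lookup built by gerrit_status(),
# and all values below are placeholders:
#
#   display_file_contents(format_params={
#       'change_id': 'myproject~master~I0123456789abcdef',   # placeholder triplet id
#       'revision_id': 'current',                            # placeholder revision
#       'file_id': 'README.md',                              # placeholder file path
#   })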
| gpl-2.0 | -2,036,446,997,875,048,700 | 32.911565 | 87 | 0.694283 | false |
utn-frm-si/reservas | reservas/settings/base.py | 1 | 4810 | # coding=utf-8
"""
Django settings for reservas project.
Generated by 'django-admin startproject' using Django 1.8.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import random
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# Set the secret key from the 'DJANGO_SECRET_KEY' environment variable, or generate a
# random key if it is not set.
SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY',
''.join([random.SystemRandom()
.choice('abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)')
for i in range(50)]))
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Project administrators.
ADMINS = []
MANAGERS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'djangobower',
'app_facturacion',
'app_reservas.apps.ReservasConfig',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'reservas.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
'debug': DEBUG,
},
},
]
WSGI_APPLICATION = 'reservas.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Set the URL prefix for the Django project, according to the web
# server configuration.
DJANGO_URL_PREFIX = os.environ.get('DJANGO_URL_PREFIX', '')
# Format the URL prefix so that it takes the form '<prefix>/'.
# 1. Strip leading and trailing slashes, in case the prefix has more than one.
DJANGO_URL_PREFIX = DJANGO_URL_PREFIX.strip('/')
# 2. Append a single trailing slash if the prefix did not end up empty after the
# previous operation.
if DJANGO_URL_PREFIX:
DJANGO_URL_PREFIX += '/'
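# For example, DJANGO_URL_PREFIX='/reservas/' becomes 'reservas/', while an unset or
# empty value stays '' so the URLs get no prefix.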
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_URL = '/' + DJANGO_URL_PREFIX + 'static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'djangobower.finders.BowerFinder',
)
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/' + DJANGO_URL_PREFIX + 'media/'
BOWER_COMPONENTS_ROOT = os.path.join(BASE_DIR, 'components')
BOWER_INSTALLED_APPS = (
'bootstrap-datepicker',
'bootswatch-dist#flatly',
'font-awesome',
'fullcalendar-scheduler',
'handsontable',
'jquery',
'pace',
'qtip2',
'slick-carousel',
)
# Google Calendar token, used to query event information
# from the Google Calendar calendars.
GOOGLE_CALENDAR_TOKEN = os.environ.get('GOOGLE_CALENDAR_TOKEN', '')
BROKER_URL = os.environ.get('BROKER_URL', 'amqp://guest:guest@rabbit//')
| mit | 2,310,515,445,167,360,000 | 27.921687 | 98 | 0.684857 | false |
stmobo/Machine-Learning | generative-waifu-network/discriminator_network.py | 1 | 7668 | import tensorflow as tf
import tensorflow.contrib.slim as slim
import numpy as np
import selu
from common import *
def discriminator_parameters(parser):
group = parser.add_argument_group('Discriminator Network Parameters', description='These parameters control the structure of the discriminator / critic network.')
group.add_argument('--inception-modules', action='store_true', help='Discriminator network will be built from Inception modules if true')
group.add_argument('--dsc-final-depth', type=int, default=64, help='Final number of discirminator conv filters (before image output layer)')
group.add_argument('--dsc-bottleneck-depth', type=int, default=64, help='Bottleneck layer depth for Inception modules')
group.add_argument('--dsc-layers', type=int, default=4, help='Number of discriminator conv layers (not including output layer)')
group.add_argument('--dsc-activation', help='Activation function to use for discriminator network')
group.add_argument('--dsc-weight-clip', type=float, default=1e-2, help='Critic network weight clipping values (for Wasserstein GANs)')
group.add_argument('--dsc-normalizer', help='Normalization function to use in network layers (valid values are \'batch\', \'layer\', \'none\')')
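# A minimal usage sketch (an assumption, not part of the original module): wire the CLI
# options into a parser and build the critic. The input placeholder shape and any extra
# attributes referenced elsewhere in the class (e.g. args.label_size in output_layer) are
# assumed to be provided by the rest of the training script.
#
#   parser = argparse.ArgumentParser()
#   discriminator_parameters(parser)
#   args = parser.parse_args(['--dsc-activation', 'lrelu', '--dsc-normalizer', 'layer'])
#   images = tf.placeholder(tf.float32, [16, 64, 64, 3])        # NHWC image batch
#   critic, pred_labels = Discriminator(args, images).build()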
class Discriminator:
def __init__(self, args, image_in, labels_in=None):
self.args = args
self.image = image_in
# build()/input_layer() reference self.labels, so keep the (optional) conditioning labels here.
self.labels = labels_in
def build(self, scope='Discriminator', reuse=None):
with tf.variable_scope(scope, reuse=reuse):
net = self.input_layer(self.image, self.labels)
for layer in range(self.args.dsc_layers):
current_layer_depth = self.args.dsc_final_depth // (2 ** (self.args.dsc_layers - layer - 1))
if self.args.inception_modules:
net = self.inception_module(net, current_layer_depth, scope='Inception{:d}'.format(layer))
else:
net = self.conv_layer(net, current_layer_depth, scope='Conv{:d}'.format(layer))
self.critic, self.pred_labels = self.output_layer(net)
self.vars = slim.get_trainable_variables(scope=scope)
return self.critic, self.pred_labels
def activation_fn(self):
if self.args.dsc_activation == 'relu':
return tf.nn.relu
elif self.args.dsc_activation == 'lrelu':
return leaky_relu
elif self.args.dsc_activation == 'selu':
return selu.selu
raise ValueError("Invalid value for --dsc-activation: " + self.args.dsc_activation)
def initializer_fn(self):
if self.args.dsc_activation == 'relu' or self.args.dsc_activation == 'lrelu':
return None
else:
return selu.initializer
def normalizer_fn(self):
if self.args.dsc_normalizer == 'batch':
return slim.fused_batch_norm
elif self.args.dsc_normalizer == 'layer':
return slim.layer_norm
elif self.args.dsc_normalizer == 'none':
return None
raise ValueError("Invalid value for --dsc-normalizer: " + self.args.dsc_normalizer)
def network_arg_scope(self):
return slim.arg_scope([slim.fully_connected, slim.conv2d, slim.conv2d_transpose], activation_fn=self.activation_fn(), weights_initializer=self.initializer_fn())
def clip_network_weights(self, apply_grad_ops):
with tf.control_dependencies([apply_grad_ops]):
clipped_weights = [tf.clip_by_value(w, -self.args.dsc_weight_clip, self.args.dsc_weight_clip) for w in self.vars]
return tf.group(*clipped_weights)
def input_gradient_norms(self):
input_grads = tf.gradients(self.critic, self.image, name='dsc-input-gradients')  # gradient of the critic output w.r.t. the input image
return tf.norm(input_grads, axis=(1,2)) # Note: Compute norms over height and width (axes 1,2 for NHWC and 2,3 for NCHW)
def inception_module(self, tensor_in, output_depth, scope='Inception'):
batch_size, input_height, input_width, input_depth = tensor_in.shape.as_list()
output_height, output_width = conv_output_size(input_height, 2), conv_output_size(input_width, 2)
bottleneck_depth = self.args.dsc_bottleneck_depth
head_depth = output_depth // 4  # assumed: the four heads concatenated below each take a quarter of the requested output depth
with slim.arg_scope(self.network_arg_scope()):
head_pool = slim.avg_pool2d(tensor_in, kernel_size=3, stride=2, padding='SAME', scope='AvgPool')
head_1x1 = slim.conv2d(tensor_in, head_depth, kernel_size=1, stride=2, scope='Conv1x1')
head_3x3 = slim.conv2d(tensor_in, bottleneck_depth, kernel_size=1, scope='Conv3x3-BN')
head_5x5 = slim.conv2d(tensor_in, bottleneck_depth, kernel_size=1, scope='Conv5x5-BN')
head_pool = slim.conv2d(head_pool, head_depth, kernel_size=1, scope='AvgPool-BN')
head_3x3 = slim.conv2d(head_3x3, head_depth, kernel_size=3, scope='Conv3x3')
# 5x5 conv as stacked 3x3 convolutions
head_5x5 = slim.conv2d(head_5x5, head_depth, kernel_size=3, scope='Conv5x5-1')
head_5x5 = slim.conv2d(head_5x5, head_depth, kernel_size=3, scope='Conv5x5-2')
head_pool = tf.reshape(head_pool, [batch_size, output_height, output_width, head_depth])
head_1x1 = tf.reshape(head_1x1, [batch_size, output_height, output_width, head_depth])
head_3x3 = tf.reshape(head_3x3, [batch_size, output_height, output_width, head_depth])
head_5x5 = tf.reshape(head_5x5, [batch_size, output_height, output_width, head_depth])
out = tf.concat([head_pool, head_1x1, head_3x3, head_5x5], axis=3)
norm_fn = self.normalizer_fn()
if norm_fn is not None:
out = norm_fn(out)
return out
def conv_layer(self, tensor_in, output_depth, scope='Conv'):
batch_size, input_height, input_width, input_depth = tensor_in.shape.as_list()
output_height, output_width = conv_output_size(input_height, 2), conv_output_size(input_width, 2)
with slim.arg_scope(self.network_arg_scope()):
net = slim.conv2d(tensor_in, output_depth, kernel_size=3, stride=2, normalizer_fn=self.normalizer_fn(), scope=scope)
net = tf.reshape(net, [batch_size, output_height, output_width, output_depth])
return net
def input_layer(self, image_in, labels_in):
batch_size, input_height, input_width, input_depth = image_in.shape.as_list()
output_height, output_width = conv_output_size(input_height, 2), conv_output_size(input_width, 2)
output_depth = self.args.dsc_final_depth // (2 ** self.args.dsc_layers)
with slim.arg_scope(self.network_arg_scope()):
net = slim.conv2d(image_in, output_depth, kernel_size=3, scope='Input-Conv1')
net = slim.conv2d(net, output_depth, kernel_size=3, stride=2, scope='Input-Conv2')
# projected_labels = slim.fully_connected(labels_in, output_height * output_width, scope='LabelProjection')
# projected_labels = tf.reshape(projected_labels, [batch_size, output_height, output_width, 1])
# net = tf.concat([net, projected_labels], axis=3)
return net
def output_layer(self, tensor_in):
batch_size, input_height, input_width, input_depth = tensor_in.shape.as_list()
flat_in = tf.reshape(tensor_in, [batch_size, -1])
critic_out = slim.fully_connected(
flat_in,
1,
activation_fn=None,
scope='Critic',
)
labels_out = slim.fully_connected(
flat_in,
self.args.label_size,
activation_fn=None,
scope='PredLabels',
)
return critic_out, labels_out
| mit | -7,778,606,382,283,700,000 | 48.792208 | 168 | 0.64254 | false |
letsmeet-click/letsmeet.click | letsmeet/tests/test_home.py | 1 | 1580 | generic_menu_items = [
'Home', 'All Communities', 'Contact', 'About/FAQ', 'Legal/Contact',
]
logged_out_menu_items = ['Login']
logged_in_menu_items = ['Logout']
advertisements = [
'No registration required',
'Organize your event attendees',
'Use your existing communication channels',
'It\'s free',
'Kickstart your event',
]
dashboard = [
'You have no upcoming events.',
'You currently have no active community subscriptions.',
'<noscript>0</noscript> Communities',
'<noscript>0</noscript> upcoming confirmed events',
'Currently no upcoming events',
]
def test_home(client):
resp = client.get('/')
assert resp.status_code == 200
for menu_item in generic_menu_items + logged_out_menu_items:
assert menu_item.encode() in resp.content
for menu_item in logged_in_menu_items:
assert menu_item.encode() not in resp.content
for advertisement in advertisements:
assert advertisement.encode() in resp.content
for item in dashboard:
assert item.encode() not in resp.content
def test_home_logged_in(logged_in_client):
resp = logged_in_client.get('/')
assert resp.status_code == 200
for menu_item in generic_menu_items + logged_in_menu_items:
assert menu_item.encode() in resp.content
for menu_item in logged_out_menu_items:
assert menu_item.encode() not in resp.content
for advertisement in advertisements:
assert advertisement.encode() not in resp.content
for item in dashboard:
assert item.encode() in resp.content
| mit | 5,441,017,305,872,724,000 | 25.779661 | 71 | 0.672785 | false |
glumu/django-redis-cluster | django_redis_cluster/cache.py | 1 | 4615 | #coding:utf8
import functools
import logging
from django.conf import settings
from django.core.cache.backends.base import BaseCache
from .utils import load_class
from .exceptions import ConnectionInterrupted
# Exception handling configuration
DJANGO_REDIS_CLUSTER_IGNORE_EXCEPTIONS = getattr(settings, 'DJANGO_REDIS_CLUSTER_IGNORE_EXCEPTIONS', False)
DJANGO_REDIS_CLUSTER_LOG_IGNORED_EXCEPTIONS = getattr(settings, 'DJANGO_REDIS_CLUSTER_LOG_IGNORED_EXCEPTIONS', False)
DJANGO_REDIS_CLUSTER_LOGGER = getattr(settings, 'DJANGO_REDIS_CLUSTER_LOGGER', False)
if DJANGO_REDIS_CLUSTER_IGNORE_EXCEPTIONS:
logger = logging.getLogger((DJANGO_REDIS_CLUSTER_LOGGER or __name__))
def omit_exception(method=None, return_value=None):
'''Decorator that handles connection exceptions.'''
if method is None:
return functools.partial(omit_exception, return_value=return_value)
@functools.wraps(method)
def _decorator(self, *args, **kwargs):
try:
return method(self, *args, **kwargs)
except ConnectionInterrupted as e:
if self._ignore_exceptions:
if DJANGO_REDIS_CLUSTER_LOG_IGNORED_EXCEPTIONS:
logger.error(str(e))
return return_value
raise e.parent
return _decorator
class RedisClusterCache(BaseCache):
def __init__(self, server, params):
super(RedisClusterCache, self).__init__(params)
self._server = server
self._params = params
options = params.get('OPTIONS', {})
self._client_cls = options.get("CLIENT_CLASS", "django_redis_cluster.client.DefaultClient")
self._client_cls = load_class(self._client_cls)
self._client = None
self._ignore_exceptions = options.get('IGNORE_EXCEPTIONS', DJANGO_REDIS_CLUSTER_IGNORE_EXCEPTIONS)
@property
def client(self):
'''
Lazy client connection property
'''
if self._client is None:
self._client = self._client_cls(self._server, self._params, self)
return self._client
@omit_exception
def add(self, *args, **kwargs):
return self.client.add(*args, **kwargs)
@omit_exception
def set(self, *args, **kwargs):
return self.client.set(*args, **kwargs)
@omit_exception
def set_many(self, *args, **kwargs):
return self.client.set_many(*args, **kwargs)
@omit_exception
def get(self, key, default=None, version=None):
try:
return self.client.get(key, default=default, version=version)
except ConnectionInterrupted as e:
if DJANGO_REDIS_CLUSTER_LOG_IGNORED_EXCEPTIONS:
logger.error(str(e))
return default
raise
@omit_exception(return_value={})
def get_many(self, *args, **kwargs):
return self.client.get_many(*args, **kwargs)
@omit_exception
def delete(self, *args, **kwargs):
return self.client.delete(*args, **kwargs)
@omit_exception
def delete_pattern(self, *args, **kwargs):
return self.client.delete_pattern(*args, **kwargs)
@omit_exception
def delete_many(self, *args, **kwargs):
return self.client.delete_many(*args, **kwargs)
@omit_exception
def clear(self):
return self.client.clear()
@omit_exception
def keys(self, *args, **kwargs):
return self.client.keys(*args, **kwargs)
@omit_exception
def has_key(self, *args, **kwargs):
return self.client.has_key(*args, **kwargs)
@omit_exception
def lock(self, *args, **kwargs):
return self.client.lock(*args, **kwargs)
@omit_exception
def incr_version(self, *args, **kwargs):
return self.client.incr_version(*args, **kwargs)
@omit_exception
def incr(self, *args, **kwargs):
return self.client.incr(*args, **kwargs)
@omit_exception
def decr(self, *args, **kwargs):
return self.client.decr(*args, **kwargs)
@omit_exception
def iter_keys(self, *args, **kwargs):
return self.client.iter_keys(*args, **kwargs)
@omit_exception
def ttl(self, *args, **kwargs):
return self.client.ttl(*args, **kwargs)
@omit_exception
def persist(self, *args, **kwargs):
return self.client.persist(*args, **kwargs)
@omit_exception
def expire(self, *args, **kwargs):
return self.client.expire(*args, **kwargs)
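# A minimal settings sketch (an assumption, not part of the original module) showing how the
# backend could be configured in Django; LOCATION is a placeholder and the OPTIONS keys
# mirror what __init__ reads above (CLIENT_CLASS, IGNORE_EXCEPTIONS):
#
#   CACHES = {
#       'default': {
#           'BACKEND': 'django_redis_cluster.cache.RedisClusterCache',
#           'LOCATION': 'redis://127.0.0.1:7000/0',
#           'OPTIONS': {
#               'CLIENT_CLASS': 'django_redis_cluster.client.DefaultClient',
#               'IGNORE_EXCEPTIONS': True,
#           },
#       },
#   }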
| bsd-3-clause | 1,818,026,592,783,556,900 | 26.793939 | 117 | 0.604144 | false |
CSC-IT-Center-for-Science/chipster-job-manager | jobmanager/tests/test_utils.py | 1 | 2079 | from jobmanager.utils import parse_msg_body
class TestUtils(object):
def setUp(self):
pass
def test_parse_msg_body(self):
d1 = {"map": {"entry": [{"string": ["command", "choose"]}, {"string": ["named-parameter-value1", "8a98c0c3-5560-41df-8b86-67435b25d565"]}, {"string": ["named-parameter-key1", "as-id"]}, {"string": ["named-parameter-value0", "bab581a8-0d74-47e1-a753-18b4ad032153"]}, {"string": ["named-parameter-key0", "job-id"]}]}}
r1 = parse_msg_body(d1)
c1 = {'job-id': 'bab581a8-0d74-47e1-a753-18b4ad032153', 'as-id': '8a98c0c3-5560-41df-8b86-67435b25d565', 'command': 'choose'}
assert r1 == c1
d2 = {"map": {"entry": [{"string": ["payload_input", "214a6078-9ee9-476c-932d-d073fc7658fd"]}, {"string": ["analysisID", "test-data-in.R"]}, {"string": ["jobID", "bab581a8-0d74-47e1-a753-18b4ad032153"]}]}}
r2 = parse_msg_body(d2)
c2 = {'analysisID': 'test-data-in.R', 'payload_input': '214a6078-9ee9-476c-932d-d073fc7658fd', 'jobID': 'bab581a8-0d74-47e1-a753-18b4ad032153'}
assert r2 == c2
d3 = {u'map': {u'entry': [{u'null': u'', u'string': u'errorMessage'}, {u'string': [u'jobId', u'7f03370e-714d-450d-8707-4cf4b478fadf']}, {u'string': [u'outputText', u'''data-input-test.txt")''']}, {u'string': [u'sourceCode', u'chipster.tools.path = "aa")']}, {u'string': [u'stateDetail', u'transferring output data']}, {u'string': [u'exitState', u'RUNNING']}]}}
r3 = parse_msg_body(d3)
c3 = {u'outputText': u'data-input-test.txt")', u'jobId': u'7f03370e-714d-450d-8707-4cf4b478fadf', u'sourceCode': u'chipster.tools.path = "aa")', u'exitState': u'RUNNING', u'stateDetail': u'transferring output data'}
assert r3 == c3
d4 = {u'map': {u'entry': [{u'string': [u'command', u'cancel']}, {u'string': [u'named-parameter-value0', u'96f7ab23-2fa8-4a81-8097-de99af1c74fa']}, {u'string': [u'named-parameter-key0', u'job-id']}]}}
r4 = parse_msg_body(d4)
c4 = {u'job-id': u'96f7ab23-2fa8-4a81-8097-de99af1c74fa', u'command': u'cancel'}
assert r4 == c4
| mit | -6,488,086,427,102,607,000 | 85.625 | 368 | 0.617605 | false |
ktarrant/options_csv | heatmap.py | 1 | 3194 | import plotly.plotly as py
import plotly.graph_objs as go
from options_csv import OUTPUT_FILENAME_PREFIX_FORMAT
import re
import os
import pandas as pd
def build_heatmap(openInt_df):
trace = go.Heatmap(z=openInt_df.fillna(0).as_matrix(),
x=openInt_df.columns,
y=openInt_df.index)
data=[trace]
layout = go.Layout(
zaxis=dict(
type='log',
autorange=True
)
)
fig = go.Figure(data=data, layout=layout)
py.plot(fig, filename='expiration-heatmap')
def build_colorscale(openInt_df):
data = [{
'x': openInt_df.columns,
'y': openInt_df.index,
'z': openInt_df.fillna(0).as_matrix(),
'type': 'heatmap',
'colorscale': [
# [0, 'rgb(250, 250, 250)'], #0
# [1./10000, 'rgb(200, 200, 200)'], #10
# [1./1000, 'rgb(150, 150, 150)'], #100
# [1./100, 'rgb(100, 100, 100)'], #1000
# [1./10, 'rgb(50, 50, 50)'], #10000
# [1., 'rgb(0, 0, 0)'], #100000
[0, 'rgb(250, 250, 250)'], #0
[1./16.0, 'rgb(200, 200, 200)'], #10
[1./8.0, 'rgb(150, 150, 150)'], #100
[1./4.0, 'rgb(100, 100, 100)'], #1000
[1./2.0, 'rgb(50, 50, 50)'], #10000
[1., 'rgb(0, 0, 0)'], #100000
],
'colorbar': {
'tick0': 0,
'tickmode': 'array',
'tickvals': [0, 1000, 10000, 100000]
}
}]
layout = {'title': 'Log Colorscale'}
fig = {'data': data, 'layout': layout}
py.plot(fig, filename='expiration-heatmap')
def get_combined_options_data(csv_date, symbol, indir):
expected_prefix = OUTPUT_FILENAME_PREFIX_FORMAT.format(date=csv_date, ticker=symbol)
options_tables = {}
for root, dirs, files in os.walk(indir):
for file in files:
if file.startswith(str(csv_date)) and file.endswith(".csv"):
expiration_with_ext = file.split("_")[-1]
expiration = expiration_with_ext.split(".")[0][3:]
filename = os.path.join(root, file)
options_table = pd.DataFrame.from_csv(filename)
options_tables[expiration] = options_table
return pd.Panel(options_tables)
if __name__ == "__main__":
import argparse
import datetime
parser = argparse.ArgumentParser(description="Makes a heatmap from a sequence of expirations")
parser.add_argument("--symbol", default="spx", help="Symbol in CSV files")
parser.add_argument("--indir", default=os.getcwd(), help="Directory to look for CSV files")
args = parser.parse_args()
# TODO: Add date selection (just use today for now...)
args.csv_date = datetime.date.today()
options_panel = get_combined_options_data(args.csv_date, args.symbol, args.indir)
print(options_panel)
call_openInt = options_panel.minor_xs('call_Open Int.')
print(call_openInt)
build_colorscale(call_openInt)
| mit | 7,383,383,247,199,349,000 | 36.576471 | 194 | 0.520977 | false |
exaroth/subrosa | subrosa/models/UserImagesModel.py | 1 | 2702 | # -*- coding: utf-8 -*-
"""
subrosa.models.ArticlesModel
=========================
Implements model and methods related to subrosa images
:copyright: (c) 2014 by Konrad Wasowicz
:license: MIT, see LICENSE for details.
"""
import datetime
from peewee import *
from subrosa.models.BaseModel import BaseModel
from subrosa.models.UsersModel import Users
from subrosa.helpers import handle_errors
from subrosa import db
class UserImages(BaseModel):
image_link = TextField()
date_added = DateTimeField(default=datetime.datetime.utcnow)  # pass the callable so each row gets its own timestamp, not the import-time value
delete_hash = TextField(null=True)
description = TextField(null=True)
is_vertical = IntegerField(null=True)
gallery = BooleanField(default=False)
imgur_img = BooleanField(default=False)
owner = ForeignKeyField(Users, related_name="images")
@staticmethod
def get_image(id):
return UserImages.get_single("id", id)
@staticmethod
def check_exists(image_link):
return (UserImages.select()
.where(UserImages.image_link == image_link)
.exists())
@staticmethod
def get_gallery_images(page, per_page, username=None, gallery=False):
q = UserImages.select()
if username:
q = q.join(Users).where(Users.username == username)
if gallery:
return q.where(UserImages.gallery == True).paginate(page, per_page)
return q.paginate(page, per_page)
@staticmethod
@db.commit_on_success
def gallerify(image):
try:
is_gallerified = image.gallery
image.gallery = not is_gallerified
image.save()
except Exception as e:
handle_errors("Error updating image")
@staticmethod
@db.commit_on_success
def add_image(image_link,
description,
owner,
is_vertical=True,
imgur_img=False,
delete_hash=None):
try:
UserImages.create(
image_link=image_link,
description=description,
is_vertical=is_vertical,
owner=owner,
imgur_img=imgur_img,
delete_hash=delete_hash,
)
return 1
except Exception as e:
handle_errors("Error creating image")
raise
@staticmethod
@db.commit_on_success
def delete_image(image):
try:
image.delete_instance()
return 1
except Exception as e:
handle_errors("Error deleting image")
raise
def __repr__(self):
return "<Image: {0}>".format(self.image_link)
| gpl-3.0 | -7,331,649,137,862,401,000 | 26.55102 | 79 | 0.584815 | false |
jlindenger/python-http-client | setup.py | 1 | 1244 | import sys
import os
from setuptools import setup
long_description = 'Please see our GitHub README'
if os.path.exists('README.txt'):
long_description = open('README.txt').read()
def getRequires():
deps = []
if (2, 6) <= sys.version_info < (2, 7):
deps.append('unittest2')
return deps
base_url = 'https://github.com/sendgrid/'
version = '3.0.0'
setup(
name='python_http_client',
version=version,
author='Elmer Thomas',
author_email='[email protected]',
url='{0}python-http-client'.format(base_url),
download_url='{0}python-http-client/tarball/{1}'.format(base_url, version),
packages=['python_http_client'],
license='MIT',
description='HTTP REST client, simplified for Python',
long_description=long_description,
install_requires=getRequires(),
keywords=[
'REST',
'HTTP',
'API'],
classifiers=[
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6'
]
)
| mit | -5,175,553,856,241,423,000 | 28.619048 | 79 | 0.614148 | false |
rubennj/persistence | persistence.py | 1 | 9060 | # -*- coding: utf-8 -*-
"""
Created on Tue Jan 10 20:09:35 2017
@author: ruben
"""
import os
from functools import wraps
import pandas as pd
def persist_timeseries_to_file(filename_cache=None):
"""
Persists a Pandas DataFrame object returned by a function into a cache file
using a decorator, so it decorates the function that returns the
pd.DataFrame.
The function receives some extra parameters to be used by the
decorator (and to make it explicit it is advised to add them in the
definition of the function even if they are not used in the non-cached
version). This approach allows to modify them in each instance of the
function:
- enable_cache=False : actually enables the cache (allows to choose it)
- path_cache=None : path where the cache file is saved. If None, it takes
the current path
- update_cache=False : It forces to update the cache file, even if there
are data in it
The original function must also take a parameter named 'time': a
pd.DatetimeIndex used as the index of the returned pd.DataFrame.
Parameters
----------
filename_cache : String, default None
Name of cache file
Returns
-------
decorator : function
Function that will persist data into cache file
"""
if filename_cache is None:
raise ValueError('A cache-file name is required.')
persistence_type = filename_cache.split('.')[1]
def decorator(original_func):
"""
Decorator function
"""
# The main intended use for @wraps() is to wrap the decorated function
# and return the wrapper.
# If the wrapper function is not updated, the metadata of the returned
# function will reflect the wrapper definition rather than the original
# function definition, which is typically less than helpful.
@wraps(original_func)
def new_func(time, enable_cache=False, path_cache=None,
update_cache=False, verbose_cache=False, *args, **kwargs):
"""
Decorated function
"""
if not enable_cache:
return original_func(time, *args, **kwargs)
if path_cache is None:
path_cache = os.path.abspath('')
if not os.path.exists(path_cache):
os.makedirs(path_cache)
path_file_cache = os.path.join(path_cache, filename_cache)
if verbose_cache:
print('> Path cache:', path_file_cache)
try:
if persistence_type == 'csv':
cache = pd.read_csv(path_file_cache, index_col=0, parse_dates=True)
elif persistence_type == 'pickle':
cache = pd.read_pickle(path_file_cache)
elif persistence_type == 'json':
cache = pd.read_json(path_file_cache)
else:
raise ValueError('Unknown type of persistence', persistence_type)
if verbose_cache:
print('> Reading cache...')
except (IOError, ValueError):
if verbose_cache:
print('> Cache empty')
cache = pd.DataFrame()
if not update_cache:
if time.isin(cache.index).all():
data = cache.loc[time]
if verbose_cache:
print('> Cache contains requested data')
else:
if verbose_cache:
print('> Reading data source...')
data = original_func(time, **kwargs)
if not data.empty:
if persistence_type == 'csv':
pd.concat([data, cache], join='inner').to_csv(path_file_cache)
elif persistence_type == 'pickle':
pd.concat([data, cache], join='inner').to_pickle(path_file_cache)
elif persistence_type == 'json':
pd.concat([data, cache], join='inner').to_json(path_file_cache)
else:
raise ValueError('Unknown type of persistence', persistence_type)
if verbose_cache:
print('> Updating cache with requested data...')
else:
if verbose_cache:
print('> Cache not updated because requested data is empty')
else:
data = original_func(time, **kwargs)
if persistence_type == 'csv':
data.to_csv(path_file_cache)
elif persistence_type == 'pickle':
data.to_pickle(path_file_cache)
elif persistence_type == 'json':
data.to_json(path_file_cache)
if verbose_cache:
print('> Saving data in cache...')
return data
return new_func
return decorator
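# A minimal usage sketch (an assumption, not part of the original module): decorate a loader
# whose first argument is the pd.DatetimeIndex 'time', then opt into caching per call.
# 'load_power_data' and its column name are hypothetical.
#
#   @persist_timeseries_to_file(filename_cache='power.csv')
#   def load_power_data(time, enable_cache=False, path_cache=None,
#                       update_cache=False, verbose_cache=False):
#       return pd.DataFrame({'power': 0.0}, index=time)
#
#   times = pd.date_range('2017-01-01', periods=24, freq='H')
#   df = load_power_data(times, enable_cache=True, verbose_cache=True)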
def persist_df_to_file(filename_cache=None):
"""
Persists a Pandas DataFrame object returned by a function into a cache file
using a decorator, so it decorates the function that returns the
pd.DataFrame.
The function receives some extra parameters to be used by the
decorator (and to make it explicit it is advised to add them in the
definition of the function even if they are not used in the non-cached
version). This approach allows to modify them in each instance of the
function:
- enable_cache=False : actually enables the cache (allows to choose it)
- path_cache=None : path where the cache file is saved. If None, it takes
the current path
- update_cache=False : It forces to update the cache file, even if there
are data in it
Parameters
----------
filename_cache : String, default None
Name of cache file
Returns
-------
decorator : function
Function that will persist data into cache file
"""
if filename_cache is None:
raise ValueError('A cache-file name is required.')
persistence_type = filename_cache.split('.')[1]
def decorator(original_func):
"""
Decorator function
"""
# The main intended use for @wraps() is to wrap the decorated function
# and return the wrapper.
# If the wrapper function is not updated, the metadata of the returned
# function will reflect the wrapper definition rather than the original
# function definition, which is typically less than helpful.
@wraps(original_func)
def new_func(enable_cache=False, path_cache=None,
update_cache=False, verbose_cache=False, *args, **kwargs):
"""
Decorated function
"""
if not enable_cache:
return original_func(*args, **kwargs)
if path_cache is None:
path_cache = os.path.abspath('')
if not os.path.exists(path_cache):
os.makedirs(path_cache)
path_file_cache = os.path.join(path_cache, filename_cache)
if verbose_cache:
print('> Path cache:', path_file_cache)
try:
if persistence_type == 'csv':
cache = pd.read_csv(path_file_cache, index_col=0, parse_dates=True)
elif persistence_type == 'pickle':
cache = pd.read_pickle(path_file_cache)
elif persistence_type == 'json':
cache = pd.read_json(path_file_cache)
else:
raise ValueError('Unknown type of persistence', persistence_type)
if verbose_cache:
print('> Reading cache...')
except (IOError, ValueError):
if verbose_cache:
print('> Cache empty')
cache = pd.DataFrame()
if not update_cache and not cache.empty:
data = cache
if verbose_cache:
print('> Cache contains data')
else:
data = original_func(**kwargs)
if persistence_type == 'csv':
data.to_csv(path_file_cache)
elif persistence_type == 'pickle':
data.to_pickle(path_file_cache)
elif persistence_type == 'json':
data.to_json(path_file_cache)
if verbose_cache:
print('> Saving data in cache...')
return data
return new_func
return decorator | mit | -3,302,659,504,509,129,700 | 37.920705 | 93 | 0.528918 | false |
mansonul/events | events/contrib/plugins/form_elements/fields/date_drop_down/base.py | 1 | 2604 | from __future__ import absolute_import
from django.forms.extras.widgets import SelectDateWidget
from django.forms.fields import DateField
from django.utils.translation import ugettext_lazy as _
from fobi.base import FormFieldPlugin, get_theme
from . import UID
from .forms import DateDropDownInputForm
__title__ = 'fobi.contrib.plugins.form_elements.fields.' \
'date_drop_down.base'
__author__ = 'Artur Barseghyan <[email protected]>'
__copyright__ = '2014-2017 Artur Barseghyan'
__license__ = 'GPL 2.0/LGPL 2.1'
__all__ = ('DateDropDownInputPlugin',)
theme = get_theme(request=None, as_instance=True)
class DateDropDownInputPlugin(FormFieldPlugin):
"""Date drop down field plugin."""
uid = UID
name = _("Date drop down")
group = _("Fields")
form = DateDropDownInputForm
def get_form_field_instances(self, request=None, form_entry=None,
form_element_entries=None, **kwargs):
"""Get form field instances."""
widget_attrs = {
'class': theme.form_element_html_class,
'type': 'date',
}
years = None
if self.data.year_min and self.data.year_max:
years = range(self.data.year_min, self.data.year_max)
field_kwargs = {
'label': self.data.label,
'help_text': self.data.help_text,
'initial': self.data.initial,
# 'input_formats': self.data.input_formats,
'required': self.data.required,
'widget': SelectDateWidget(attrs=widget_attrs, years=years),
}
# if self.data.input_formats:
# kwargs['input_formats'] = self.data.input_formats
return [(self.data.name, DateField, field_kwargs)]
def submit_plugin_form_data(self, form_entry, request, form,
form_element_entries=None, **kwargs):
"""Submit plugin form data/process.
:param fobi.models.FormEntry form_entry: Instance of
``fobi.models.FormEntry``.
:param django.http.HttpRequest request:
:param django.forms.Form form:
"""
        # If we should submit the value as is, we don't return anything.
# In other cases, we proceed further.
# Get the object
value = form.cleaned_data.get(self.data.name, None)
try:
value = value.strftime("%Y-%m-%d")
except Exception as err:
pass
# Overwrite ``cleaned_data`` of the ``form`` with object qualifier.
form.cleaned_data[self.data.name] = value
return form
| mit | 6,885,615,648,063,343,000 | 32.384615 | 76 | 0.610983 | false |
myt00seven/svrg | svrg_bn/draw_4_bndecay_smaller.py | 1 | 15643 | # This is used to draw three comparisons for SGD+BN, SVRG+BN and Streaming SVRG +BN
import matplotlib
import matplotlib.pyplot as plt
plt.switch_backend('agg')
import pylab
import numpy as np
import sys
# all the methods are with BN layers!!!
def deminish(a) :
end_factor=0.4
length = len(a)
for i in range(length):
a[i] = a[i] * (1-(1-end_factor)*(i/float(length)))
return a
def moving_average(a, n=3) :
ret = np.cumsum(a, dtype=float)
ret[n:] = ret[n:] - ret[:-n]
return ret[n - 1:] / n
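# Illustrative example of moving_average (n is the window size):
#   moving_average(np.array([1, 2, 3, 4]), n=2)  ->  array([ 1.5,  2.5,  3.5])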
PATH_DATA_adagrad = "data/"
PATH_DATA_SVRG = "data/"
PATH_DATA_Stream = "data/"
PATH_DATA = "data/"
PATH_FIGURE = "figure_bndecay/"
STARTPOINT = 5
LINEWIDTH = 2
DRAW_COMPARE = True
DRAW_Line1 = True
LOAD_SVRG = True
DRAW_Line1 = True
DRAW_Line2 = True
DRAW_Line3 = True
DRAW_Line4 = True
Y_LIM_FINE_TUNING = True
# Number of Moving Average
# SPEC_L1 = 'bo-'
# SPEC_L1 = 'g^--'
# SPEC_L2 = 'cs:'
# SPEC_L4 = 'r*-.'
SPEC_L1 = 'b-'
SPEC_L2 = 'c:'
SPEC_L3 = 'r-.'
SPEC_L4 = 'g--'
NUM_EPOCHS = 200
def main(num_epochs=NUM_EPOCHS):
if DRAW_COMPARE:
str_epochs = str(num_epochs)
if DRAW_Line1: Line1_acc_test= np.loadtxt(PATH_DATA +"ratio_0.25_acc_test.txt")
if DRAW_Line1: Line1_acc_train= np.loadtxt(PATH_DATA +"ratio_0.25_acc_train.txt")
if DRAW_Line1: Line1_acc_val= np.loadtxt(PATH_DATA +"ratio_0.25_acc_val.txt")
if DRAW_Line1: Line1_loss_test= np.loadtxt(PATH_DATA +"ratio_0.25_loss_test.txt")
if DRAW_Line1: Line1_loss_train= np.loadtxt(PATH_DATA +"ratio_0.25_loss_train.txt")
if DRAW_Line1: Line1_loss_val= np.loadtxt(PATH_DATA +"ratio_0.25_loss_val.txt")
if DRAW_Line1: Line1_epoch_times= np.loadtxt(PATH_DATA +"ratio_0.25_epoch_times.txt")
if DRAW_Line2: Line2_acc_train= np.loadtxt(PATH_DATA_SVRG +"ratio_0.5_acc_train.txt")
if DRAW_Line2: Line2_acc_val= np.loadtxt(PATH_DATA_SVRG +"ratio_0.5_acc_val.txt")
if DRAW_Line2: Line2_loss_train= np.loadtxt(PATH_DATA_SVRG +"ratio_0.5_loss_train.txt")
if DRAW_Line2: Line2_loss_val= np.loadtxt(PATH_DATA_SVRG +"ratio_0.5_loss_val.txt")
if DRAW_Line2: Line2_acc_test= np.loadtxt(PATH_DATA_SVRG +"ratio_0.5_acc_test.txt")
if DRAW_Line2: Line2_loss_test= np.loadtxt(PATH_DATA_SVRG +"ratio_0.5_loss_test.txt")
if DRAW_Line2: Line2_epoch_times= np.loadtxt(PATH_DATA_SVRG +"ratio_0.5_epoch_times.txt")
if DRAW_Line3: Line3_acc_train= np.loadtxt(PATH_DATA +"ratio_0.75_acc_train.txt")
if DRAW_Line3: Line3_acc_val= np.loadtxt(PATH_DATA +"ratio_0.75_acc_val.txt")
if DRAW_Line3: Line3_loss_train= np.loadtxt(PATH_DATA +"ratio_0.75_loss_train.txt")
if DRAW_Line3: Line3_loss_val= np.loadtxt(PATH_DATA +"ratio_0.75_loss_val.txt")
if DRAW_Line3: Line3_acc_test= np.loadtxt(PATH_DATA +"ratio_0.75_acc_test.txt")
if DRAW_Line3: Line3_loss_test= np.loadtxt(PATH_DATA +"ratio_0.75_loss_test.txt")
if DRAW_Line3: Line3_epoch_times= np.loadtxt(PATH_DATA +"ratio_0.75_epoch_times.txt")
if DRAW_Line4: Line4_acc_train= np.loadtxt(PATH_DATA +"ratio_1.0_acc_train.txt")
if DRAW_Line4: Line4_acc_val= np.loadtxt(PATH_DATA +"ratio_1.0_acc_val.txt")
if DRAW_Line4: Line4_loss_train= np.loadtxt(PATH_DATA +"ratio_1.0_loss_train.txt")
if DRAW_Line4: Line4_loss_val= np.loadtxt(PATH_DATA +"ratio_1.0_loss_val.txt")
if DRAW_Line4: Line4_acc_test= np.loadtxt(PATH_DATA +"ratio_1.0_acc_test.txt")
if DRAW_Line4: Line4_loss_test= np.loadtxt(PATH_DATA +"ratio_1.0_loss_test.txt")
if DRAW_Line4: Line4_epoch_times= np.loadtxt(PATH_DATA +"ratio_1.0_epoch_times.txt")
if DRAW_Line1: np.ones(num_epochs*0.8)*Line1_epoch_times[39]
# count_Line1 = 200
# count_Line2 = 200
# count_Line3 = 200
if DRAW_Line1: count_Line1 = np.arange(Line1_acc_val.shape[0])+1
if DRAW_Line2: count_Line2 = np.arange(Line2_acc_train.shape[0])+1
if DRAW_Line3: count_Line3 = np.arange(Line3_acc_val.shape[0])+1
if DRAW_Line4: count_Line4 = np.arange(Line4_acc_val.shape[0])+1
# print mlp_sgd_acc_train
MAXLENGTH = num_epochs
if (MAXLENGTH>0 or STARTPOINT>0): # Need add for epoch_times
if DRAW_Line1: count_Line1 = count_Line1[STARTPOINT:MAXLENGTH+1]
if DRAW_Line2: count_Line2 = count_Line2[STARTPOINT:MAXLENGTH+1]
if DRAW_Line3: count_Line3 = count_Line3[STARTPOINT:MAXLENGTH+1]
if DRAW_Line4: count_Line4 = count_Line4[STARTPOINT:MAXLENGTH+1]
if DRAW_Line1: Line1_acc_test = Line1_acc_test[STARTPOINT:MAXLENGTH+1]
if DRAW_Line2: Line2_acc_test = Line2_acc_test[STARTPOINT:MAXLENGTH+1]
if DRAW_Line3: Line3_acc_test = Line3_acc_test[STARTPOINT:MAXLENGTH+1]
if DRAW_Line4: Line4_acc_test = Line4_acc_test[STARTPOINT:MAXLENGTH+1]
if DRAW_Line1: Line1_loss_test = Line1_loss_test[STARTPOINT:MAXLENGTH+1]
if DRAW_Line2: Line2_loss_test = Line2_loss_test[STARTPOINT:MAXLENGTH+1]
if DRAW_Line3: Line3_loss_test = Line3_loss_test[STARTPOINT:MAXLENGTH+1]
if DRAW_Line4: Line4_loss_test = Line4_loss_test[STARTPOINT:MAXLENGTH+1]
if DRAW_Line1: Line1_acc_val = Line1_acc_val[STARTPOINT:MAXLENGTH+1]
if DRAW_Line2: Line2_acc_val = Line2_acc_val[STARTPOINT:MAXLENGTH+1]
if DRAW_Line3: Line3_acc_val = Line3_acc_val[STARTPOINT:MAXLENGTH+1]
if DRAW_Line4: Line4_acc_val = Line4_acc_val[STARTPOINT:MAXLENGTH+1]
if DRAW_Line1: Line1_loss_val = Line1_loss_val[STARTPOINT:MAXLENGTH+1]
if DRAW_Line2: Line2_loss_val = Line2_loss_val[STARTPOINT:MAXLENGTH+1]
if DRAW_Line3: Line3_loss_val = Line3_loss_val[STARTPOINT:MAXLENGTH+1]
if DRAW_Line4: Line4_loss_val = Line4_loss_val[STARTPOINT:MAXLENGTH+1]
if DRAW_Line1: Line1_acc_train = Line1_acc_train[STARTPOINT:MAXLENGTH+1]
if DRAW_Line2: Line2_acc_train = Line2_acc_train[STARTPOINT:MAXLENGTH+1]
if DRAW_Line3: Line3_acc_train = Line3_acc_train[STARTPOINT:MAXLENGTH+1]
if DRAW_Line4: Line4_acc_train = Line4_acc_train[STARTPOINT:MAXLENGTH+1]
if DRAW_Line1: Line1_loss_train = Line1_loss_train[STARTPOINT:MAXLENGTH+1]
if DRAW_Line2: Line2_loss_train = Line2_loss_train[STARTPOINT:MAXLENGTH+1]
if DRAW_Line3: Line3_loss_train = Line3_loss_train[STARTPOINT:MAXLENGTH+1]
if DRAW_Line4: Line4_loss_train = Line4_loss_train[STARTPOINT:MAXLENGTH+1]
if DRAW_Line1: Line1_epoch_times = Line1_epoch_times[STARTPOINT:MAXLENGTH+1]
if DRAW_Line2: Line2_epoch_times = Line2_epoch_times[STARTPOINT:MAXLENGTH+1]
if DRAW_Line3: Line3_epoch_times = Line3_epoch_times[STARTPOINT:MAXLENGTH+1]
if DRAW_Line4: Line4_epoch_times = Line4_epoch_times[STARTPOINT:MAXLENGTH+1]
# print Line1_acc_test
#PLOT
matplotlib.rcParams.update({'font.size': 16})
plt.figure(1)
plt.title('Loss of Validation Set')
if DRAW_Line1: plt.plot(count_Line1, Line1_loss_val, SPEC_L1 ,label="Alpha = 0.1", linewidth = LINEWIDTH)
if DRAW_Line2: plt.plot(count_Line2, Line2_loss_val, SPEC_L2 ,label="Alpha = 0.05", linewidth = LINEWIDTH)
if DRAW_Line3: plt.plot(count_Line3, Line3_loss_val, SPEC_L3 ,label="Alpha = 0.01", linewidth = LINEWIDTH)
if DRAW_Line4: plt.plot(count_Line4, Line4_loss_val, SPEC_L4 ,label="Alpha = 0.001", linewidth = LINEWIDTH)
plt.xlabel('# Epochs')
plt.ylabel('Loss')
plt.legend()
# plt.show()
        pylab.savefig(PATH_FIGURE+'CrossModel_Validation_Set_Loss'+'.png',bbox_inches='tight')
plt.figure(2)
plt.title('Predict Accuracy of Validation Set')
if DRAW_Line1: plt.plot(count_Line1, Line1_acc_val, SPEC_L1 ,label="Alpha = 0.1", linewidth = LINEWIDTH)
if DRAW_Line2: plt.plot(count_Line2, Line2_acc_val, SPEC_L2 ,label="Alpha = 0.05", linewidth = LINEWIDTH)
if DRAW_Line3: plt.plot(count_Line3, Line3_acc_val, SPEC_L3 ,label="Alpha = 0.01", linewidth = LINEWIDTH)
if DRAW_Line4: plt.plot(count_Line4, Line4_acc_val, SPEC_L4 ,label="Alpha = 0.001", linewidth = LINEWIDTH)
plt.xlabel('# Epochs')
plt.ylabel('Predict Accuracy')
plt.legend(bbox_to_anchor=(1,0.4))
# plt.show()
pylab.savefig(PATH_FIGURE+'CrossModel_Validation_Set_Predict_Accuracy'+'.png',bbox_inches='tight')
plt.figure(3)
plt.title('Loss of Training Set')
# if Y_LIM_FINE_TUNING: pylab.ylim([-0.01,0.25])
if DRAW_Line1: plt.plot(count_Line1, Line1_loss_train, SPEC_L1 ,label="Alpha = 0.1", linewidth = LINEWIDTH)
if DRAW_Line2: plt.plot(count_Line2, Line2_loss_train, SPEC_L2 ,label="Alpha = 0.05", linewidth = LINEWIDTH)
if DRAW_Line3: plt.plot(count_Line3, Line3_loss_train, SPEC_L3 ,label="Alpha = 0.01", linewidth = LINEWIDTH)
if DRAW_Line4: plt.plot(count_Line4, Line4_loss_train, SPEC_L4 ,label="Alpha = 0.001", linewidth = LINEWIDTH)
plt.xlabel('# Epochs')
plt.ylabel('Loss')
plt.legend()
# plt.show()
pylab.savefig(PATH_FIGURE+'CrossModel_Training_Set_Loss'+'.png',bbox_inches='tight')
plt.figure(4)
plt.title('Predict Accuracy of Training Set')
if Y_LIM_FINE_TUNING: pylab.ylim([0.93,1.01])
if DRAW_Line1: plt.plot(count_Line1, Line1_acc_train, SPEC_L1 ,label="Alpha = 0.1", linewidth = LINEWIDTH)
if DRAW_Line2: plt.plot(count_Line2, Line2_acc_train, SPEC_L2 ,label="Alpha = 0.05", linewidth = LINEWIDTH)
if DRAW_Line3: plt.plot(count_Line3, Line3_acc_train, SPEC_L3 ,label="Alpha = 0.01", linewidth = LINEWIDTH)
if DRAW_Line4: plt.plot(count_Line4, Line4_acc_train, SPEC_L4 ,label="Alpha = 0.001", linewidth = LINEWIDTH)
plt.xlabel('# Epochs')
plt.ylabel('Predict Accuracy')
plt.legend(bbox_to_anchor=(1,0.4))
# plt.show()
pylab.savefig(PATH_FIGURE+'CrossModel_Training_Set_Predict_Accuracy'+'.png',bbox_inches='tight')
plt.figure(5)
plt.title('Predict Accuracy of Test Set')
if DRAW_Line1: plt.plot(count_Line1, Line1_acc_test, SPEC_L1 ,label="Alpha = 0.1", linewidth = LINEWIDTH)
        if DRAW_Line2: plt.plot(count_Line2, Line2_acc_test, SPEC_L2 ,label="Alpha = 0.05", linewidth = LINEWIDTH)
        if DRAW_Line3: plt.plot(count_Line3, Line3_acc_test, SPEC_L3 ,label="Alpha = 0.01", linewidth = LINEWIDTH)
        if DRAW_Line4: plt.plot(count_Line4, Line4_acc_test, SPEC_L4 ,label="Alpha = 0.001", linewidth = LINEWIDTH)
plt.xlabel('# Epochs')
plt.ylabel('Predict Accuracy')
plt.legend(bbox_to_anchor=(1,0.4))
# plt.show()
pylab.savefig(PATH_FIGURE+'CrossModel_Test_Set_Predict_Accuracy'+'.png',bbox_inches='tight')
plt.figure(6)
plt.title('Loss of Test Set')
if DRAW_Line1: plt.plot(count_Line1, Line1_loss_test, SPEC_L1 ,label="Alpha = 0.1", linewidth = LINEWIDTH)
        if DRAW_Line2: plt.plot(count_Line2, Line2_loss_test, SPEC_L2 ,label="Alpha = 0.05", linewidth = LINEWIDTH)
        if DRAW_Line3: plt.plot(count_Line3, Line3_loss_test, SPEC_L3 ,label="Alpha = 0.01", linewidth = LINEWIDTH)
        if DRAW_Line4: plt.plot(count_Line4, Line4_loss_test, SPEC_L4 ,label="Alpha = 0.001", linewidth = LINEWIDTH)
plt.xlabel('# Epochs')
plt.ylabel('Loss')
plt.legend()
pylab.savefig(PATH_FIGURE+'CrossModel_Test_Set_Loss'+'.png',bbox_inches='tight')
# plt.show()
#PLOT Per Second
matplotlib.rcParams.update({'font.size': 16})
plt.figure(7)
plt.title('Loss of Validation Set')
if DRAW_Line1: plt.plot(Line1_epoch_times, Line1_loss_val, SPEC_L1 ,label="Alpha = 0.1", linewidth = LINEWIDTH)
if DRAW_Line2: plt.plot(Line2_epoch_times, Line2_loss_val, SPEC_L2 ,label="Alpha = 0.05", linewidth = LINEWIDTH)
if DRAW_Line3: plt.plot(Line3_epoch_times, Line3_loss_val, SPEC_L3 ,label="Alpha = 0.01", linewidth = LINEWIDTH)
if DRAW_Line4: plt.plot(Line4_epoch_times, Line4_loss_val, SPEC_L4 ,label="Alpha = 0.001", linewidth = LINEWIDTH)
plt.xlabel('Seconds')
plt.ylabel('Loss')
plt.legend()
# plt.show()
        pylab.savefig(PATH_FIGURE+'Time_CrossModel_Validation_Set_Loss'+'.png',bbox_inches='tight')
plt.figure(8)
plt.title('Predict Accuracy of Validation Set')
if DRAW_Line1: plt.plot(Line1_epoch_times, Line1_acc_val, SPEC_L1 ,label="Alpha = 0.1", linewidth = LINEWIDTH)
if DRAW_Line2: plt.plot(Line2_epoch_times, Line2_acc_val, SPEC_L2 ,label="Alpha = 0.05", linewidth = LINEWIDTH)
if DRAW_Line3: plt.plot(Line3_epoch_times, Line3_acc_val, SPEC_L3 ,label="Alpha = 0.01", linewidth = LINEWIDTH)
if DRAW_Line4: plt.plot(Line4_epoch_times, Line4_acc_val, SPEC_L4 ,label="Alpha = 0.001", linewidth = LINEWIDTH)
plt.xlabel('Seconds')
plt.ylabel('Predict Accuracy')
plt.legend(bbox_to_anchor=(1,0.4))
# plt.show()
pylab.savefig(PATH_FIGURE+'Time_CrossModel_Validation_Set_Predict_Accuracy'+'.png',bbox_inches='tight')
plt.figure(9)
plt.title('Loss of Training Set')
# if Y_LIM_FINE_TUNING: pylab.ylim([-0.01,0.25])
if DRAW_Line1: plt.plot(Line1_epoch_times, Line1_loss_train, SPEC_L1 ,label="Alpha = 0.1", linewidth = LINEWIDTH)
if DRAW_Line2: plt.plot(Line2_epoch_times, Line2_loss_train, SPEC_L2 ,label="Alpha = 0.05", linewidth = LINEWIDTH)
if DRAW_Line3: plt.plot(Line3_epoch_times, Line3_loss_train, SPEC_L3 ,label="Alpha = 0.01", linewidth = LINEWIDTH)
if DRAW_Line4: plt.plot(Line4_epoch_times, Line4_loss_train, SPEC_L4 ,label="Alpha = 0.001", linewidth = LINEWIDTH)
plt.xlabel('Seconds')
plt.ylabel('Loss')
plt.legend()
# plt.show()
pylab.savefig(PATH_FIGURE+'Time_CrossModel_Training_Set_Loss'+'.png',bbox_inches='tight')
plt.figure(10)
plt.title('Predict Accuracy of Training Set')
if Y_LIM_FINE_TUNING: pylab.ylim([0.93,1.01])
if DRAW_Line1: plt.plot(Line1_epoch_times, Line1_acc_train, SPEC_L1 ,label="Alpha = 0.1", linewidth = LINEWIDTH)
if DRAW_Line2: plt.plot(Line2_epoch_times, Line2_acc_train, SPEC_L2 ,label="Alpha = 0.05", linewidth = LINEWIDTH)
if DRAW_Line3: plt.plot(Line3_epoch_times, Line3_acc_train, SPEC_L3 ,label="Alpha = 0.01", linewidth = LINEWIDTH)
if DRAW_Line4: plt.plot(Line4_epoch_times, Line4_acc_train, SPEC_L4 ,label="Alpha = 0.001", linewidth = LINEWIDTH)
plt.xlabel('Seconds')
plt.ylabel('Predict Accuracy')
plt.legend(bbox_to_anchor=(1,0.4))
# plt.show()
pylab.savefig(PATH_FIGURE+'Time_CrossModel_Training_Set_Predict_Accuracy'+'.png',bbox_inches='tight')
plt.figure(11)
plt.title('Predict Accuracy of Test Set')
if DRAW_Line1: plt.plot(Line1_epoch_times, Line1_acc_test, SPEC_L1 ,label="Alpha = 0.1", linewidth = LINEWIDTH)
        if DRAW_Line2: plt.plot(Line2_epoch_times, Line2_acc_test, SPEC_L2 ,label="Alpha = 0.05", linewidth = LINEWIDTH)
        if DRAW_Line3: plt.plot(Line3_epoch_times, Line3_acc_test, SPEC_L3 ,label="Alpha = 0.01", linewidth = LINEWIDTH)
        if DRAW_Line4: plt.plot(Line4_epoch_times, Line4_acc_test, SPEC_L4 ,label="Alpha = 0.001", linewidth = LINEWIDTH)
plt.xlabel('Seconds')
plt.ylabel('Predict Accuracy')
plt.legend(bbox_to_anchor=(1,0.4))
# plt.show()
pylab.savefig(PATH_FIGURE+'Time_CrossModel_Test_Set_Predict_Accuracy'+'.png',bbox_inches='tight')
plt.figure(12)
plt.title('Loss of Test Set')
if DRAW_Line1: plt.plot(Line1_epoch_times, Line1_loss_test, SPEC_L1 ,label="Alpha = 0.1", linewidth = LINEWIDTH)
        if DRAW_Line2: plt.plot(Line2_epoch_times, Line2_loss_test, SPEC_L2 ,label="Alpha = 0.05", linewidth = LINEWIDTH)
        if DRAW_Line3: plt.plot(Line3_epoch_times, Line3_loss_test, SPEC_L3 ,label="Alpha = 0.01", linewidth = LINEWIDTH)
        if DRAW_Line4: plt.plot(Line4_epoch_times, Line4_loss_test, SPEC_L4 ,label="Alpha = 0.001", linewidth = LINEWIDTH)
plt.xlabel('Seconds')
plt.ylabel('Loss')
plt.legend()
pylab.savefig(PATH_FIGURE+'Time_CrossModel_Test_Set_Loss'+'.png',bbox_inches='tight')
# plt.show()
print ("Finish drawing cross model plots.")
if __name__ == '__main__':
if ('--help' in sys.argv) or ('-h' in sys.argv) or ('help' in sys.argv):
print("arg: NUM_EPOCHS")
else:
kwargs = {}
if len(sys.argv) > 1:
kwargs['num_epochs'] = int(sys.argv[1])
main(**kwargs)
| mit | 5,457,736,702,800,827,000 | 43.189266 | 118 | 0.691364 | false |
fgcz/maxquant_wrapper | maxquant_wrapper/maxquant_wrapper.py | 1 | 30535 | #!/usr/bin/python
# Copyright 2015-2017 Christian Panse <[email protected]>
#
# This file is part of the maxquant_wrapper package.
# https://github.com/fgcz/maxquant_wrapper/
#
# maxquant_wrapper is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# maxquant_wrapper is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with maxquant_wrapper. If not, see <http://www.gnu.org/licenses/>.
import sys
import os
import shutil
import unittest
import time
import subprocess
import re
import logging
import logging.handlers
import pprint
def create_logger(name="MaxQuant"):
"""
create a logger object
"""
syslog_handler = logging.handlers.SysLogHandler(address=("fgcz-ms.uzh.ch", 514))
formatter = logging.Formatter('%(name)s %(message)s')
syslog_handler.setFormatter(formatter)
logger = logging.getLogger(name)
logger.setLevel(20)
logger.addHandler(syslog_handler)
return logger
logger = create_logger()
class FgczMaxquantWrapper:
"""
this class does
- stage the input data
- compose a maxquant driver xml file adapted to the FGCZ infrastructure
- run maxquant
- stage the output data
staging:
- can be done by using samba or ssh
- the class can be executed on the python shell or via rpc
logging:
- is done by a general log server
input:
- a python data structure containing all information
note: this class is supposed to be run on a Microsoft Windows OS
    TODO(cp,wew): the staging and exec methods would be better separated into an infrastructure class
"""
"""
input:
QEXACTIVE_2:
- [email protected]://srv/www/htdocs//p1946/Proteomics/QEXACTIVE_2/paolo_20150811_course/20150811_01_Fetuin40fmol.raw
- [email protected]://srv/www/htdocs//p1946/Proteomics/QEXACTIVE_2/paolo_20150811_course/20150811_02_YPG1.raw
- [email protected]://srv/www/htdocs//p1946/Proteomics/QEXACTIVE_2/paolo_20150811_course/20150811_03_YPG2_GG.raw
- [email protected]://srv/www/htdocs//p1946/Proteomics/QEXACTIVE_2/paolo_20150811_course/20150811_04_YPG2_SL.raw
- [email protected]://srv/www/htdocs//p1946/Proteomics/QEXACTIVE_2/paolo_20150811_course/20150811_05_YPD3.raw
- [email protected]://srv/www/htdocs//p1946/Proteomics/QEXACTIVE_2/paolo_20150811_course/20150811_06_Fetuin40fmol.raw
- [email protected]://srv/www/htdocs//p1946/Proteomics/QEXACTIVE_2/paolo_20150811_course/20150811_07_YPD1.raw
- [email protected]://srv/www/htdocs//p1946/Proteomics/QEXACTIVE_2/paolo_20150811_course/20150811_08_YPD2_SL.raw
- [email protected]://srv/www/htdocs//p1946/Proteomics/QEXACTIVE_2/paolo_20150811_course/20150811_09_YPG3.raw
output:
- [email protected]:/srv/www/htdocs//p1946/bfabric/Proteomics/MaxQuant_Scaffold_LFQ_tryptic_swissprot/2015/2015-09/2015-09-07//workunit_135076//203583.zip
parameters: {}
protocol: scp
"""
config = None
outputurl = None
scratchroot = os.path.normcase(r"d:\scratch_")
scratch = scratchroot
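    # Typical usage (illustrative sketch; `job_config` stands for a dict shaped
    # like the input/output structure documented above):
    #
    #   wrapper = FgczMaxquantWrapper(config=job_config)
    #   wrapper.add_outputurl(job_config['application']['output'][0])
    #   wrapper.run()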
def __init__(self, config=None):
if not os.path.isdir(self.scratchroot):
try:
os.mkdir(self.scratchroot)
except:
                print "scratch '{0}' could not be created.".format(self.scratchroot)
raise
if config:
self.config=config
def run_commandline(self, cmd, shell_flag=False):
"""
:param cmd:
:param shell_flag:
:return:
"""
(pid, return_code) = (None, None)
(out, err)=("", "")
tStart = time.time()
logger.info(cmd)
try:
p = subprocess.Popen(cmd, shell=shell_flag)
pid = p.pid
return_code = p.wait()
(out, err) = p.communicate()
p.terminate()
except OSError as e:
msg = "exception|pid={0}|OSError={1}".format(pid, e)
logger.info(msg)
print err
print out
raise
msg_info = "completed|pid={0}|time={1}|return_code={2}|cmd='{3}'" \
.format(pid, time.time() - tStart, return_code, cmd)
logger.info(msg_info)
print out
print err
return (return_code)
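    # Example (illustrative sketch):
    #
    #   return_code = self.run_commandline("hostname", shell_flag=True)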
def map_url_scp2smb(self, url,
from_prefix_regex="[email protected]://srv/www/htdocs",
to_prefix="\\\\130.60.81.21\\data"):
"""maps an url from
'[email protected]://srv/www/htdocs//p1946/Proteomics/QEXACTIVE_2/paolo_20150811_course/20150811_01_Fetuin40fmol.raw'
to
'\\130.60.81.21\data\p1946\proteomics\qexactive_2\paolo_20150811_course\20150811_01_fetuin40fmol.raw'
if it can not be matched it returns None
"""
regex = re.compile("({0}.+)(p[0-9]+([\\/]).*)$".format(from_prefix_regex))
match = regex.match(url)
if match:
result_url = "{0}\{1}".format(to_prefix, os.path.normcase(match.group(2)))
return (result_url)
else:
return None
def print_config(self):
print "------"
pp = pprint.PrettyPrinter(width=70)
pp.pprint(self.config)
return True
def add_config(self, config):
self.config = config
return True
def add_outputurl(self, url=None):
"""
:param url:
:return:
"""
self.outputurl = url
return True
def create_scratch(self):
"""create scratch space
"""
# TODO(cp): what if workunit is not defined
self.scratch = os.path.normcase(
"{0}/{1}".format(self.scratchroot, self.config['job_configuration']['workunit_id']))
if not os.path.isdir(self.scratch):
try:
os.mkdir(self.scratch)
except:
                logger.info("scratch '{0}' could not be created.".format(self.scratch))
raise
return True
def scp(self, src, dst,
scp_cmd=r"d:\fgcz\pscp.exe",
scp_option=r"-scp -i C:\Users\cpanse\id_rsa.ppk"
):
"""
this is the scp wrapper for data staging
:param src:
:param dst:
:param scp_cmd:
:param scp_option:
:return:
"""
cmd = "{0} {1} {2} {3}".format(scp_cmd, scp_option, src, dst)
self.run_commandline(cmd, shell_flag=False)
return (True)
def copy_input_to_scratch(self,
copy_method=lambda s,t: shutil.copyfile(s, t),
src_url_mapping=lambda x: x,
dst_url_mapping=lambda x: os.path.basename(x)):
"""
make input resources available on scratch
NOTE: we assume if the file is already in place it is identical to the src file.
:param copy_method:
:param src_url_mapping:
:param dst_url_mapping:
:return:
"""
_input = self.config['application']['input']
try:
self._fsrc_fdst = []
for i in _input.keys():
self._fsrc_fdst = self._fsrc_fdst + map(lambda x: (src_url_mapping(x), dst_url_mapping(x)), _input[i])
for (_fsrc, _fdst) in self._fsrc_fdst:
if os.path.isfile(_fdst):
logger.info("'{0}' is already there.".format(_fdst))
pass
else:
try:
logger.info("copy '{0}' from '{1}' ...".format(_fdst, _fsrc))
copy_method(_fsrc, _fdst)
except:
                        print "ERROR: file copy failed."
raise
except:
logger.info("copying failed")
raise
return True
def compose_maxquant_driver_file(self, filename=None):
assert isinstance(filename, basestring)
fasta_filename = None
enzymes = "Trypsin/P"
try:
fasta_filename = self.config['application']['parameters']['FASTA']
except:
fasta_filename = r"D:\MaxQuantDBs\fgcz_swissprot_20121031.fasta"
try:
enzymes = self.config['application']['parameters']['Enzymes']
except:
enzymes = "Trypsin/P"
try:
variableModifications = self.config['application']['parameters']['variableModifications']
except:
variableModifications = "Acetyl (Protein N-term), Oxidation (M)"
variableModifications = "\n".join(map(lambda x: "\t<string>{0}</string>".format(x), variableModifications.replace(", ", ",").split(",")))
try:
fixedModifications = self.config['application']['parameters']['fixedModifications']
except:
fixedModifications = "Carbamidomethyl (C)"
if fixedModifications == "None":
fixedModifications = ""
else:
fixedModifications = "\n".join(map(lambda x: "\t<string>{0}</string>".format(x), fixedModifications.replace(", ", ",").split(",")))
# fasta_filename = os.path.normpath(os.path.normcase(fasta_filename))
fasta_filename = os.path.normpath(fasta_filename)
_xml="""<?xml version='1.0' encoding='UTF-8'?>
<MaxQuantParams xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance' xmlns:xsd='http://www.w3.org/2001/XMLSchema' aifSilWeight='4' aifIsoWeight='2' aifTopx='20' aifCorrelation='0.47' aifCorrelationFirstPass='0.8' aifMinMass='0' aifMsmsTol='10' aifSecondPass='true' aifIterative='true' aifThresholdFdr='0.01'>
<slicePeaks>true</slicePeaks>
<tempFolder/>
<fixedCombinedFolder/>
<ionCountIntensities>false</ionCountIntensities>
<verboseColumnHeaders>false</verboseColumnHeaders>
<minTime>NaN</minTime>
<maxTime>NaN</maxTime>
<fullMinMz>-1.7976931348623157E+308</fullMinMz>
<fullMaxMz>1.7976931348623157E+308</fullMaxMz>
<calcPeakProperties>false</calcPeakProperties>
<useOriginalPrecursorMz>false</useOriginalPrecursorMz>
<minPeakLen>2</minPeakLen>
<filePaths>
{filePaths}
</filePaths>
<experiments>
{experiments}
</experiments>
<fractions>
{fractions}
</fractions>
<matching>
{matching}
</matching>
<paramGroupIndices>
{paramGroupIndices}
</paramGroupIndices>
<parameterGroups>
<parameterGroup>
<maxCharge>7</maxCharge>
<msInstrument>0</msInstrument>
<labelMods>
<string/>
</labelMods>
<lfqMinEdgesPerNode>3</lfqMinEdgesPerNode>
<lfqAvEdgesPerNode>6</lfqAvEdgesPerNode>
<fastLfq>true</fastLfq>
<lfqMinRatioCount>2</lfqMinRatioCount>
<useNormRatiosForHybridLfq>true</useNormRatiosForHybridLfq>
<maxLabeledAa>0</maxLabeledAa>
<maxNmods>5</maxNmods>
<maxMissedCleavages>2</maxMissedCleavages>
<multiplicity>1</multiplicity>
<enzymes>
<string>{enzymes}</string>
</enzymes>
<enzymesFirstSearch/>
<useEnzymeFirstSearch>false</useEnzymeFirstSearch>
<useVariableModificationsFirstSearch>false</useVariableModificationsFirstSearch>
<variableModifications>
{variableModifications}
</variableModifications>
<isobaricLabels/>
<variableModificationsFirstSearch/>
<hasAdditionalVariableModifications>false</hasAdditionalVariableModifications>
<additionalVariableModifications/>
<additionalVariableModificationProteins/>
<doMassFiltering>true</doMassFiltering>
<firstSearchTol>20</firstSearchTol>
<mainSearchTol>4.5</mainSearchTol>
<lcmsRunType>0</lcmsRunType>
<lfqMode>1</lfqMode>
<enzymeMode>0</enzymeMode>
<enzymeModeFirstSearch>0</enzymeModeFirstSearch>
</parameterGroup>
</parameterGroups>
<fixedModifications>
{fixedModifications}
</fixedModifications>
<multiModificationSearch>false</multiModificationSearch>
<compositionPrediction>false</compositionPrediction>
<fastaFiles>
<string>{fastaFiles}</string>
</fastaFiles>
<fastaFilesFirstSearch/>
<fixedSearchFolder/>
<advancedRatios>false</advancedRatios>
<rtShift>false</rtShift>
<separateLfq>false</separateLfq>
<lfqStabilizeLargeRatios>true</lfqStabilizeLargeRatios>
<lfqRequireMsms>true</lfqRequireMsms>
<decoyMode>revert</decoyMode>
<specialAas>KR</specialAas>
<includeContamiants>true</includeContamiants>
<equalIl>false</equalIl>
<topxWindow>100</topxWindow>
<maxPeptideMass>4600</maxPeptideMass>
<reporterPif>0.75</reporterPif>
<reporterFraction>0</reporterFraction>
<reporterBasePeakRatio>0</reporterBasePeakRatio>
<minDeltaScoreUnmodifiedPeptides>0</minDeltaScoreUnmodifiedPeptides>
<minDeltaScoreModifiedPeptides>17</minDeltaScoreModifiedPeptides>
<minScoreUnmodifiedPeptides>0</minScoreUnmodifiedPeptides>
<minScoreModifiedPeptides>40</minScoreModifiedPeptides>
<filterAacounts>true</filterAacounts>
<secondPeptide>true</secondPeptide>
<matchBetweenRuns>true</matchBetweenRuns>
<matchUnidentifiedFeatures>false</matchUnidentifiedFeatures>
<matchBetweenRunsFdr>false</matchBetweenRunsFdr>
<reQuantify>true</reQuantify>
<dependentPeptides>false</dependentPeptides>
<dependentPeptideFdr>0</dependentPeptideFdr>
   <dependentPeptideMassBin>0</dependentPeptideMassBin>
<msmsConnection>false</msmsConnection>
<ibaq>false</ibaq>
<useDeltaScore>false</useDeltaScore>
<avalon>false</avalon>
<msmsRecalibration>false</msmsRecalibration>
<ibaqLogFit>false</ibaqLogFit>
<razorProteinFdr>true</razorProteinFdr>
<deNovoSequencing>false</deNovoSequencing>
<deNovoVarMods>true</deNovoVarMods>
<massDifferenceSearch>false</massDifferenceSearch>
<minPepLen>7</minPepLen>
<peptideFdr>0.01</peptideFdr>
<proteinFdr>0.05</proteinFdr>
<siteFdr>0.01</siteFdr>
<minPeptideLengthForUnspecificSearch>8</minPeptideLengthForUnspecificSearch>
<maxPeptideLengthForUnspecificSearch>25</maxPeptideLengthForUnspecificSearch>
<useNormRatiosForOccupancy>true</useNormRatiosForOccupancy>
<minPeptides>1</minPeptides>
<minRazorPeptides>1</minRazorPeptides>
<minUniquePeptides>0</minUniquePeptides>
<useCounterparts>false</useCounterparts>
<minRatioCount>2</minRatioCount>
<restrictProteinQuantification>true</restrictProteinQuantification>
<restrictMods>
{variableModifications}
</restrictMods>
<matchingTimeWindow>2</matchingTimeWindow>
<alignmentTimeWindow>20</alignmentTimeWindow>
<numberOfCandidatesMultiplexedMsms>25</numberOfCandidatesMultiplexedMsms>
<numberOfCandidatesMsms>15</numberOfCandidatesMsms>
<massDifferenceMods/>
<crossLinkerSearch>false</crossLinkerSearch>
<crossLinker/>
<labileCrossLinkerSearch>false</labileCrossLinkerSearch>
<labileCrossLinker>DSSO</labileCrossLinker>
<RescoreMsx>false</RescoreMsx>
<msmsParamsArray>
<msmsParams Name='FTMS' InPpm='true' Deisotope='true' Topx='12' HigherCharges='true' IncludeWater='true' IncludeAmmonia='true' DependentLosses='true'>
<Tolerance>
<Value>20</Value>
<Unit>Ppm</Unit>
</Tolerance>
<DeNovoTolerance>
<Value>20</Value>
<Unit>Ppm</Unit>
</DeNovoTolerance>
</msmsParams>
<msmsParams Name='ITMS' InPpm='false' Deisotope='false' Topx='8' HigherCharges='true' IncludeWater='true' IncludeAmmonia='true' DependentLosses='true'>
<Tolerance>
<Value>0.5</Value>
<Unit>Dalton</Unit>
</Tolerance>
<DeNovoTolerance>
<Value>0.5</Value>
<Unit>Dalton</Unit>
</DeNovoTolerance>
</msmsParams>
<msmsParams Name='TOF' InPpm='false' Deisotope='false' Topx='10' HigherCharges='true' IncludeWater='true' IncludeAmmonia='true' DependentLosses='true'>
<Tolerance>
<Value>0.1</Value>
<Unit>Dalton</Unit>
</Tolerance>
<DeNovoTolerance>
<Value>0.1</Value>
<Unit>Dalton</Unit>
</DeNovoTolerance>
</msmsParams>
<msmsParams Name='Unknown' InPpm='false' Deisotope='false' Topx='10' HigherCharges='true' IncludeWater='true' IncludeAmmonia='true' DependentLosses='true'>
<Tolerance>
<Value>0.5</Value>
<Unit>Dalton</Unit>
</Tolerance>
<DeNovoTolerance>
<Value>0.5</Value>
<Unit>Dalton</Unit>
</DeNovoTolerance>
</msmsParams>
</msmsParamsArray>
<msmsCentroidMode>1</msmsCentroidMode>
<quantMode>1</quantMode>
<siteQuantMode>0</siteQuantMode>
</MaxQuantParams>
""".format(filePaths = "\n".join(map(lambda x: "\t<string>{0}</string>".format(x[1].encode('utf8').replace("/cygdrive/d/", "d:\\").replace("/", "\\")), self._fsrc_fdst)),
experiments = "\n".join(map(lambda x: "\t<string>{0}</string>".format(os.path.splitext(os.path.basename(x[1]))[0].encode('utf8')), self._fsrc_fdst)),
fractions = "\n".join(map(lambda x: "\t<short>32767</short>", self._fsrc_fdst)),
matching = "\n".join(map(lambda x: "\t<unsignedByte>3</unsignedByte>", self._fsrc_fdst)),
paramGroupIndices = "\n".join(map(lambda x: "\t<int>0</int>", self._fsrc_fdst)),
fastaFiles = fasta_filename,
enzymes = enzymes,
fixedModifications = fixedModifications,
variableModifications = variableModifications)
try:
with open(filename, "w") as f:
logger.info("writing '{0}' ...".format(filename))
f.write(_xml)
except:
logger.info("writing maxquant driver file '{0}' failed.".format(filename))
raise
return True
"""
    the following functions have to be adapted
"""
def stage_input(self):
"""
:return:
"""
logger.info("stage input data")
self.copy_input_to_scratch(copy_method=lambda x, y: self.scp(x, y),
dst_url_mapping=lambda x: os.path.normpath(r"{0}\{1}".format(self.scratch,
os.path.basename(x))))
def run_maxquant(self,
cmd=r"d:\fgcz\mxQnt_versions\MaxQuant_1.4.1.2\MaxQuant\bin\MaxQuantCmd.exe",
ncores=8):
logger.info("run maxquant")
mqpar_filename = os.path.normcase(r"{0}\maxquant_driver.xml".format(self.scratch))
self.compose_maxquant_driver_file(filename=mqpar_filename)
self.run_commandline("{0} -mqpar={1} -ncores={2}".format(cmd, mqpar_filename, ncores),
shell_flag=False)
return True
def stage_output(self, zip_cmd=r"C:\Program Files\7-zip\7z.exe"):
"""
        zip all useful output files and copy them to a file exchange server
:return:
"""
"""
S:\cp_temp>"c:\Program Files\7-Zip\7z.exe" a -xr!proc -xr!search -xr!*.raw -xr!ps -xr!*tmp* s:\scratch_\dump2 d:\scratch_\135076d\*
"""
logger.info("stage output")
zip_file = "{0}.7z".format(self.scratch)
        self.run_commandline(r'"{0}" a -xr!proc -xr!search -xr!*.raw -xr!ps {1} {2}\*'.format(zip_cmd, self.scratch, self.scratch), shell_flag=False)  # quote zip_cmd: the default 7-Zip path contains spaces
if self.outputurl and os.path.isfile(zip_file):
self.scp(src=zip_file, dst=self.outputurl)
return True
def clean(self):
"""
clean scratch space if no errors
"""
        logger.info("clean is not implemented yet")
pass
def run(self):
"""
this is the main method of the class
"""
self.create_scratch()
self.stage_input()
self.run_maxquant()
self.stage_output()
self.clean()
        return "EXCHANGE URL"
class TestTargetMapping(unittest.TestCase):
"""
This is a class doing testing on a real infrastructure. Ensure that the SAN is available
(> net use s: \\fgcz-s-021.uzh.ch\data ...)
run
python -m unittest -v fgcz_maxquant_wrapper
"""
test_config = {'application': {'input': {'QEXACTIVE_2': [
'[email protected]://srv/www/htdocs//p1946/Proteomics/QEXACTIVE_2/paolo_20150811_course/20150811_01_Fetuin40fmol.raw',
'[email protected]://srv/www/htdocs//p1946/Proteomics/QEXACTIVE_2/paolo_20150811_course/20150811_02_YPG1.raw',
'[email protected]://srv/www/htdocs//p1946/Proteomics/QEXACTIVE_2/paolo_20150811_course/20150811_03_YPG2_GG.raw',
'[email protected]://srv/www/htdocs//p1946/Proteomics/QEXACTIVE_2/paolo_20150811_course/20150811_04_YPG2_SL.raw',
'[email protected]://srv/www/htdocs//p1946/Proteomics/QEXACTIVE_2/paolo_20150811_course/20150811_05_YPD3.raw',
'[email protected]://srv/www/htdocs//p1946/Proteomics/QEXACTIVE_2/paolo_20150811_course/20150811_06_Fetuin40fmol.raw',
'[email protected]://srv/www/htdocs//p1946/Proteomics/QEXACTIVE_2/paolo_20150811_course/20150811_07_YPD1.raw',
'[email protected]://srv/www/htdocs//p1946/Proteomics/QEXACTIVE_2/paolo_20150811_course/20150811_08_YPD2_SL.raw',
'[email protected]://srv/www/htdocs//p1946/Proteomics/QEXACTIVE_2/paolo_20150811_course/20150811_09_YPG3.raw']},
'protocol': 'scp', 'parameters': {}, 'output': [
'[email protected]:/srv/www/htdocs//p1946/bfabric/Proteomics/MaxQuant_Scaffold_LFQ_tryptic_swissprot/2015/2015-09/2015-09-07//workunit_135076//203583.zip']},
'job_configuration': {
'executable': '/home/bfabric/sgeworker/bin/fgcz_sge_MaxQuant_Scaffold_LFQ_fast',
'external_job_id': 46103, 'input': {'QEXACTIVE_2': [{'sample_id': 26524, 'resource_id': 202116,
'extract_url': 'http://fgcz-bfabric.uzh.ch/bfabric/userlab/show-extract.html?extractId=32622',
'extract_id': 32622,
'resource_url': 'http://fgcz-bfabric.uzh.ch/bfabric/userlab/show-resource.html?resourceId=202116',
'sample_url': 'http://fgcz-bfabric.uzh.ch/bfabric/userlab/show-sample.html?sampleId=26524'},
{'sample_id': 26195, 'resource_id': 202115,
'extract_url': 'http://fgcz-bfabric.uzh.ch/bfabric/userlab/show-extract.html?extractId=32648',
'extract_id': 32648,
'resource_url': 'http://fgcz-bfabric.uzh.ch/bfabric/userlab/show-resource.html?resourceId=202115',
'sample_url': 'http://fgcz-bfabric.uzh.ch/bfabric/userlab/show-sample.html?sampleId=26195'},
{'sample_id': 26195, 'resource_id': 202114,
'extract_url': 'http://fgcz-bfabric.uzh.ch/bfabric/userlab/show-extract.html?extractId=32648',
'extract_id': 32648,
'resource_url': 'http://fgcz-bfabric.uzh.ch/bfabric/userlab/show-resource.html?resourceId=202114',
'sample_url': 'http://fgcz-bfabric.uzh.ch/bfabric/userlab/show-sample.html?sampleId=26195'},
{'sample_id': 26195, 'resource_id': 202113,
'extract_url': 'http://fgcz-bfabric.uzh.ch/bfabric/userlab/show-extract.html?extractId=32648',
'extract_id': 32648,
'resource_url': 'http://fgcz-bfabric.uzh.ch/bfabric/userlab/show-resource.html?resourceId=202113',
'sample_url': 'http://fgcz-bfabric.uzh.ch/bfabric/userlab/show-sample.html?sampleId=26195'},
{'sample_id': 26196, 'resource_id': 202112,
'extract_url': 'http://fgcz-bfabric.uzh.ch/bfabric/userlab/show-extract.html?extractId=32649',
'extract_id': 32649,
'resource_url': 'http://fgcz-bfabric.uzh.ch/bfabric/userlab/show-resource.html?resourceId=202112',
'sample_url': 'http://fgcz-bfabric.uzh.ch/bfabric/userlab/show-sample.html?sampleId=26196'},
{'sample_id': 26524, 'resource_id': 202111,
'extract_url': 'http://fgcz-bfabric.uzh.ch/bfabric/userlab/show-extract.html?extractId=32622',
'extract_id': 32622,
'resource_url': 'http://fgcz-bfabric.uzh.ch/bfabric/userlab/show-resource.html?resourceId=202111',
'sample_url': 'http://fgcz-bfabric.uzh.ch/bfabric/userlab/show-sample.html?sampleId=26524'},
{'sample_id': 26196, 'resource_id': 202110,
'extract_url': 'http://fgcz-bfabric.uzh.ch/bfabric/userlab/show-extract.html?extractId=32649',
'extract_id': 32649,
'resource_url': 'http://fgcz-bfabric.uzh.ch/bfabric/userlab/show-resource.html?resourceId=202110',
'sample_url': 'http://fgcz-bfabric.uzh.ch/bfabric/userlab/show-sample.html?sampleId=26196'},
{'sample_id': 26196, 'resource_id': 202109,
'extract_url': 'http://fgcz-bfabric.uzh.ch/bfabric/userlab/show-extract.html?extractId=32649',
'extract_id': 32649,
'resource_url': 'http://fgcz-bfabric.uzh.ch/bfabric/userlab/show-resource.html?resourceId=202109',
'sample_url': 'http://fgcz-bfabric.uzh.ch/bfabric/userlab/show-sample.html?sampleId=26196'},
{'sample_id': 26195, 'resource_id': 202108,
'extract_url': 'http://fgcz-bfabric.uzh.ch/bfabric/userlab/show-extract.html?extractId=32648',
'extract_id': 32648,
'resource_url': 'http://fgcz-bfabric.uzh.ch/bfabric/userlab/show-resource.html?resourceId=202108',
'sample_url': 'http://fgcz-bfabric.uzh.ch/bfabric/userlab/show-sample.html?sampleId=26195'}]},
'stdout': {'url': '/home/bfabric/sgeworker/logs/workunitid-135076_resourceid-203583.out',
'protocol': 'file', 'resource_id': 203585}, 'output': {'protocol': 'scp',
'ssh_args': '-o StrictHostKeyChecking=no -c arcfour -2 -l bfabric -x',
'resource_id': 203583},
'stderr': {'url': '/home/bfabric/sgeworker/logs/workunitid-135076_resourceid-203583.err',
'protocol': 'file', 'resource_id': 203584},
'workunit_url': 'http://fgcz-bfabric.uzh.ch/bfabric/userlab/show-workunit.html?workunitId=135076',
'workunit_id': 135076}}
mqw = FgczMaxquantWrapper(config=test_config)
def setUp(self):
pass
def test_map_url_scp2smb(self):
# desired_result = os.path.normpath('p1000/Proteomics/TRIPLETOF_1/selevsek_20150119')
# self.assertTrue(desired_result == map_data_analyst_tripletof_1('p1000\Data\selevsek_20150119'))
# self.assertTrue(map_data_analyst_tripletof_1('p1000\data\selevsek_20150119') is None)
_input = self.test_config['application']['input']
for input_application in _input.keys():
map(lambda x: self.assertTrue(os.path.isfile(self.mqw.map_url_scp2smb(x) )),
_input[input_application])
def test_create_scratch(self):
self.assertTrue(self.mqw.create_scratch())
def test_copy_input_to_scratch(self):
self.assertTrue(self.mqw.create_scratch())
self.assertTrue(self.mqw.copy_input_to_scratch)
| gpl-3.0 | -3,839,874,976,932,913,700 | 43.970545 | 309 | 0.577599 | false |
brachyprint/brachyprint | src/mesh/core/face.py | 1 | 4852 |
# Brachyprint -- 3D printing brachytherapy moulds
# Copyright (C) 2013-14 James Cranch, Martin Green and Oliver Madge
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
'''
A face class for the ``mesh'' package.
'''
from __future__ import division
class Face(object):
'''
A class representing a mesh face.
'''
def __init__(self, name, v1, v2, v3):
self.name = name
self.vertices = v1, v2, v3
self.normal = (v1 - v2).cross(v1 - v3).normalise()
self.volume = None
self.edges = []
def validate(self):
assert self.normal == (self.vertices[0] - self.vertices[1]).cross(self.vertices[0] - self.vertices[2]).normalise()
e1found = False
e2found = False
e3found = False
(u1,u2,u3) = self.vertices
for edge in self.edges:
v = (edge.v1, edge.v2)
assert edge.v1 in self.vertices
assert edge.v2 in self.vertices
if (v == (u1,u2)) or (v == (u2,u3)) or (v == (u3,u1)):
assert edge.lface is self
else:
assert edge.rface is self
assert edge.lface != edge.rface
e1found = e1found or (edge.v1 == self.vertices[0] and edge.v2 == self.vertices[1]) or (edge.v2 == self.vertices[0] and edge.v1 == self.vertices[1])
e2found = e2found or (edge.v1 == self.vertices[1] and edge.v2 == self.vertices[2]) or (edge.v2 == self.vertices[1] and edge.v1 == self.vertices[2])
e3found = e3found or (edge.v1 == self.vertices[2] and edge.v2 == self.vertices[0]) or (edge.v2 == self.vertices[2] and edge.v1 == self.vertices[0])
assert e1found and e2found and e3found
print "VALIDATED"
def add_edge(self, edge):
self.edges.append(edge)
def signed_volume(self):
v1, v2, v3 = self.vertices
return v1.dot(v2.cross(v3)) / 6.0
def area(self):
v1, v2, v3 = self.vertices
return (v2 - v1).cross(v3 - v1).magnitude() / 2.0
def centroid(self):
return reduce((lambda x,y:x+y), self.vertices) / 3
def bounding_box(self, tolerance = 0):
return ((min(v.x for v in self.vertices) - tolerance,
max(v.x for v in self.vertices) + tolerance),
(min(v.y for v in self.vertices) - tolerance,
max(v.y for v in self.vertices) + tolerance),
(min(v.z for v in self.vertices) - tolerance,
max(v.z for v in self.vertices) + tolerance))
def nearest_vertex(self, x, y, z):
return min([((v.x - x) ** 2 + (v.y - y) ** 2 + (v.z - z) ** 2, v) for v in self.vertices])[1]
def project2d(self):
# form basis vectors
a = (self.vertices[1]-self.vertices[0])
b = (self.vertices[2]-self.vertices[0])
n = b.cross(a)
u = a
v = n.cross(u)
u = u.normalise()
v = v.normalise()
n = u.cross(v)
origin = self.vertices[0]
fun = lambda vs: [vec.x*u+vec.y*v+origin for vec in vs]
return ([f.project2dvector(u,v) for f in self.vertices], u, v, fun)
def opposite_edge(self, vertex):
try:
return next(e for e in self.edges if e.v1 != vertex and e.v2 != vertex)
except:
print vertex
for e in self.edges:
print e.v1, e.v2
raise Exception("No Opposite Edge ?!?")
def neighbouring_faces(self):
"""Generates the faces which are adjacent to this one.
"""
for e in self.edges:
f = e.face_on_other_side(self)
if f is not None:
yield f
def parallel_plate(self, epsilon=0.00001):
"""Returns the set of all contiguous faces, starting with this one,
that lie in the same plane.
"""
old = set()
new = set([self])
while new:
f = new.pop()
old.add(f)
for g in f.neighbouring_faces():
if g not in old and g not in new and f.normal.cross(g.normal).magnitude() < epsilon:
new.add(g)
return old
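    # Example (illustrative sketch; v1, v2 and v3 are assumed to be vertex
    # objects from this mesh package, supporting '-', cross() and normalise()
    # as used above):
    #
    #   f = Face("f0", v1, v2, v3)
    #   print f.area(), f.centroid(), f.signed_volume()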
| gpl-2.0 | 8,073,912,645,110,784,000 | 36.038168 | 159 | 0.566983 | false |
Idealcoder/Magic-Wand | lib/wand.py | 1 | 1933 | import time
import RPi.GPIO as GPIO
from lib.font import loadFont
class wand():
#================================================ GLOBALS ===================================
font = loadFont()
delay = 0 #Delay per line when writing
#================================================= FUNCTIONS ====================================
def writeByte(byte):
i=0
for b in str(str(bin(byte))[2:])[::-1]:
#[2:] is to remove 0b (python binary header) from str
# [::-1] inverts byte direction.
gp.output(i,b)
i+=1
while i<8:
gp.output(i,0)
i+=1
#Write individual character
def writeChar(char):
if char in wand.font:
char = wand.font[char]
else:
char = wand.font['pacman']
for byte in char:
#Loop through byte
wand.writeByte(byte)
time.sleep(wand.delay)
#Write string in sequence
def writeString(string):
wand.wait() # Wait for wand to be flicked
for char in string:
#Loop through each letter in string
wand.writeChar(char)
wand.writeChar('pad') # 1 line spacing between each letter
def wait():
#Wait for wand to be flicked
    GPIO.setmode(GPIO.BOARD) #RPi.GPIO's API is setmode(); input pin 5 uses BOARD numbering
    while (GPIO.input(5) == True):
      time.sleep(0.0001) #High polling rate
    GPIO.setmode(GPIO.BCM) #back to BCM numbering for the output pins in gp.matrix
def load(delay):
wand.delay = delay
gp.load() # Setup GPIO pins for use
#Setup input
    GPIO.setmode(GPIO.BOARD)
    GPIO.setup(5,GPIO.IN)
    GPIO.setmode(GPIO.BCM) #back to BCM numbering, matching gp.load() above
wand.writeChar('pad') #switch all GPIO to Off
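  # Example usage (illustrative sketch; delay value and message are arbitrary):
  #
  #   wand.load(delay=0.002)
  #   wand.writeString("HELLO")   # blocks until the wand is flicked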
# GPIO wrapper for higher level access
class gp():
#For different pin set-ups
matrix = {}
matrix[0] = 18
matrix[1] = 23
matrix[2] = 24
matrix[3] = 4
matrix[4] = 22
matrix[5] = 27
matrix[6] = 17
matrix[7] = 25
#Output pin
def output(pin,b):
if int(b) == 1:
GPIO.output(gp.matrix[pin],True)
else:
GPIO.output(gp.matrix[pin],False)
#Initialize pins
def load():
GPIO.setmode(GPIO.BCM)
i=0
while (i < 8 ):
GPIO.setup(gp.matrix[i],GPIO.OUT)
i+=1
| mit | -973,664,377,961,744,900 | 20.01087 | 98 | 0.585618 | false |
linuxdeepin/deepin-ui | dtk/ui/volume_button.py | 1 | 23225 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2012 Deepin, Inc.
# 2012 Hailong Qiu
#
# Author: Hailong Qiu <[email protected]>
# Maintainer: Hailong Qiu <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from cache_pixbuf import CachePixbuf
from draw import draw_pixbuf
from theme import ui_theme
from utils import set_clickable_cursor
import tooltip as Tooltip
import gobject
import gtk
'''
100 / 500 = 0.2
x = 100 -> 100 * 0.2 = 20
x = 500 -> 500 * 0.2 = 100
x = 100 -> 100 * 0.2 = 20
'''
ZERO_STATE = 0
MIN_STATE = 1
MID_STATE = 2
MAX_STATE = 3
MUTE_STATE = -1
MOUSE_VOLUME_STATE_PRESS = 1
MOUSE_VOLUME_STATE_HOVER = 2
MOUSE_VOLUME_STATE_NORMAL = -1
VOLUME_RIGHT = "right"
VOLUME_LEFT = "left"
class VolumeButton(gtk.Button):
'''
Volume button.
'''
__gsignals__ = {
"volume-state-changed":(gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE,(gobject.TYPE_INT,gobject.TYPE_INT,))
}
def __init__(self,
volume_max_value = 100,
volume_width = 52,
volume_x = 0,
volume_y = 15,
line_height = 3,
volume_padding_x = 5,
volume_level_values = [(1, 33),(34, 66),(67, 100)],
scroll_bool = False,
press_emit_bool = False,
inc_value=5,
bg_pixbuf = ui_theme.get_pixbuf("volumebutton/bg.png"),
fg_pixbuf = ui_theme.get_pixbuf("volumebutton/fg.png"),
zero_volume_normal_pixbuf = ui_theme.get_pixbuf("volumebutton/zero_normal.png"),
zero_volume_hover_pixbuf = ui_theme.get_pixbuf("volumebutton/zero_hover.png"),
zero_volume_press_pixbuf = ui_theme.get_pixbuf("volumebutton/zero_press.png"),
min_volume_normal_pixbuf = ui_theme.get_pixbuf("volumebutton/lower_normal.png"),
min_volume_hover_pixbuf = ui_theme.get_pixbuf("volumebutton/lower_hover.png"),
min_volume_press_pixbuf = ui_theme.get_pixbuf("volumebutton/lower_press.png"),
mid_volume_normal_pixbuf = ui_theme.get_pixbuf("volumebutton/middle_normal.png"),
mid_volume_hover_pixbuf = ui_theme.get_pixbuf("volumebutton/middle_hover.png"),
mid_volume_press_pixbuf = ui_theme.get_pixbuf("volumebutton/middle_press.png"),
max_volume_normal_pixbuf = ui_theme.get_pixbuf("volumebutton/high_normal.png"),
max_volume_hover_pixbuf = ui_theme.get_pixbuf("volumebutton/high_hover.png"),
max_volume_press_pixbuf = ui_theme.get_pixbuf("volumebutton/high_press.png"),
mute_volume_normal_pixbuf = ui_theme.get_pixbuf("volumebutton/mute_normal.png"),
mute_volume_hover_pixbuf = ui_theme.get_pixbuf("volumebutton/mute_hover.png"),
mute_volume_press_pixbuf = ui_theme.get_pixbuf("volumebutton/mute_press.png"),
point_volume_pixbuf = ui_theme.get_pixbuf("volumebutton/point_normal.png"),
):
'''
Initialize VolumeButton class.
@param volume_max_value: Maximum value of volume, default is 100.
        @param volume_width: Width of volume button widget, default is 52 pixels.
@param volume_x: X padding of volume button widget.
@param volume_y: Y padding of volume button widget.
        @param line_height: Height of volume progressbar, default is 3 pixels.
@param volume_padding_x: X padding value around volume progressbar.
        @param volume_level_values: List of (min, max) value ranges that map to the low, middle and high volume states.
        @param scroll_bool: True allows scrolling to change the value, default is False.
        @param press_emit_bool: True to emit the `volume-state-changed` signal on press, default is False.
        @param inc_value: The increment applied when the volume changes, default is 5.
'''
gtk.Button.__init__(self)
###########################
if volume_x < max_volume_normal_pixbuf.get_pixbuf().get_width():
volume_x = max_volume_normal_pixbuf.get_pixbuf().get_width()
'''Init pixbuf.'''
self.__bg_pixbuf = bg_pixbuf
self.__fg_pixbuf = fg_pixbuf
self.__bg_cache_pixbuf = CachePixbuf()
self.__fg_cache_pixbuf = CachePixbuf()
# zero volume pixbuf.
self.__zero_volume_normal_pixbuf = zero_volume_normal_pixbuf
self.__zero_volume_hover_pixbuf = zero_volume_hover_pixbuf
self.__zero_volume_press_pixbuf = zero_volume_press_pixbuf
# min volume pixbuf.
self.__min_volume_normal_pixbuf = min_volume_normal_pixbuf
self.__min_volume_hover_pixbuf = min_volume_hover_pixbuf
self.__min_volume_press_pixbuf = min_volume_press_pixbuf
# mid volume pixbuf:
self.__mid_volume_normal_pixbuf = mid_volume_normal_pixbuf
self.__mid_volume_hover_pixbuf = mid_volume_hover_pixbuf
self.__mid_volume_press_pixbuf = mid_volume_press_pixbuf
# max volume pixbuf[normal, hover, press].
self.__max_volume_normal_pixbuf = max_volume_normal_pixbuf
self.__max_volume_hover_pixbuf = max_volume_hover_pixbuf
self.__max_volume_press_pixbuf = max_volume_press_pixbuf
# mute volume pixbuf[normal, hover, press].
self.__mute_volume_normal_pixbuf = mute_volume_normal_pixbuf
self.__mute_volume_hover_pixbuf = mute_volume_hover_pixbuf
self.__mute_volume_press_pixbuf = mute_volume_press_pixbuf
# point volume pixbuf.
self.__point_volume_pixbuf = point_volume_pixbuf
'''Init Set VolumeButton attr.'''
'''Init value.'''
self.__press_emit_bool = press_emit_bool
self.__line_height = line_height
self.__current_value = 0
self.__mute_bool = False
self.temp_mute_bool = False
self.__drag = False
self.__volume_max_value = volume_max_value
self.__volume_width = volume_width
self.__volume_left_x = volume_x - self.__max_volume_normal_pixbuf.get_pixbuf().get_width() - volume_padding_x
self.__volume_left_y = volume_y - self.__max_volume_normal_pixbuf.get_pixbuf().get_height()/2 + self.__point_volume_pixbuf.get_pixbuf().get_height()/2
self.__volume_right_x = volume_x
self.__volume_right_y = volume_y
'''Left'''
self.volume_level_values = volume_level_values
self.__volume_state = MIN_STATE
self.__mouse_state = MOUSE_VOLUME_STATE_NORMAL
'''Right'''
# bg value.
self.__bg_x = 0
self.__bg_y = self.__volume_right_y
self.__bg_padding_x = self.__volume_right_x
# fg value.
self.__fg_x = 0
self.__fg_y = self.__volume_right_y
self.__fg_padding_x = self.__volume_right_x
# point value.
self.__point_y = self.__volume_right_y
self.__point_padding_x = self.__volume_right_x
self.inc_value = inc_value
'''Init VolumeButton event.'''
self.add_events(gtk.gdk.ALL_EVENTS_MASK)
self.connect("expose-event", self.__expose_draw_volume)
self.connect("motion-notify-event", self.__motion_mouse_set_point)
self.connect("button-press-event", self.__press_mouse_set_point)
self.connect("button-release-event", self.__release_mouse_set_point)
'''Event value'''
self.press_bool = False
# scroll event.
if scroll_bool:
self.connect("scroll-event", self.__scroll_mouse_set_point)
self.set_size_request(volume_width + self.__volume_left_x + self.__volume_right_x + self.__mute_volume_normal_pixbuf.get_pixbuf().get_width(), 30)
set_clickable_cursor(self)
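    # Example (illustrative sketch):
    #
    #   button = VolumeButton(volume_max_value=100, scroll_bool=True)
    #   button.connect("volume-state-changed",
    #                  lambda widget, value, state: None)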
def set_press_emit_bool(self, emit_bool):
self.__press_emit_bool = emit_bool
def __set_point_padding_x(self, event):
self.__mute_bool = False
self.__point_padding_x = int(event.x)
self.queue_draw()
def __press_mouse_set_point(self, widget, event):
temp_x = int(event.x)
temp_min_x = self.__bg_x + self.__bg_padding_x - self.__point_volume_pixbuf.get_pixbuf().get_width()/2
temp_max_x = self.__bg_x + self.__bg_padding_x + self.__volume_width + self.__point_volume_pixbuf.get_pixbuf().get_width()/2
self.queue_draw()
if temp_min_x < temp_x < temp_max_x:
self.__set_point_padding_x(event)
self.__drag = True
else:
if self.__volume_left_x <= temp_x <= temp_min_x:
# Set mouse state press.
self.__mouse_state = MOUSE_VOLUME_STATE_PRESS
self.temp_mute_bool = True
self.press_bool = True
def __release_mouse_set_point(self, widget, event):
# Set mouse state normal.
self.__mouse_state = MOUSE_VOLUME_STATE_NORMAL
self.__drag = False
self.press_bool = False
temp_x = int(event.x)
temp_y = int(event.y)
temp_min_x = self.__bg_x + self.__bg_padding_x - self.__point_volume_pixbuf.get_pixbuf().get_width()/2
if self.__volume_left_x <= temp_x <= temp_min_x and ( self.__volume_left_y <=temp_y < (self.__volume_left_y + self.__mute_volume_hover_pixbuf.get_pixbuf().get_height())):
if self.temp_mute_bool and not self.__mute_bool:
# Set mute state.
self.__mute_bool = not self.__mute_bool
self.__volume_state = MUTE_STATE
self.temp_mute_bool = False
else: # modify state.
self.__mute_bool = False
self.temp_mute_bool = False
self.__set_volume_value_to_state(self.__current_value)
self.queue_draw()
if self.__press_emit_bool:
self.emit("volume-state-changed", self.__current_value, self.__volume_state)
self.queue_draw()
def __motion_mouse_set_point(self, widget, event):
temp_x = int(event.x)
temp_y = int(event.y)
temp_min_x = self.__bg_x + self.__bg_padding_x - self.__point_volume_pixbuf.get_pixbuf().get_width()/2
if (self.__volume_left_x <= temp_x <= temp_min_x) and ( self.__volume_left_y <=temp_y < (self.__volume_left_y + self.__mute_volume_hover_pixbuf.get_pixbuf().get_height())):
self.__mouse_state = MOUSE_VOLUME_STATE_HOVER
else:
self.__mouse_state = MOUSE_VOLUME_STATE_NORMAL
if not self.press_bool:
self.queue_draw()
if self.__drag:
self.__set_point_padding_x(event)
def __scroll_mouse_set_point(self, widget, event):
if event.direction == gtk.gdk.SCROLL_UP:
self.volume_other_set_value(VOLUME_RIGHT)
elif event.direction == gtk.gdk.SCROLL_DOWN:
self.volume_other_set_value(VOLUME_LEFT)
def volume_other_set_value(self, volume_type):
point_width_average = self.__point_volume_pixbuf.get_pixbuf().get_width() / 2
temp_min = (self.__point_padding_x - point_width_average)
temp_max = (self.__point_padding_x + self.__volume_width - point_width_average)
self.__mute_bool = False
if volume_type == VOLUME_RIGHT:
if self.__point_padding_x >= temp_max:
self.__point_padding_x = temp_max
else:
self.__point_padding_x += self.inc_value
elif volume_type == VOLUME_LEFT:
if self.__point_padding_x <= temp_min:
self.__point_padding_x = temp_min
else:
self.__point_padding_x -= self.inc_value
self.queue_draw()
def __expose_draw_volume(self, widget, event):
self.__draw_volume_right(widget, event) # 1: get current value.
self.__set_volume_value_to_state(self.__current_value) # 2: value to state.
self.__draw_volume_left(widget, event) # 3: draw state pixbuf.
if not self.__press_emit_bool:
self.emit("volume-state-changed", self.__current_value, self.__volume_state)
# propagate_expose(widget, event)
return True
'''Left function'''
@property
def volume_state(self):
return self.__volume_state
@volume_state.setter
def volume_state(self, state):
if state == MIN_STATE:
self.__volume_state = MIN_STATE
elif state == ZERO_STATE:
self.__volume_state = ZERO_STATE
elif state == MID_STATE:
self.__volume_state = MID_STATE
elif state == MAX_STATE:
self.__volume_state = MAX_STATE
elif state == MUTE_STATE:
self.__volume_state = MUTE_STATE
@volume_state.getter
def volume_state(self):
return self.__volume_state
@volume_state.deleter
def volume_state(self):
del self.__volume_state
    def set_volume_level_values(self, show_value):
        # Expect three (low, high) numeric pairs; keep them only if the
        # subtraction check shows they really are numeric ranges.
        try:
            show_value[0][0] - show_value[0][1]
            show_value[1][0] - show_value[1][1]
            show_value[2][0] - show_value[2][1]
            self.volume_level_values = show_value
        except:
            print "Invalid volume level values:", show_value
def __set_volume_value_to_state(self, value):
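        # When not muted, map the numeric volume value onto an icon state
        # (min/mid/max, or zero for 0) using the configured volume_level_values
        # ranges; values outside every range fall back to the mute icon.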
if not self.__mute_bool:
temp_show_value = self.volume_level_values
if temp_show_value[0][0] <= value <= temp_show_value[0][1]:
self.__volume_state = MIN_STATE
elif temp_show_value[1][0] <= value <= temp_show_value[1][1]:
self.__volume_state = MID_STATE
elif temp_show_value[2][0] <= value <= temp_show_value[2][1]:
self.__volume_state = MAX_STATE
elif 0 == value:
self.__volume_state = ZERO_STATE
else:
self.__volume_state = MUTE_STATE
def set_volume_mute(self, mute_flag=True):
if mute_flag:
self.temp_mute_bool = False
self.__mute_bool = True
self.__volume_state = MUTE_STATE
else:
self.temp_mute_bool = False
self.__mute_bool = False
self.__set_volume_value_to_state(self.value)
self.queue_draw()
def __draw_volume_left(self, widget, event):
cr = widget.window.cairo_create()
x, y, w, h = widget.allocation
if self.__volume_state == MUTE_STATE: # mute state.
if self.__mouse_state == MOUSE_VOLUME_STATE_NORMAL:
pixbuf = self.__mute_volume_normal_pixbuf
elif self.__mouse_state == MOUSE_VOLUME_STATE_HOVER:
pixbuf = self.__mute_volume_hover_pixbuf
elif self.__mouse_state == MOUSE_VOLUME_STATE_PRESS:
pixbuf = self.__mute_volume_press_pixbuf
elif self.__volume_state == ZERO_STATE: # zero state.
if self.__mouse_state == MOUSE_VOLUME_STATE_NORMAL:
pixbuf = self.__zero_volume_normal_pixbuf
elif self.__mouse_state == MOUSE_VOLUME_STATE_HOVER:
pixbuf = self.__zero_volume_hover_pixbuf
elif self.__mouse_state == MOUSE_VOLUME_STATE_PRESS:
pixbuf = self.__zero_volume_press_pixbuf
elif self.__volume_state == MIN_STATE: # min state.
if self.__mouse_state == MOUSE_VOLUME_STATE_NORMAL:
pixbuf = self.__min_volume_normal_pixbuf
elif self.__mouse_state == MOUSE_VOLUME_STATE_HOVER:
pixbuf = self.__min_volume_hover_pixbuf
elif self.__mouse_state == MOUSE_VOLUME_STATE_PRESS:
pixbuf = self.__min_volume_press_pixbuf
elif self.__volume_state == MID_STATE: # mid state.
if self.__mouse_state == MOUSE_VOLUME_STATE_NORMAL:
pixbuf = self.__mid_volume_normal_pixbuf
elif self.__mouse_state == MOUSE_VOLUME_STATE_HOVER:
pixbuf = self.__mid_volume_hover_pixbuf
elif self.__mouse_state == MOUSE_VOLUME_STATE_PRESS:
pixbuf = self.__mid_volume_press_pixbuf
elif self.__volume_state == MAX_STATE: # max state.
if self.__mouse_state == MOUSE_VOLUME_STATE_NORMAL:
pixbuf = self.__max_volume_normal_pixbuf
elif self.__mouse_state == MOUSE_VOLUME_STATE_HOVER:
pixbuf = self.__max_volume_hover_pixbuf
elif self.__mouse_state == MOUSE_VOLUME_STATE_PRESS:
pixbuf = self.__max_volume_press_pixbuf
draw_pixbuf(cr,
pixbuf.get_pixbuf(),
x + self.__volume_left_x,
y + self.__volume_left_y,
)
'''Right function'''
@property
def line_height(self):
return self.__line_height
@line_height.setter
def line_height(self, width):
self.__line_height = width
self.queue_draw()
@line_height.getter
def line_height(self):
return self.__line_height
@line_height.deleter
def line_height(self):
del self.__line_height
@property
def value(self):
return self.__current_value
@value.setter
def value(self, value):
if 0 <= value <= self.__volume_max_value:
Tooltip.text(self, str(value))
temp_padding = (float(self.__volume_max_value) / self.__volume_width)
temp_padding_x = float(value) / temp_padding
self.__point_padding_x = temp_padding_x + ((self.__fg_padding_x))
self.queue_draw()
@value.getter
def value(self):
return self.__current_value
def set_volume_position(self, x, y):
self.__volume_right_x = x
self.__volume_right_y = y
# Set x.
self.__bg_padding_x = self.__volume_right_x
self.__fg_padding_x = self.__volume_right_x
self.__point_padding_x = self.__volume_right_x
# Set y.
self.__bg_y = self.__volume_right_y
self.__fg_y = self.__volume_right_y
self.__point_y = self.__volume_right_y
@property
def max_value(self):
        return self.__volume_max_value
@max_value.setter
def max_value(self, max_value):
self.__volume_max_value = max_value
@max_value.getter
def max_value(self):
return self.__volume_max_value
@max_value.deleter
def max_value(self):
del self.__volume_max_value
def __draw_volume_right(self, widget, event):
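        # Draw the slider: scale the background to the full track width, scale
        # the foreground up to the drag point, derive __current_value from the
        # point position, then draw the drag point itself clamped to the track.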
cr = widget.window.cairo_create()
cr.set_line_width(self.__line_height)
x, y, w, h = widget.allocation
fg_height_average = (self.__point_volume_pixbuf.get_pixbuf().get_height() - self.__fg_pixbuf.get_pixbuf().get_height()) / 2
bg_height_average = (self.__point_volume_pixbuf.get_pixbuf().get_height() - self.__bg_pixbuf.get_pixbuf().get_height()) / 2
point_width_average = self.__point_volume_pixbuf.get_pixbuf().get_width() / 2
##################################################
# Draw bg.
if self.__volume_width > 0:
self.__bg_cache_pixbuf.scale(self.__bg_pixbuf.get_pixbuf(),
self.__volume_width,
self.__bg_pixbuf.get_pixbuf().get_height(),
)
draw_pixbuf(
cr,
self.__bg_cache_pixbuf.get_cache(),
x + self.__bg_x + self.__bg_padding_x,
y + self.__bg_y + bg_height_average)
temp_fg_padding_x = self.__point_padding_x - (self.__fg_x + self.__fg_padding_x)
if temp_fg_padding_x < 0:
temp_fg_padding_x = 0
if temp_fg_padding_x > self.__volume_width:
temp_fg_padding_x = self.__volume_width
# Get current value.
self.__current_value = temp_fg_padding_x * (float(self.__volume_max_value) / self.__volume_width)
# Draw fg.
if temp_fg_padding_x > 0:
self.__fg_cache_pixbuf.scale(self.__fg_pixbuf.get_pixbuf(),
int(temp_fg_padding_x),
self.__fg_pixbuf.get_pixbuf().get_height(),
)
draw_pixbuf(
cr,
self.__fg_cache_pixbuf.get_cache(),
x + self.__fg_x + self.__fg_padding_x,
y + self.__fg_y + fg_height_average)
#################################################
# Draw point.
temp_point_padding_x = (self.__point_padding_x - point_width_average)
temp_min = (self.__volume_right_x - point_width_average)
temp_max = (self.__volume_right_x + self.__volume_width - point_width_average)
if temp_point_padding_x < temp_min:
temp_point_padding_x = temp_min
if temp_point_padding_x > temp_max:
temp_point_padding_x = temp_max
draw_pixbuf(cr,
self.__point_volume_pixbuf.get_pixbuf(),
x + temp_point_padding_x,
y + self.__point_y)
gobject.type_register(VolumeButton)
if __name__ == "__main__":
import random
from dtk.ui.window import Window
def set_time_position():
volume_button.value = (random.randint(0, 100))
return True
def get_volume_value(volume_button, value, volume_state):
print "[get_volume_value:]"
print "volume_button:%s" % volume_button
print "value:%s" % value
print "volume_state:%s" % volume_state
def set_value_button_clicked(widget):
print volume_button.volume_state
volume_button.max_value = 200
volume_button.value = 100
volume_button.line_height = 4 # Set draw line width.
# volume_button.set_volume_level_values([(0,10),(11,80),(81,100)])
win = gtk.Window(gtk.WINDOW_TOPLEVEL)
# win = Window()
win.set_size_request(200, 120)
    win.set_title("Volume button test")
main_vbox = gtk.VBox()
volume_button = VolumeButton(100,220)
volume_button.value = 100
# volume_button = VolumeButton()
volume_button.connect("volume-state-changed", get_volume_value)
    set_value_button = gtk.Button("Set volume value")
set_value_button.connect("clicked", set_value_button_clicked)
main_vbox.pack_start(volume_button, True, True)
main_vbox.pack_start(set_value_button, True, True)
# win.add(volume_button)
win.add(main_vbox)
# win.window_frame.add(main_vbox)
# gtk.timeout_add(500, set_time_position)
win.show_all()
gtk.main()
| gpl-3.0 | -3,288,997,962,322,264,000 | 40.504472 | 180 | 0.571139 | false |
sbg/Mitty | mitty/test/lib/test_vcfio.py | 1 | 2460 | import os
from nose.tools import assert_raises, assert_sequence_equal
import mitty.lib.vcfio as vio
import mitty.test
def test_basic():
"""I/O: Basic vcf loading."""
v = vio.load_variant_file(
os.path.join(mitty.test.example_data_dir, 'tiny.vcf.gz'), 'g0_s0',
os.path.join(mitty.test.example_data_dir, 'tiny.8-14.bed'))
# A test of pysam properly filtering variants to region
assert v[0]['v'][1][0].tuple() == (11, 'CAA', 'C', 'D', 2), v[0]['v']
assert v[0]['v'][0][0].tuple() == (14, 'G', 'T', 'X', 0), v[0]['v']
assert len(v[0]['v'][0]) == 1
def test_complex_variant_error():
"""I/O: Flag complex variants in VCF"""
assert_raises(ValueError,
vio.load_variant_file,
os.path.join(mitty.test.example_data_dir, 'flawed-tiny.vcf.gz'),
'g0_s0',
os.path.join(mitty.test.example_data_dir, 'tiny.whole.bed'))
# This tests
# 1. test-del-snp.vcf : DEL followed immediately by a SNP
# 2. test-snp-ins.vcf : SNP followed by an INS
# 3. test-snp-del-opposite-phase.vcf : DEL and SNP oppositely phased
# ATGACGTATCCAAGGAGGCGTTACC
# 1234567890123456789012345
#
def test_crowded_vcf():
"""I/O: These are all legal - if crowded - variant combinations"""
for f in ['test-del-snp.vcf.gz', 'test-snp-ins.vcf.gz', 'test-snp-del-opposite-phase.vcf.gz']:
v = vio.load_variant_file(
os.path.join(mitty.test.example_data_dir, f),
'g0_s0',
os.path.join(mitty.test.example_data_dir, 'tiny.whole.bed'))
def load_data():
"""A set of reference and variants for testing read generation."""
seq = open(os.path.join(mitty.test.example_data_dir, 'tiny.fasta')).readlines()[1]
# df = vio.read_sample_from_vcf(os.path.join(mitty.test.example_data_dir, 'tiny.vcf'), 'g0_s0')
df = vio.load_variant_file(
os.path.join(mitty.test.example_data_dir, 'tiny.vcf.gz'),
'g0_s0',
os.path.join(mitty.test.example_data_dir, 'tiny.whole.bed'))
return seq, df
def test_snp_expansion1():
"""I/O: SNP expansion basic"""
ref_seq, vcf = load_data()
v = vcf[0]['v'][1][0]
assert v.cigarop == 'X', v
assert v.oplen == 0, v
def test_ins_expansion1():
"""I/O: INS expansion basic"""
ref_seq, vcf = load_data()
v = vcf[0]['v'][1][1]
assert v.cigarop == 'I', v
assert v.oplen == 3, v
def test_del_expansion1():
"""I/O: DEL expansion basic"""
ref_seq, vcf = load_data()
v = vcf[0]['v'][1][2]
assert v.cigarop == 'D', v
assert v.oplen == 2, v | apache-2.0 | 6,550,731,465,486,002,000 | 29.7625 | 97 | 0.623171 | false |
swoop-inc/graflux | test/query_engine_test.py | 1 | 3803 | import six
import unittest
import time
from graflux.query_engine import QueryEngine
from influxdb import InfluxDBClient
import influxdb.exceptions
class QueryEngineTest(unittest.TestCase):
def setUp(self):
self.db = 'graflux_test'
self.config = {
'influxdb': {
'host': 'localhost',
'port': 8086,
'db': self.db
},
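            # 'aggregates' maps metric-name regexes to the InfluxDB aggregation
            # function used for matching metrics ('.*' is the catch-all
            # default); 'steps' maps a query time range in seconds to the
            # GROUP BY interval used for it.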
'aggregates': [
['.sum$', 'sum'],
['.gauge$', 'last'],
['.*', 'mean']
],
'steps': [
[1000, 60],
[5000, 300],
[10000, 600]
]
}
self.client = InfluxDBClient(database=self.db)
self.query_engine = QueryEngine(self.config)
def clean_db(self):
try:
self.client.drop_database(self.db)
except influxdb.exceptions.InfluxDBClientError:
pass
self.client.create_database(self.db)
def create_test_data(self, metrics):
data = [{
"measurement": metric,
"tags": {},
"fields": {
"value": 1,
}
}
for metric in metrics]
self.assertTrue(self.client.write_points(data))
def test_get_series_empty_db(self):
self.clean_db()
result = self.query_engine.get_series()
six.assertCountEqual(self, result, [])
def test_get_series(self):
metrics = [
'test.series.one',
'test.series.two'
]
self.clean_db()
self.create_test_data(metrics)
result = self.query_engine.get_series()
six.assertCountEqual(self, result, metrics)
def test_build_influx_query(self):
query = self.query_engine.build_influx_query('test.metric', 0, 500000)
self.assertEqual(query, 'SELECT mean(value) AS value FROM test.metric WHERE time > 0s AND time <= 500000s GROUP BY time(300s)')
def test_query(self):
metrics = [
'test.series.one',
'test.series.two'
]
self.clean_db()
self.create_test_data(metrics)
now = int(time.time())
start = now - 5000
end = now + 5000
result = self.query_engine.query(metrics, start, end)
self.assertEqual(result['from'], start)
self.assertEqual(result['to'], end)
self.assertEqual(result['step'], 600)
six.assertCountEqual(self, result['series'], {
'test.series.one': [1],
'test.series.two': [1]
})
def test_lookup_aggregate(self):
self.assertEqual(self.query_engine.lookup_aggregate('test.metric.sum'), 'sum')
self.assertEqual(self.query_engine.lookup_aggregate('test.metric.gauge'), 'last')
self.assertEqual(self.query_engine.lookup_aggregate('test.metric.randomness'), 'mean')
def test_determine_interval(self):
self.assertEqual(self.query_engine.determine_interval(0, 500), 60)
self.assertEqual(self.query_engine.determine_interval(0, 5000), 300)
self.assertEqual(self.query_engine.determine_interval(0, 50000), 600)
def test_build_query_sets(self):
sum_metrics = [
'test.metric.one.sum',
'test.metric.two.sum'
]
mean_metrics = [
'test.metric.whatever'
]
last_metrics = [
'test.metric.gauge'
]
sets = self.query_engine.build_query_sets(sum_metrics + mean_metrics + last_metrics)
six.assertCountEqual(self, sets, {
'mean': mean_metrics,
'last': last_metrics,
'sum': sum_metrics
})
if __name__ == '__main__':
unittest.main()
| mit | -1,794,805,965,491,786,800 | 27.810606 | 135 | 0.536682 | false |
zkota/pyblio-1.3 | Legacy/GnomeUI/Document.py | 1 | 39739 | # -*- coding: utf-8 -*-
#
# This file is part of pybliographer
#
# Copyright (C) 1998-2004 Frederic GOBRY
# Email : [email protected]
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
#
''' This module defines a Document class '''
import gobject
from gnome import ui
import gnome
import gtk
import gtk.glade
from gtk import gdk
from Legacy.GnomeUI import Editor, Entry, FileSelector, Format
from Legacy.GnomeUI import Index, OpenURL, Search, Utils, Citation
from Legacy.GnomeUI.Sort import SortDialog
from Legacy.GnomeUI.Medline import MedlineUI
from Legacy import Base, Config, Connector, Exceptions, Fields, Open
from Legacy import Resource, Selection, Sort, Types, version
from Legacy import Search as SearchCls
import Legacy.Style.Utils
import os, string, copy, types, sys, traceback, stat
import cPickle as pickle
printable = string.lowercase + string.uppercase + string.digits
uim_content = '''
<ui>
<menubar name="Menubar">
<menu action="File">
<menuitem action="New"/>
<menuitem action="Open"/>
<menuitem action="Open_URL"/>
<menuitem action="Merge"/>
<menuitem action="Query"/>
<menuitem action="Save"/>
<menuitem action="Save_As"/>
<separator/>
<menu action="Recent">
<placeholder name="Previous"/>
</menu>
<separator/>
<menuitem action="Close"/>
<menuitem action="Quit"/>
</menu>
<menu action="EditMenu">
<menuitem action="Cut"/>
<menuitem action="Copy"/>
<menuitem action="Paste"/>
<menuitem action="Clear"/>
<separator/>
<menuitem action="Add"/>
<menuitem action="Edit"/>
<menuitem action="Delete"/>
<separator/>
<menuitem action="Find"/>
<menuitem action="Sort"/>
</menu>
<menu action="ViewMenu">
<menu action="ViewResource">
<placeholder name="Viewables" />
</menu>
</menu>
<menu action="CiteMenu">
<menuitem action="Connect"/>
<menuitem action="Cite"/>
<menuitem action="Update"/>
</menu>
<menu action="Settings">
<menuitem action="Fields"/>
<menuitem action="Preferences"/>
<separator/>
<menuitem action="Forget"/>
</menu>
<menu action="HelpMenu">
<menuitem action="Contents"/>
<menuitem action="About"/>
</menu>
</menubar>
<toolbar name="Toolbar">
<toolitem action="Open"/>
<toolitem action="Save"/>
<separator/>
<toolitem action="Add"/>
<separator/>
<toolitem action="Cite"/>
<separator/>
</toolbar>
<popup name="Popup">
<menuitem action="Add"/>
<menuitem action="Edit"/>
<menu action="ViewResource">
<placeholder name="Viewables" />
</menu>
<menuitem action="Delete"/>
</popup>
</ui>
'''
class Document (Connector.Publisher):
def __init__ (self, database):
self.uim = gtk.UIManager ()
self.recents = None
self.viewables = None
self.actiongroup = gtk.ActionGroup ('Main')
self.actiongroup.add_actions ([
# id stock label accel tooltip callback
('File', None, _('_File')),
('EditMenu', None, _('_Edit')),
('ViewMenu', None, _('_View')),
('CiteMenu', None, _('_Cite')),
('Settings', None, _('_Settings')),
('HelpMenu', None, _('_Help')),
('Recent', None, _('Recent documents')),
('New', gtk.STOCK_NEW, None, None, None, self.new_document),
('Open', gtk.STOCK_OPEN, None, None, _('Open a file'), self.ui_open_document),
('Open_URL', None, _('Open _Location'), '<control>l', None, self.ui_open_location),
('Save', gtk.STOCK_SAVE, None, None, _('Save the current file'), self.save_document),
('Save_As', gtk.STOCK_SAVE_AS, None, None, None, self.save_document_as),
('Close', gtk.STOCK_CLOSE, None, None, None, self.close_document),
('Quit', gtk.STOCK_QUIT, None, None, None, self.exit_application),
('Merge', None, _('Merge With...'), '<control>g', None, self.merge_database),
('Query', None, _('External Query...'), '<control>e', None, self.query_database),
('Cut', gtk.STOCK_CUT, None, None, None, self.cut_entry),
('Copy', gtk.STOCK_COPY, None, None, None, self.copy_entry),
('Paste', gtk.STOCK_PASTE, None, None, None, self.paste_entry),
('Clear', gtk.STOCK_CLEAR, None, None, None, self.clear_entries),
('Add', gtk.STOCK_ADD, None, '<shift><control>n', _('Add a new entry'), self.add_entry),
('Delete', gtk.STOCK_DELETE, None, None, None, self.delete_entry),
('Find', gtk.STOCK_FIND, None, None, None, self.find_entries),
('Sort', None, _('S_ort...'), None, None, self.sort_entries),
('Connect', gtk.STOCK_CONNECT, _('C_onnect...'), None, None, self._on_connect),
('Cite', gtk.STOCK_JUMP_TO, _('Cite'), None, _('Cite key(s)'), self.wp_cite),
('Update', gtk.STOCK_EXECUTE, _('Update document'), None, None, self.wp_update),
('Fields', None, _('Fields...'), None, None, self.set_fields),
('Preferences', gtk.STOCK_PREFERENCES, None, None, None, self.set_preferences),
('Forget', None, _('Forget all changes'), None, None, self.forget_changes_cb),
('Contents', gtk.STOCK_HELP, None, None, None, self.on_documentation),
('ViewResource', None, _('_Resource'), None, None, self.view_entry),
])
if gtk.pygtk_version >= (2,6,0):
self.actiongroup.add_actions ([
('About', gtk.STOCK_ABOUT, None, None, None, self.about),
('Edit', gtk.STOCK_EDIT, None, '<shift><control>o', None, self.edit_entry),
])
else:
self.actiongroup.add_actions ([
('About', None, _('_About'), None, None, self.about),
('Edit', None, _('_Edit'), '<shift><control>o', None, self.edit_entry),
])
prev = self.actiongroup.get_action ('Recent')
prev.set_property ('is-important', True)
prev.set_property ('hide-if-empty', False)
view_action = self.actiongroup.get_action ('ViewResource')
view_action.set_property ('hide-if-empty', False)
self.uim.insert_action_group (self.actiongroup, 0)
self.uim.add_ui_from_string (uim_content)
self.uim.ensure_update ()
gp = os.path.join(Utils.glade_root, 'pyblio.glade')
self.xml = gtk.glade.XML (gp, 'main', domain = 'pyblio')
self.xml.signal_autoconnect (self)
self.w = self.xml.get_widget ('main')
self.paned = self.xml.get_widget ('main_pane')
self.w.set_menus (self.uim.get_widget ('/Menubar'))
self.w.set_toolbar (self.uim.get_widget ('/Toolbar'))
self.w.add_accel_group (self.uim.get_accel_group ())
self.w.add_events (gdk.KEY_PRESS_MASK)
self.w_save_btn = self.xml.get_widget ('_w_save_btn')
self.w_save_mnu = self.xml.get_widget ('_w_save_mnu')
# We manually add a simple search area
t = self.uim.get_widget ('/Toolbar')
h = gtk.HBox()
i = gtk.Image()
i.set_from_stock(gtk.STOCK_FIND, gtk.ICON_SIZE_LARGE_TOOLBAR)
h.pack_start(i, False, False)
# create a tooltips object
self.toolbartips = gtk.Tooltips()
self.quick_search = gtk.Entry()
self.quick_search.connect('activate', self.simple_search)
h.pack_start(self.quick_search, False, False)
self.toolbartips.set_tip(self.quick_search, _('Quick search'))
i = gtk.ToolItem()
i.add(h)
t.insert(i, -1)
i.show_all()
# The Index list
self.index = Index.Index (popup = self.uim.get_widget ('/Popup'))
self.paned.add1 (self.index.w)
self.index.Subscribe ('new-entry', self.add_entry)
self.index.Subscribe ('edit-entry', self.edit_entry)
## self.index.Subscribe ('view-entry', self.view_entry)
self.index.Subscribe ('delete-entry', self.delete_entry)
self.index.Subscribe ('select-entry', self.update_display)
self.index.Subscribe ('select-entries', self.freeze_display)
self.index.Subscribe ('drag-received', self.drag_received)
self.index.Subscribe ('drag-moved', self.drag_moved)
self.index.Subscribe ('click-on-field', self.sort_by_field)
self.paned.show_all ()
# The text area
self.display = Entry.Entry ()
self.paned.add2 (self.display.w)
# Status bar
self.statusbar = self.xml.get_widget ('statusbar')
# set window size
ui_width = Utils.config.get_int ('/apps/pyblio/ui/width') or -1
ui_height = Utils.config.get_int ('/apps/pyblio/ui/height') or -1
if ui_width != -1 and ui_height != -1:
self.w.set_default_size (ui_width, ui_height)
# set paned size
paned_height = Utils.config.get_int ('/apps/pyblio/ui/paned') or -1
self.paned.set_position (paned_height)
self.w.show_all ()
# application variables
self.data = database
self.selection = Selection.Selection ()
self.search_dg = None
self.sort_dg = None
self.wp = None # word processor to cite to
self.citator = None # citator that will format the citations
self.changed = 0
self.directory = None
self.editable = False # true when some entry is selected
self.incremental_start = None
self.incremental_search = ''
self.modification_date = None
# for autosave
self.source_id = None
# set the default sort method
default = Utils.config.get_string ('/apps/pyblio/sort/default')
if default is not None: default = pickle.loads (default)
self.sort_view (default)
self._title_set ()
self._set_edit_actions(False)
return
def _title_set (self):
if self.data.key is None:
self.w.set_title (_('Unnamed bibliographic database'))
return
name = os.path.basename (self.data.key.url [2])
self.w.set_title (name)
return
def set_preferences (self, * arg):
from Legacy.GnomeUI import Config
Config.run(self.w)
return
def set_fields (self, * arg):
from Legacy.GnomeUI import Fields
Fields.run (self.w)
return
def forget_changes_cb (self, * arg):
Config.forget_changes()
return
def update_history (self, history):
''' fill the " Previous Documents " menu with the specified list of documents '''
if self.recents:
for mid in self.recents_mid:
self.uim.remove_ui (mid)
self.uim.remove_action_group (self.recents)
self.recents_mid = []
self.recents = gtk.ActionGroup ('Recent')
self.uim.insert_action_group (self.recents, 1)
for item in history:
# Display name in the menu
quoted = string.replace (item [0], '_', '__')
mid = self.uim.new_merge_id ()
self.recents_mid.append (mid)
action = gtk.Action (str (mid), quoted, None, None)
self.recents.add_action (action)
action.connect ('activate', self._history_open_cb, item)
self.uim.add_ui (mid, '/Menubar/File/Recent', str (mid),
str (mid), gtk.UI_MANAGER_MENUITEM, False)
return
def _history_open_cb (self, id, w):
file, type = w
if not self.confirm (): return
self.open_document (file, type)
return
def redisplay_index (self, changed = -1):
''' redisplays the index. If changed is specified, set the
self.changed status to the given value '''
if changed != -1:
self.changed = changed
self.index.display (self.selection.iterator (self.data.iterator ()))
self.update_status ()
return
def format_query (self, style, format, output):
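        # Render the selected entries (or, when nothing is selected, every
        # entry of the current view) with the given bibliography style and
        # write the formatted result to `output'.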
try:
file = open (output, 'w')
except IOError, err:
self.w.error (_("can't open file `%s' for writing:\n%s")
% (output, str (err)))
return
entries = map (lambda x: x.key, self.index.selection ())
if not entries:
iter = self.selection.iterator (self.data.iterator ())
entries = []
e = iter.first ()
while e:
entries.append (e.key)
e = iter.next ()
url = Fields.URL (style)
try:
            Legacy.Style.Utils.generate (url, format, self.data, entries, file)
except RuntimeError, err:
print err
self.w.error (_("Error while parsing `%s':\n%s") % (style, err))
return
def wp_update(self, *arg):
if self.citator:
self.citator.update()
def update_status (self, status = -1):
''' redisplay status bar according to the current status '''
if status != -1: self.changed = status
if self.data.key is None:
text = _("New database")
else:
text = self.data.key.get_url ()
li = len (self.index)
ld = len (self.data)
if li == ld:
if ld == 0: num = _("[no entry]")
elif ld == 1: num = _("[1 entry]")
else: num = _("[%d entries]") % ld
else:
if ld == 0: num = _("[no entry]")
elif ld == 1: num = _("[%d/1 entry]") % li
else: num = _("[%d/%d entries]") % (li, ld)
text = text + ' ' + num
if self.changed:
text = text + ' ' + _("[modified]")
if self.selection.search:
text += ' - ' + _('view limited to: %s') % self.selection_name
self.actiongroup.get_action ('Save').set_property ('sensitive', self.changed)
self.statusbar.set_default (text)
return
def confirm (self):
        ''' if there are unsaved changes, ask the user whether to discard or save them; returns a true value when it is safe to proceed '''
if self.changed:
if Config.get('gnome/old-confirmation-dialog').data:
return Utils.Callback (_("The database has been modified.\nDiscard changes?"),
self.w).answer ()
else:
if Utils.Callback (_("The database has been modified.\nSave changes?"),
self.w).answer () and self.modification_check ():
self.save_document ()
else:
return True
return 1
def modification_check (self):
"""Check for external modification, if necessary,
ask user for permission to save.
Returns True if no modifications or overwrite accepted by user."""
if self.modification_date:
mod_date = os.stat (self.data.key.url [2]) [stat.ST_MTIME]
if mod_date > self.modification_date:
return Utils.Callback (
_("The database has been externally modified.\nOverwrite changes ?"),
self.w).answer ()
return True
def new_document (self, * arg):
''' callback corresponding to the "New Document" button '''
self.issue ('new-document', self)
return
def open_in_new(self, url, how=None, no_name=False):
''' open a document in a new window '''
self.issue('open-in-new', url, how, no_name)
return
def query_database (self, * arg):
''' callback corresponding to the "External Query..." button '''
MedlineUI(self, self.w)
return
def merge_database (self, * arg):
''' add all the entries of another database to the current one '''
# get a new file name
(url, how) = FileSelector.URLFileSelection (_("Merge file"),
has_auto = True).run ()
if url is None: return
try:
iterator = Open.bibiter (url, how = how)
except (Exceptions.ParserError,
Exceptions.FormatError,
Exceptions.FileError), error:
Utils.error_dialog (_("Open error"), error,
parent = self.w)
return
# loop over the entries
errors = []
try:
entry = iterator.first ()
except Exceptions.ParserError, msg:
errors = errors + msg.errors
while entry:
self.data.add (entry)
while 1:
try:
entry = iterator.next ()
break
except Exceptions.ParserError, msg:
errors = errors + list (msg.errors)
continue
self.redisplay_index (1)
if errors:
Utils.error_dialog (_("Merge status"), string.join (errors, '\n'),
parent = self.w)
return
def ui_open_document (self, * arg):
''' callback corresponding to "Open" '''
if not self.confirm (): return
# get a new file name
(url, how) = FileSelector.URLFileSelection (_("Open file")).run ()
if url is None: return
self.open_document (url, how)
return
def ui_open_location (self, * arg):
''' callback corresponding to "Open Location" '''
if not self.confirm (): return
(url, how) = OpenURL.OpenDialog (self.w).run ()
if url == None or url == "": return
self.open_document (url, how)
return
def open_document (self, url, how = None, no_name = False):
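        # Open the database at `url'. If a newer pyblio autosave file sits
        # next to it, offer to restore that instead. Afterwards any stale
        # autosave file of the previously opened document is removed and, if
        # autosaving is enabled, a fresh autosave timer is started.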
Utils.set_cursor (self.w, 'clock')
orig_url = Fields.URL (url)
url = orig_url.get_url ()
restore = False
if orig_url.url [0] == 'file':
name = orig_url.url [2]
auto_save = os.path.join (os.path.dirname (name),
'x-pyblio-save-' + os.path.basename (name))
if os.path.exists (auto_save):
mod_date = os.stat (name) [stat.ST_MTIME]
mod_date_auto = os.stat (auto_save) [stat.ST_MTIME]
if mod_date < mod_date_auto:
restore = Utils.Callback (_("An autosave file was found which is newer than the original file.\nDo you want to restore it?"), self.w).answer ()
if restore: url = auto_save
try:
data = Open.bibopen (url, how = how)
except (Exceptions.ParserError,
Exceptions.FormatError,
Exceptions.FileError), error:
Utils.set_cursor (self.w, 'normal')
Utils.error_dialog (_("Open error"), error,
parent = self.w)
return
# remove the old autosave object
if self.data.key is not None and self.source_id:
gobject.source_remove (self.source_id)
# remove old autosave file if exists
if self.data.key:
if self.data.key.url [0] == 'file':
old_file = self.data.key.url [2]
old_auto_save = os.path.join (os.path.dirname (old_file),
'x-pyblio-save-' + os.path.basename (old_file))
if os.path.exists (old_auto_save):
try:
os.remove (old_auto_save)
except (OSError, IOError), error:
Utils.set_cursor (self.w, 'normal')
self.w.error (_("Unable to remove autosave file `%s':\n%s") % (str (old_auto_save), str (error)))
return
Utils.set_cursor (self.w, 'normal')
if no_name: data.key = None
self.data = data
if restore:
# restore the original url internally,
# and change the document status
self.data.key = orig_url
self.redisplay_index (1)
else:
self.redisplay_index (0)
self._title_set ()
# eventually warn interested objects
self.issue ('open-document', self)
# create autosave object if needed
if Config.get ('base/autosave').data:
savetimeout = Config.get ('base/autosave interval').data
self.source_id = gobject.timeout_add (savetimeout * 60 * 1000, self.autosave, url, self.data.id)
return
def autosave (self, url, how):
''' autosave file as x-pyblio-save-filename '''
if self.data.key.url [0] != 'file': return False
name = self.data.key.url [2]
# create an autosave file
save = os.path.join (os.path.dirname (name),
'x-pyblio-save-' + os.path.basename (name))
if self.changed:
try:
savefile = open (save, 'w')
except (IOError, OSError), error:
self.w.error (_("Error during autosaving:\n%s") % error [1])
return False
iterator = Selection.Selection (sort = self.selection.sort)
Open.bibwrite (iterator.iterator (self.data.iterator ()),
out = savefile, how = how, database=self.data)
savefile.close ()
return True
def save_document (self, * arg):
if self.data.key is None:
self.save_document_as ()
return
file = self.data.key.url [2]
if not self.modification_check ():
return
Utils.set_cursor (self.w, 'clock')
try:
try:
self.data.update (self.selection.sort)
except (OSError, IOError), error:
Utils.set_cursor (self.w, 'normal')
self.w.error (_("Unable to save `%s':\n%s") % (str (self.data.key),
str (error)))
return
except:
etype, value, tb = sys.exc_info ()
traceback.print_exception (etype, value, tb)
Utils.set_cursor (self.w, 'normal')
self.w.error (_("An internal error occured during saving\nTry to Save As..."))
return
Utils.set_cursor (self.w, 'normal')
# get the current modification date
self.modification_date = os.stat (file) [stat.ST_MTIME]
self.update_status (0)
return
def save_document_as (self, * arg):
# get a new file name
(url, how) = FileSelector.URLFileSelection (
_("Save As..."), has_auto = False, is_save = True).run ()
if url is None: return
if os.path.exists (url):
if not Utils.Callback (
_("The file `%s' already exists.\nOverwrite it ?")
% url, parent = self.w).answer ():
return
try:
file = open (url, 'w')
except IOError, error:
self.w.error (_("During opening:\n%s") % error [1])
return
Utils.set_cursor (self.w, 'clock')
iterator = Selection.Selection (sort = self.selection.sort)
Open.bibwrite (iterator.iterator (self.data.iterator ()),
out = file, how = how, database=self.data)
file.close ()
# remove the old autosave object
if self.data.key is not None and self.source_id:
gobject.source_remove (self.source_id)
# remove old autosave file
if self.data.key:
if self.data.key.url [0] == 'file':
old_file = self.data.key.url [2]
old_auto_save = os.path.join (os.path.dirname (old_file),
'x-pyblio-save-' + os.path.basename (old_file))
if os.path.exists (old_auto_save):
try:
os.remove (old_auto_save)
except (OSError, IOError), error:
Utils.set_cursor (self.w, 'normal')
self.w.error (_("Unable to remove autosave file `%s':\n%s") % (str (old_auto_save), str (error)))
return
try:
self.data = Open.bibopen (url, how = how)
except (Exceptions.ParserError,
Exceptions.FormatError,
Exceptions.FileError), error:
Utils.set_cursor (self.w, 'normal')
Utils.error_dialog (_("Reopen error"), error,
parent = self.w)
return
self.redisplay_index ()
self._title_set ()
self.issue ('open-document', self)
Utils.set_cursor (self.w, 'normal')
self.update_status (0)
# create the new autosave object if needed
if Config.get ('base/autosave').data:
savetimeout = Config.get ('base/autosave interval').data
self.source_id = gobject.timeout_add (savetimeout * 60 * 1000, self.autosave, url, self.data.id)
return
def close_document (self, * arg):
self.issue ('close-document', self)
return 1
def close_or_exit (self, * arg):
self.issue ('close-document', self, True)
return 1
def close_document_request (self):
answer = self.confirm ()
# remove autosave object with closing
if answer and self.source_id:
gobject.source_remove (self.source_id)
# remove old autosave file
if answer and self.data.key:
if self.data.key.url [0] == 'file':
old_file = self.data.key.url [2]
old_auto_save = os.path.join (os.path.dirname (old_file),
'x-pyblio-save-' + os.path.basename (old_file))
if os.path.exists (old_auto_save):
try:
os.remove (old_auto_save)
except (OSError, IOError), error:
Utils.set_cursor (self.w, 'normal')
self.w.error (_("Unable to remove autosave file `%s':\n%s") % (str (old_auto_save), str (error)))
return
return answer
def exit_application (self, * arg):
self.issue ('exit-application', self)
return
def drag_moved (self, entries):
if not entries: return
for e in entries:
del self.data [e.key]
self.redisplay_index (1)
return
def drag_received (self, entries):
for entry in entries:
if self.data.would_have_key (entry.key):
if not Utils.Callback (_("An entry called `%s' already exists.\nRename and add it anyway ?")
% entry.key.key, parent = self.w).answer ():
continue
self.changed = 1
self.data.add (entry)
self.redisplay_index ()
self.index.set_scroll (entries [-1])
return
def cut_entry (self, * arg):
entries = self.index.selection ()
if not entries: return
self.index.selection_copy (entries)
for entry in entries:
del self.data [entry.key]
self.redisplay_index (1)
pass
def copy_entry (self, * arg):
self.index.selection_copy (self.index.selection ())
return
def paste_entry (self, * arg):
self.index.selection_paste ()
return
def clear_entries (self, * arg):
if len (self.data) == 0: return
if not Utils.Callback (_("Really remove all the entries ?"),
parent = self.w).answer ():
return
keys = self.data.keys ()
for key in keys:
del self.data [key]
self.redisplay_index (1)
return
def select_all_entries (self, * arg):
self.index.select_all ()
return
def add_entry (self, * arg):
entry = self.data.new_entry (Config.get ('base/defaulttype').data)
edit = Editor.Editor (self.data, entry, self.w, _("Create new entry"))
edit.Subscribe ('commit-edition', self.commit_edition)
return
def edit_entry (self, entries):
if not (type (entries) is types.ListType):
entries = self.index.selection ()
l = len (entries)
if l == 0: return
if l > 5:
            if not Utils.Callback (_("Really edit %d entries ?") % l,
                                   parent = self.w).answer ():
return
for entry in entries:
edit = Editor.Editor (self.data, entry, self.w)
edit.Subscribe ('commit-edition', self.commit_edition)
return
def commit_edition (self, old, new):
''' updates the database and the display '''
if old.key != new.key:
if self.data.has_key (old.key):
del self.data [old.key]
if new.key:
self.data [new.key] = new
else:
self.data.add (new)
self.freeze_display(None)
self.redisplay_index (1)
self.index.select_item (new)
return
def view_entry (self, action, *item):
if item:
entry, key, url, value = item [0]
## print 'VIEW ENTRY:', entry, key, url, value
Resource.StartViewer (entry, key, value, parent=self.w, document=self)
        else:
            # A view request without a concrete resource item is ignored.
            return
def delete_entry (self, * arg):
''' removes the selected list of items after confirmation '''
entries = self.index.selection ()
l = len (entries)
if l == 0: return
offset = self.index.get_item_position (entries [-1])
if l > 1:
question = _("Remove all the %d entries ?") % len (entries)
else:
question = _("Remove entry `%s' ?") % entries [0].key.key
if not Utils.Callback (question,
parent = self.w).answer ():
return
for entry in entries:
del self.data [entry.key]
self.redisplay_index (1)
self.index.select_item (offset)
return
def simple_search(self, w, *arg):
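        # Toolbar quick-search callback: a non-empty string becomes an
        # any-field search (latin-1 characters only), an empty string restores
        # the unrestricted view.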
q = w.get_text().strip()
if q:
try:
test = SearchCls.AnyTester(q.encode('latin-1'))
except UnicodeEncodeError:
self.w.error (_("your search text must contain\nlatin-1 characters only"))
return
else:
test = None
self.limit_view(q, test)
def find_entries (self, * arg):
if self.search_dg is None:
self.search_dg = Search.SearchDialog (self.w)
self.search_dg.Subscribe ('search-data', self.limit_view)
else:
self.search_dg.show ()
return
def limit_view (self, name, search):
self.selection.search = search
self.selection_name = name
self.redisplay_index ()
return
def sort_entries (self, * arg):
sort_dg = SortDialog (self.selection.sort, self.w)
sort_dg.Subscribe ('sort-data', self.sort_view)
return
def sort_view (self, sort):
self.selection.sort = sort
self.redisplay_index ()
return
def sort_by_field (self, field):
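        # Column-header click: sort on that column, toggling the sort
        # direction when the same column is clicked twice in a row.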
if field == '-key-':
mode = Sort.KeySort ()
elif field == '-type-':
mode = Sort.TypeSort ()
else:
mode = Sort.FieldSort (field)
# Check if we are toggling or changing
cur = self.selection.sort
if cur and len (cur.fields) == 1:
cur = cur.fields [0]
# We are still filtering according to the same field,
# simply toggle the direction
if cur == mode:
mode.ascend = - cur.ascend
self.selection.sort = Sort.Sort ([mode])
self.redisplay_index ()
return
def _on_connect(self, *args):
self.wp, self.citator = Citation.Connect(self.data, self.wp).run()
self._set_edit_actions(self.editable)
def wp_cite(self, *arg):
if not self.citator:
return
entries = self.index.selection ()
if not entries:
return
self.citator.cite([x.key for x in entries])
def _set_edit_actions(self, value):
for action in ('Copy', 'Cut', 'Delete', 'Edit', 'ViewResource'):
self.actiongroup.get_action (action).set_property('sensitive', value)
# one can only cite when there is a selection _and_ a current word processor.
self.actiongroup.get_action('Cite').set_property(
'sensitive', value and self.citator is not None)
self.actiongroup.get_action('Update').set_property(
'sensitive', self.citator is not None)
self.editable = value
return
def update_display(self, entry):
if entry:
self.display.display (entry)
self.update_viewables (entry)
self._set_edit_actions(entry is not None)
return
def freeze_display(self, entry):
self.display.clear()
self._set_edit_actions(True)
return
def update_viewables (self, entry):
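        # Rebuild the dynamic "Resource" submenus (menu bar and context menu)
        # so they list the viewable resources attached to the given entry.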
if self.viewables:
for item in self.viewables_id:
self.uim.remove_ui (item)
self.uim.remove_action_group (self.viewables)
self.viewables_id = []
self.viewables = gtk.ActionGroup ('Viewables')
self.uim.insert_action_group (self.viewables, 1)
viewables = Resource.get_viewables (entry)
for key, url, value in viewables:
## text = u'<span foreground="BLUE" weight="bold">%s</span> %s' %(
## key.upper (), value)
text = u"%s %s" % (key.upper (), value)
mergeid = self.uim.new_merge_id ()
self.viewables_id.append (mergeid)
action = gtk.Action (str(mergeid), text, None, None)
self.viewables.add_action (action)
action.connect ('activate', self.view_entry, (entry, key, url, value))
self.uim.add_ui (mergeid, '/Menubar/ViewMenu/ViewResource', str(mergeid),
str(mergeid), gtk.UI_MANAGER_MENUITEM, False)
self.uim.add_ui (mergeid, '/Popup/ViewResource', str(mergeid),
str(mergeid), gtk.UI_MANAGER_MENUITEM, False)
return
def key_pressed (self, app, event):
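        # Incremental search typed over the index: printable keys accumulate
        # (within a one-second window) into a prefix that is looked up in the
        # first sort column; Escape clears any view restriction and the
        # quick-search box.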
# filter out special keys
if event.keyval == gtk.keysyms.Escape:
# the Esc key restores view to "all entries"
self.limit_view (None, None)
self.quick_search.set_text('')
if (event.string < 'a' or event.string > 'z') and \
(event.string < '0' or event.string > '9'): return False
if self.selection.sort is None:
app.flash ("Select a column to search in first.")
return False
if event.string in printable:
# the user searches the first entry in its ordering that starts with this letter
if self.incremental_search == '':
self.incremental_search = event.string
self.incremental_start = event.time
else:
if event.time - self.incremental_start > 1000:
self.incremental_search = event.string
else:
# two keys in a same shot: we search for the composition of the words
self.incremental_search = self.incremental_search + event.string
self.incremental_start = event.time
# search first occurence
if self.index.go_to_first (self.incremental_search,
self.selection.sort.fields [0]):
app.flash ("Searching for '%s...'" % self.incremental_search)
else:
app.flash ("Cannot find '%s...'" % self.incremental_search)
return False
def update_configuration (self):
''' save current informations about the program '''
# Save the graphical aspect of the interface
# 1.- Window size
alloc = self.w.get_allocation ()
Utils.config.set_int ('/apps/pyblio/ui/width', alloc [2])
Utils.config.set_int ('/apps/pyblio/ui/height', alloc [3])
# 2.- Proportion between list and text
height = self.paned.get_position ()
Utils.config.set_int ('/apps/pyblio/ui/paned', height)
# updates the index's config
self.index.update_configuration ()
return
def on_documentation (self, *args):
import gobject
try:
gnome.help_display ('pyblio', None)
except gobject.GError, msg:
self.w.error (_("Can't display documentation:\n%s") % msg)
return
def about (self, *arg):
about = ui.About ('Pyblio',
version.version,
_("This program is copyrighted under the GNU GPL"),
_("GNOME interface to the Pybliographer system."),
['Hervé Dréau',
'Frédéric Gobry',
'Zoltán Kóta',
'Travis Oliphant',
'Darrell Rudmann',
'Peter Schulte-Stracke',
'John Vu'],
['Yuri Bongiorno',
'Frédéric Gobry',
'Zoltán Kóta'],
_('GNOME Translation Team'))
about.set_transient_for (self.w)
link = ui.HRef('http://pybliographer.org/',
_("Pybliographer Home Page"))
link.show ()
about.vbox.pack_start (link)
about.show()
return
| gpl-2.0 | -7,044,868,091,550,441,000 | 31.671875 | 163 | 0.524176 | false |
rouge8/pip | tests/functional/test_install.py | 1 | 65164 | import distutils
import glob
import os
import shutil
import sys
import textwrap
from os.path import curdir, join, pardir
import pytest
from pip._vendor.six import PY2
from pip import __version__ as pip_current_version
from pip._internal import pep425tags
from pip._internal.cli.status_codes import ERROR, SUCCESS
from pip._internal.models.index import PyPI, TestPyPI
from pip._internal.utils.misc import rmtree
from tests.lib import (
_create_svn_repo,
_create_test_package,
create_basic_wheel_for_package,
create_test_package_with_setup,
need_bzr,
need_mercurial,
path_to_url,
pyversion,
pyversion_tuple,
requirements_file,
)
from tests.lib.filesystem import make_socket_file
from tests.lib.local_repos import local_checkout
from tests.lib.path import Path
skip_if_python2 = pytest.mark.skipif(PY2, reason="Non-Python 2 only")
skip_if_not_python2 = pytest.mark.skipif(not PY2, reason="Python 2 only")
@pytest.mark.parametrize('command', ('install', 'wheel'))
@pytest.mark.parametrize('variant', ('missing_setuptools', 'bad_setuptools'))
def test_pep518_uses_build_env(script, data, common_wheels, command, variant):
if variant == 'missing_setuptools':
script.pip("uninstall", "-y", "setuptools")
elif variant == 'bad_setuptools':
setuptools_mod = script.site_packages_path.joinpath("setuptools.py")
with open(setuptools_mod, 'a') as f:
f.write('\nraise ImportError("toto")')
else:
raise ValueError(variant)
script.pip(
command, '--no-index', '-f', common_wheels, '-f', data.packages,
data.src.joinpath("pep518-3.0"),
)
def test_pep518_build_env_uses_same_pip(
script, data, pip_src, common_wheels, deprecated_python):
"""Ensure the subprocess call to pip for installing the
build dependencies is using the same version of pip.
"""
with open(script.scratch_path / 'pip.py', 'w') as fp:
fp.write('raise ImportError')
script.run(
'python', pip_src / 'src/pip', 'install', '--no-index',
'-f', common_wheels, '-f', data.packages,
data.src.joinpath("pep518-3.0"),
expect_stderr=deprecated_python,
)
def test_pep518_refuses_conflicting_requires(script, data):
create_basic_wheel_for_package(script, 'setuptools', '1.0')
create_basic_wheel_for_package(script, 'wheel', '1.0')
project_dir = data.src.joinpath("pep518_conflicting_requires")
result = script.pip_install_local('-f', script.scratch_path,
project_dir, expect_error=True)
assert (
result.returncode != 0 and
('Some build dependencies for %s conflict with PEP 517/518 supported '
'requirements: setuptools==1.0 is incompatible with '
'setuptools>=40.8.0.' % path_to_url(project_dir)) in result.stderr
), str(result)
def test_pep518_refuses_invalid_requires(script, data, common_wheels):
result = script.pip(
'install', '-f', common_wheels,
data.src.joinpath("pep518_invalid_requires"),
expect_error=True
)
assert result.returncode == 1
assert "does not comply with PEP 518" in result.stderr
def test_pep518_refuses_invalid_build_system(script, data, common_wheels):
result = script.pip(
'install', '-f', common_wheels,
data.src.joinpath("pep518_invalid_build_system"),
expect_error=True
)
assert result.returncode == 1
assert "does not comply with PEP 518" in result.stderr
def test_pep518_allows_missing_requires(script, data, common_wheels):
result = script.pip(
'install', '-f', common_wheels,
data.src.joinpath("pep518_missing_requires"),
expect_stderr=True
)
# Make sure we don't warn when this occurs.
assert "does not comply with PEP 518" not in result.stderr
# We want it to go through isolation for now.
assert "Installing build dependencies" in result.stdout, result.stdout
assert result.returncode == 0
assert result.files_created
def test_pep518_with_user_pip(script, pip_src, data, common_wheels):
"""
Check that build dependencies are installed into the build
environment without using build isolation for the pip invocation.
To ensure that we're not using build isolation when installing
the build dependencies, we install a user copy of pip in the
non-isolated environment, and break pip in the system site-packages,
so that isolated uses of pip will fail.
"""
script.pip("install", "--ignore-installed",
"-f", common_wheels, "--user", pip_src)
system_pip_dir = script.site_packages_path / 'pip'
assert not system_pip_dir.exists()
system_pip_dir.mkdir()
with open(system_pip_dir / '__init__.py', 'w') as fp:
fp.write('raise ImportError\n')
script.pip(
'wheel', '--no-index', '-f', common_wheels, '-f', data.packages,
data.src.joinpath("pep518-3.0"),
)
def test_pep518_with_extra_and_markers(script, data, common_wheels):
script.pip(
'wheel', '--no-index',
'-f', common_wheels,
'-f', data.find_links,
data.src.joinpath("pep518_with_extra_and_markers-1.0"),
)
def test_pep518_with_namespace_package(script, data, common_wheels):
script.pip(
'wheel', '--no-index',
'-f', common_wheels,
'-f', data.find_links,
data.src.joinpath("pep518_with_namespace_package-1.0"),
use_module=True,
)
@pytest.mark.timeout(60)
@pytest.mark.parametrize('command', ('install', 'wheel'))
@pytest.mark.parametrize('package', ('pep518_forkbomb',
'pep518_twin_forkbombs_first',
'pep518_twin_forkbombs_second'))
def test_pep518_forkbombs(script, data, common_wheels, command, package):
package_source = next(data.packages.glob(package + '-[0-9]*.tar.gz'))
result = script.pip(
command, '--no-index', '-v',
'-f', common_wheels,
'-f', data.find_links,
package,
expect_error=True,
)
assert '{1} is already being built: {0} from {1}'.format(
package, path_to_url(package_source),
) in result.stderr, str(result)
@pytest.mark.network
def test_pip_second_command_line_interface_works(
script, pip_src, data, common_wheels, deprecated_python):
"""
Check if ``pip<PYVERSION>`` commands behaves equally
"""
# Re-install pip so we get the launchers.
script.pip_install_local('-f', common_wheels, pip_src)
# On old versions of Python, urllib3/requests will raise a warning about
# the lack of an SSLContext.
kwargs = {'expect_stderr': deprecated_python}
if pyversion_tuple < (2, 7, 9):
kwargs['expect_stderr'] = True
args = ['pip%s' % pyversion]
args.extend(['install', 'INITools==0.2'])
args.extend(['-f', data.packages])
result = script.run(*args, **kwargs)
egg_info_folder = (
script.site_packages / 'INITools-0.2-py%s.egg-info' % pyversion
)
initools_folder = script.site_packages / 'initools'
assert egg_info_folder in result.files_created, str(result)
assert initools_folder in result.files_created, str(result)
def test_install_exit_status_code_when_no_requirements(script):
"""
Test install exit status code when no requirements specified
"""
result = script.pip('install', expect_error=True)
assert "You must give at least one requirement to install" in result.stderr
assert result.returncode == ERROR
def test_install_exit_status_code_when_blank_requirements_file(script):
"""
Test install exit status code when blank requirements file specified
"""
script.scratch_path.joinpath("blank.txt").write_text("\n")
script.pip('install', '-r', 'blank.txt')
@pytest.mark.network
def test_basic_install_from_pypi(script):
"""
Test installing a package from PyPI.
"""
result = script.pip('install', '-vvv', 'INITools==0.2')
egg_info_folder = (
script.site_packages / 'INITools-0.2-py%s.egg-info' % pyversion
)
initools_folder = script.site_packages / 'initools'
assert egg_info_folder in result.files_created, str(result)
assert initools_folder in result.files_created, str(result)
# Should not display where it's looking for files
assert "Looking in indexes: " not in result.stdout
assert "Looking in links: " not in result.stdout
def test_basic_editable_install(script):
"""
Test editable installation.
"""
result = script.pip('install', '-e', 'INITools==0.2', expect_error=True)
assert (
"INITools==0.2 is not a valid editable requirement"
in result.stderr
)
assert not result.files_created
assert not result.files_updated
@pytest.mark.svn
def test_basic_install_editable_from_svn(script):
"""
Test checking out from svn.
"""
checkout_path = _create_test_package(script)
repo_url = _create_svn_repo(script, checkout_path)
result = script.pip(
'install',
'-e', 'svn+' + repo_url + '#egg=version-pkg'
)
result.assert_installed('version-pkg', with_files=['.svn'])
def _test_install_editable_from_git(script, tmpdir):
"""Test cloning from Git."""
pkg_path = _create_test_package(script, name='testpackage', vcs='git')
args = ['install', '-e', 'git+%s#egg=testpackage' % path_to_url(pkg_path)]
result = script.pip(*args)
result.assert_installed('testpackage', with_files=['.git'])
def test_basic_install_editable_from_git(script, tmpdir):
_test_install_editable_from_git(script, tmpdir)
def test_install_editable_from_git_autobuild_wheel(
script, tmpdir, with_wheel):
_test_install_editable_from_git(script, tmpdir)
@pytest.mark.network
def test_install_editable_uninstalls_existing(data, script, tmpdir):
"""
Test that installing an editable uninstalls a previously installed
non-editable version.
https://github.com/pypa/pip/issues/1548
https://github.com/pypa/pip/pull/1552
"""
to_install = data.packages.joinpath("pip-test-package-0.1.tar.gz")
result = script.pip_install_local(to_install)
assert 'Successfully installed pip-test-package' in result.stdout
result.assert_installed('piptestpackage', editable=False)
result = script.pip(
'install', '-e',
'%s#egg=pip-test-package' %
local_checkout(
'git+https://github.com/pypa/pip-test-package.git', tmpdir,
),
)
result.assert_installed('pip-test-package', with_files=['.git'])
assert 'Found existing installation: pip-test-package 0.1' in result.stdout
assert 'Uninstalling pip-test-package-' in result.stdout
assert 'Successfully uninstalled pip-test-package' in result.stdout
def test_install_editable_uninstalls_existing_from_path(script, data):
"""
Test that installing an editable uninstalls a previously installed
non-editable version from path
"""
to_install = data.src.joinpath('simplewheel-1.0')
result = script.pip_install_local(to_install)
assert 'Successfully installed simplewheel' in result.stdout
simple_folder = script.site_packages / 'simplewheel'
result.assert_installed('simplewheel', editable=False)
assert simple_folder in result.files_created, str(result.stdout)
result = script.pip(
'install', '-e',
to_install,
)
install_path = script.site_packages / 'simplewheel.egg-link'
assert install_path in result.files_created, str(result)
assert 'Found existing installation: simplewheel 1.0' in result.stdout
assert 'Uninstalling simplewheel-' in result.stdout
assert 'Successfully uninstalled simplewheel' in result.stdout
assert simple_folder in result.files_deleted, str(result.stdout)
@need_mercurial
def test_basic_install_editable_from_hg(script, tmpdir):
"""Test cloning and hg+file install from Mercurial."""
pkg_path = _create_test_package(script, name='testpackage', vcs='hg')
url = 'hg+{}#egg=testpackage'.format(path_to_url(pkg_path))
assert url.startswith('hg+file')
args = ['install', '-e', url]
result = script.pip(*args)
result.assert_installed('testpackage', with_files=['.hg'])
@need_mercurial
def test_vcs_url_final_slash_normalization(script, tmpdir):
"""
Test that presence or absence of final slash in VCS URL is normalized.
"""
pkg_path = _create_test_package(script, name='testpackage', vcs='hg')
args = ['install', '-e', 'hg+%s/#egg=testpackage' % path_to_url(pkg_path)]
result = script.pip(*args)
result.assert_installed('testpackage', with_files=['.hg'])
@need_bzr
def test_install_editable_from_bazaar(script, tmpdir):
"""Test checking out from Bazaar."""
pkg_path = _create_test_package(script, name='testpackage', vcs='bazaar')
args = ['install', '-e', 'bzr+%s/#egg=testpackage' % path_to_url(pkg_path)]
result = script.pip(*args)
result.assert_installed('testpackage', with_files=['.bzr'])
@pytest.mark.network
@need_bzr
def test_vcs_url_urlquote_normalization(script, tmpdir):
"""
Test that urlquoted characters are normalized for repo URL comparison.
"""
script.pip(
'install', '-e',
'%s/#egg=django-wikiapp' %
local_checkout(
'bzr+http://bazaar.launchpad.net/%7Edjango-wikiapp/django-wikiapp'
'/release-0.1',
tmpdir,
),
)
def test_basic_install_from_local_directory(script, data):
"""
Test installing from a local directory.
"""
to_install = data.packages.joinpath("FSPkg")
result = script.pip('install', to_install, expect_error=False)
fspkg_folder = script.site_packages / 'fspkg'
egg_info_folder = (
script.site_packages / 'FSPkg-0.1.dev0-py%s.egg-info' % pyversion
)
assert fspkg_folder in result.files_created, str(result.stdout)
assert egg_info_folder in result.files_created, str(result)
def test_basic_install_relative_directory(script, data):
"""
Test installing a requirement using a relative path.
"""
egg_info_file = (
script.site_packages / 'FSPkg-0.1.dev0-py%s.egg-info' % pyversion
)
egg_link_file = (
script.site_packages / 'FSPkg.egg-link'
)
package_folder = script.site_packages / 'fspkg'
# Compute relative install path to FSPkg from scratch path.
full_rel_path = Path(
os.path.relpath(data.packages.joinpath('FSPkg'), script.scratch_path)
)
full_rel_url = (
'file:' + full_rel_path.replace(os.path.sep, '/') + '#egg=FSPkg'
)
embedded_rel_path = script.scratch_path.joinpath(full_rel_path)
# For each relative path, install as either editable or not using either
# URLs with egg links or not.
for req_path in (full_rel_path, full_rel_url, embedded_rel_path):
# Regular install.
result = script.pip('install', req_path,
cwd=script.scratch_path)
assert egg_info_file in result.files_created, str(result)
assert package_folder in result.files_created, str(result)
script.pip('uninstall', '-y', 'fspkg')
# Editable install.
result = script.pip('install', '-e' + req_path,
cwd=script.scratch_path)
assert egg_link_file in result.files_created, str(result)
script.pip('uninstall', '-y', 'fspkg')
def test_install_quiet(script, data):
"""
Test that install -q is actually quiet.
"""
# Apparently if pip install -q is not actually quiet, then it breaks
# everything. See:
# https://github.com/pypa/pip/issues/3418
# https://github.com/docker-library/python/issues/83
to_install = data.packages.joinpath("FSPkg")
result = script.pip('install', '-qqq', to_install, expect_error=False)
assert result.stdout == ""
assert result.stderr == ""
def test_hashed_install_success(script, data, tmpdir):
"""
Test that installing various sorts of requirements with correct hashes
works.
Test file URLs and index packages (which become HTTP URLs behind the
scenes).
"""
file_url = path_to_url(
(data.packages / 'simple-1.0.tar.gz').resolve())
with requirements_file(
'simple2==1.0 --hash=sha256:9336af72ca661e6336eb87bc7de3e8844d853e'
'3848c2b9bbd2e8bf01db88c2c7\n'
'{simple} --hash=sha256:393043e672415891885c9a2a0929b1af95fb866d6c'
'a016b42d2e6ce53619b653'.format(simple=file_url),
tmpdir) as reqs_file:
script.pip_install_local('-r', reqs_file.resolve(), expect_error=False)
def test_hashed_install_failure(script, tmpdir):
"""Test that wrong hashes stop installation.
This makes sure prepare_files() is called in the course of installation
and so has the opportunity to halt if hashes are wrong. Checks on various
kinds of hashes are in test_req.py.
"""
with requirements_file('simple2==1.0 --hash=sha256:9336af72ca661e6336eb87b'
'c7de3e8844d853e3848c2b9bbd2e8bf01db88c2c\n',
tmpdir) as reqs_file:
result = script.pip_install_local('-r',
reqs_file.resolve(),
expect_error=True)
assert len(result.files_created) == 0
def test_install_from_local_directory_with_symlinks_to_directories(
script, data):
"""
Test installing from a local directory containing symlinks to directories.
"""
to_install = data.packages.joinpath("symlinks")
result = script.pip('install', to_install, expect_error=False)
pkg_folder = script.site_packages / 'symlinks'
egg_info_folder = (
script.site_packages / 'symlinks-0.1.dev0-py%s.egg-info' % pyversion
)
assert pkg_folder in result.files_created, str(result.stdout)
assert egg_info_folder in result.files_created, str(result)
@pytest.mark.skipif("sys.platform == 'win32' or sys.version_info < (3,)")
def test_install_from_local_directory_with_socket_file(script, data, tmpdir):
"""
Test installing from a local directory containing a socket file.
"""
egg_info_file = (
script.site_packages / "FSPkg-0.1.dev0-py%s.egg-info" % pyversion
)
package_folder = script.site_packages / "fspkg"
to_copy = data.packages.joinpath("FSPkg")
to_install = tmpdir.joinpath("src")
shutil.copytree(to_copy, to_install)
# Socket file, should be ignored.
socket_file_path = os.path.join(to_install, "example")
make_socket_file(socket_file_path)
result = script.pip("install", "--verbose", to_install, expect_error=False)
assert package_folder in result.files_created, str(result.stdout)
assert egg_info_file in result.files_created, str(result)
assert str(socket_file_path) in result.stderr
def test_install_from_local_directory_with_no_setup_py(script, data):
"""
Test installing from a local directory with no 'setup.py'.
"""
result = script.pip('install', data.root, expect_error=True)
assert not result.files_created
assert "is not installable." in result.stderr
assert "Neither 'setup.py' nor 'pyproject.toml' found." in result.stderr
def test_editable_install__local_dir_no_setup_py(
script, data, deprecated_python):
"""
Test installing in editable mode from a local directory with no setup.py.
"""
result = script.pip('install', '-e', data.root, expect_error=True)
assert not result.files_created
msg = result.stderr
if deprecated_python:
assert 'File "setup.py" not found. ' in msg
else:
assert msg.startswith('ERROR: File "setup.py" not found. ')
assert 'pyproject.toml' not in msg
def test_editable_install__local_dir_no_setup_py_with_pyproject(
script, deprecated_python):
"""
Test installing in editable mode from a local directory with no setup.py
but that does have pyproject.toml.
"""
local_dir = script.scratch_path.joinpath('temp')
local_dir.mkdir()
pyproject_path = local_dir.joinpath('pyproject.toml')
pyproject_path.write_text('')
result = script.pip('install', '-e', local_dir, expect_error=True)
assert not result.files_created
msg = result.stderr
if deprecated_python:
assert 'File "setup.py" not found. ' in msg
else:
assert msg.startswith('ERROR: File "setup.py" not found. ')
assert 'A "pyproject.toml" file was found' in msg
@skip_if_not_python2
@pytest.mark.xfail
def test_install_argparse_shadowed(script):
# When argparse is in the stdlib, we support installing it
# even though that's pretty useless because older packages did need to
# depend on it, and not having its metadata will cause pkg_resources
# requirements checks to fail // trigger easy-install, both of which are
# bad.
# XXX: Note, this test hits the outside-environment check, not the
# in-stdlib check, because our tests run in virtualenvs...
result = script.pip('install', 'argparse>=1.4')
assert "Not uninstalling argparse" in result.stdout
@pytest.mark.network
@skip_if_python2
def test_upgrade_argparse_shadowed(script):
    # If argparse is installed - even if shadowed when imported - we support
# upgrading it and properly remove the older versions files.
script.pip('install', 'argparse==1.3')
result = script.pip('install', 'argparse>=1.4')
assert "Not uninstalling argparse" not in result.stdout
def test_install_curdir(script, data):
"""
Test installing current directory ('.').
"""
run_from = data.packages.joinpath("FSPkg")
# Python 2.4 Windows balks if this exists already
egg_info = join(run_from, "FSPkg.egg-info")
if os.path.isdir(egg_info):
rmtree(egg_info)
result = script.pip('install', curdir, cwd=run_from, expect_error=False)
fspkg_folder = script.site_packages / 'fspkg'
egg_info_folder = (
script.site_packages / 'FSPkg-0.1.dev0-py%s.egg-info' % pyversion
)
assert fspkg_folder in result.files_created, str(result.stdout)
assert egg_info_folder in result.files_created, str(result)
def test_install_pardir(script, data):
"""
Test installing parent directory ('..').
"""
run_from = data.packages.joinpath("FSPkg", "fspkg")
result = script.pip('install', pardir, cwd=run_from, expect_error=False)
fspkg_folder = script.site_packages / 'fspkg'
egg_info_folder = (
script.site_packages / 'FSPkg-0.1.dev0-py%s.egg-info' % pyversion
)
assert fspkg_folder in result.files_created, str(result.stdout)
assert egg_info_folder in result.files_created, str(result)
@pytest.mark.network
def test_install_global_option(script):
"""
Test using global distutils options.
(In particular those that disable the actual install action)
"""
result = script.pip(
'install', '--global-option=--version', "INITools==0.1",
expect_stderr=True)
assert 'INITools==0.1\n' in result.stdout
def test_install_with_hacked_egg_info(script, data):
"""
test installing a package which defines its own egg_info class
"""
run_from = data.packages.joinpath("HackedEggInfo")
result = script.pip('install', '.', cwd=run_from)
assert 'Successfully installed hackedegginfo-0.0.0\n' in result.stdout
@pytest.mark.network
def test_install_using_install_option_and_editable(script, tmpdir):
"""
Test installing a tool using -e and --install-option
"""
folder = 'script_folder'
script.scratch_path.joinpath(folder).mkdir()
url = 'git+git://github.com/pypa/pip-test-package'
result = script.pip(
'install', '-e', '%s#egg=pip-test-package' %
local_checkout(url, tmpdir),
'--install-option=--script-dir=%s' % folder,
expect_stderr=True)
script_file = (
script.venv / 'src' / 'pip-test-package' /
folder / 'pip-test-package' + script.exe
)
assert script_file in result.files_created
@pytest.mark.network
@need_mercurial
def test_install_global_option_using_editable(script, tmpdir):
"""
Test using global distutils options, but in an editable installation
"""
url = 'hg+http://bitbucket.org/runeh/anyjson'
result = script.pip(
'install', '--global-option=--version', '-e',
'%[email protected]#egg=anyjson' % local_checkout(url, tmpdir),
expect_stderr=True)
assert 'Successfully installed anyjson' in result.stdout
@pytest.mark.network
def test_install_package_with_same_name_in_curdir(script):
"""
Test installing a package with the same name of a local folder
"""
script.scratch_path.joinpath("mock==0.6").mkdir()
result = script.pip('install', 'mock==0.6')
egg_folder = script.site_packages / 'mock-0.6.0-py%s.egg-info' % pyversion
assert egg_folder in result.files_created, str(result)
mock100_setup_py = textwrap.dedent('''\
from setuptools import setup
setup(name='mock',
version='100.1')''')
def test_install_folder_using_dot_slash(script):
"""
Test installing a folder using pip install ./foldername
"""
script.scratch_path.joinpath("mock").mkdir()
pkg_path = script.scratch_path / 'mock'
pkg_path.joinpath("setup.py").write_text(mock100_setup_py)
result = script.pip('install', './mock')
egg_folder = script.site_packages / 'mock-100.1-py%s.egg-info' % pyversion
assert egg_folder in result.files_created, str(result)
def test_install_folder_using_slash_in_the_end(script):
r"""
Test installing a folder using pip install foldername/ or foldername\
"""
script.scratch_path.joinpath("mock").mkdir()
pkg_path = script.scratch_path / 'mock'
pkg_path.joinpath("setup.py").write_text(mock100_setup_py)
result = script.pip('install', 'mock' + os.path.sep)
egg_folder = script.site_packages / 'mock-100.1-py%s.egg-info' % pyversion
assert egg_folder in result.files_created, str(result)
def test_install_folder_using_relative_path(script):
"""
Test installing a folder using pip install folder1/folder2
"""
script.scratch_path.joinpath("initools").mkdir()
script.scratch_path.joinpath("initools", "mock").mkdir()
pkg_path = script.scratch_path / 'initools' / 'mock'
pkg_path.joinpath("setup.py").write_text(mock100_setup_py)
result = script.pip('install', Path('initools') / 'mock')
egg_folder = script.site_packages / 'mock-100.1-py%s.egg-info' % pyversion
assert egg_folder in result.files_created, str(result)
@pytest.mark.network
def test_install_package_which_contains_dev_in_name(script):
"""
Test installing package from PyPI which contains 'dev' in name
"""
result = script.pip('install', 'django-devserver==0.0.4')
devserver_folder = script.site_packages / 'devserver'
egg_info_folder = (
script.site_packages / 'django_devserver-0.0.4-py%s.egg-info' %
pyversion
)
assert devserver_folder in result.files_created, str(result.stdout)
assert egg_info_folder in result.files_created, str(result)
def test_install_package_with_target(script):
"""
Test installing a package using pip install --target
"""
target_dir = script.scratch_path / 'target'
result = script.pip_install_local('-t', target_dir, "simple==1.0")
assert Path('scratch') / 'target' / 'simple' in result.files_created, (
str(result)
)
# Test repeated call without --upgrade, no files should have changed
result = script.pip_install_local(
'-t', target_dir, "simple==1.0", expect_stderr=True,
)
assert not Path('scratch') / 'target' / 'simple' in result.files_updated
# Test upgrade call, check that new version is installed
result = script.pip_install_local('--upgrade', '-t',
target_dir, "simple==2.0")
assert Path('scratch') / 'target' / 'simple' in result.files_updated, (
str(result)
)
egg_folder = (
Path('scratch') / 'target' / 'simple-2.0-py%s.egg-info' % pyversion)
assert egg_folder in result.files_created, (
str(result)
)
# Test install and upgrade of single-module package
result = script.pip_install_local('-t', target_dir, 'singlemodule==0.0.0')
singlemodule_py = Path('scratch') / 'target' / 'singlemodule.py'
assert singlemodule_py in result.files_created, str(result)
result = script.pip_install_local('-t', target_dir, 'singlemodule==0.0.1',
'--upgrade')
assert singlemodule_py in result.files_updated, str(result)
def test_install_nonlocal_compatible_wheel(script, data):
target_dir = script.scratch_path / 'target'
# Test install with --target
result = script.pip(
'install',
'-t', target_dir,
'--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--python', '3',
'--platform', 'fakeplat',
'--abi', 'fakeabi',
'simplewheel',
)
assert result.returncode == SUCCESS
distinfo = Path('scratch') / 'target' / 'simplewheel-2.0-1.dist-info'
assert distinfo in result.files_created
# Test install without --target
result = script.pip(
'install',
'--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--python', '3',
'--platform', 'fakeplat',
'--abi', 'fakeabi',
'simplewheel',
expect_error=True
)
assert result.returncode == ERROR
def test_install_nonlocal_compatible_wheel_path(script, data):
target_dir = script.scratch_path / 'target'
# Test a full path requirement
result = script.pip(
'install',
'-t', target_dir,
'--no-index',
'--only-binary=:all:',
Path(data.packages) / 'simplewheel-2.0-py3-fakeabi-fakeplat.whl'
)
assert result.returncode == SUCCESS
distinfo = Path('scratch') / 'target' / 'simplewheel-2.0.dist-info'
assert distinfo in result.files_created
# Test a full path requirement (without --target)
result = script.pip(
'install',
'--no-index',
'--only-binary=:all:',
Path(data.packages) / 'simplewheel-2.0-py3-fakeabi-fakeplat.whl',
expect_error=True
)
assert result.returncode == ERROR
def test_install_with_target_and_scripts_no_warning(script, with_wheel):
"""
Test that installing with --target does not trigger the "script not
in PATH" warning (issue #5201)
"""
target_dir = script.scratch_path / 'target'
pkga_path = script.scratch_path / 'pkga'
pkga_path.mkdir()
pkga_path.joinpath("setup.py").write_text(textwrap.dedent("""
from setuptools import setup
setup(name='pkga',
version='0.1',
py_modules=["pkga"],
entry_points={
'console_scripts': ['pkga=pkga:main']
}
)
"""))
pkga_path.joinpath("pkga.py").write_text(textwrap.dedent("""
def main(): pass
"""))
result = script.pip('install', '--target', target_dir, pkga_path)
# This assertion isn't actually needed, if we get the script warning
# the script.pip() call will fail with "stderr not expected". But we
# leave the assertion to make the intention of the code clearer.
assert "--no-warn-script-location" not in result.stderr, str(result)
def test_install_package_with_root(script, data):
"""
Test installing a package using pip install --root
"""
root_dir = script.scratch_path / 'root'
result = script.pip(
'install', '--root', root_dir, '-f', data.find_links, '--no-index',
'simple==1.0',
)
normal_install_path = (
script.base_path / script.site_packages / 'simple-1.0-py%s.egg-info' %
pyversion
)
# use distutils to change the root exactly how the --root option does it
from distutils.util import change_root
root_path = change_root(
os.path.join(script.scratch, 'root'),
normal_install_path
)
assert root_path in result.files_created, str(result)
# Should show find-links location in output
assert "Looking in indexes: " not in result.stdout
assert "Looking in links: " in result.stdout
def test_install_package_with_prefix(script, data):
"""
Test installing a package using pip install --prefix
"""
prefix_path = script.scratch_path / 'prefix'
result = script.pip(
'install', '--prefix', prefix_path, '-f', data.find_links,
'--no-binary', 'simple', '--no-index', 'simple==1.0',
)
rel_prefix_path = script.scratch / 'prefix'
install_path = (
distutils.sysconfig.get_python_lib(prefix=rel_prefix_path) /
'simple-1.0-py{}.egg-info'.format(pyversion)
)
assert install_path in result.files_created, str(result)
def test_install_editable_with_prefix(script):
# make a dummy project
pkga_path = script.scratch_path / 'pkga'
pkga_path.mkdir()
pkga_path.joinpath("setup.py").write_text(textwrap.dedent("""
from setuptools import setup
setup(name='pkga',
version='0.1')
"""))
if hasattr(sys, "pypy_version_info"):
site_packages = os.path.join(
'prefix', 'lib', 'python{}'.format(pyversion), 'site-packages')
else:
site_packages = distutils.sysconfig.get_python_lib(prefix='prefix')
# make sure target path is in PYTHONPATH
pythonpath = script.scratch_path / site_packages
pythonpath.mkdir(parents=True)
script.environ["PYTHONPATH"] = pythonpath
# install pkga package into the absolute prefix directory
prefix_path = script.scratch_path / 'prefix'
result = script.pip(
'install', '--editable', pkga_path, '--prefix', prefix_path)
# assert pkga is installed at correct location
install_path = script.scratch / site_packages / 'pkga.egg-link'
assert install_path in result.files_created, str(result)
def test_install_package_conflict_prefix_and_user(script, data):
"""
Test installing a package using pip install --prefix --user errors out
"""
prefix_path = script.scratch_path / 'prefix'
result = script.pip(
'install', '-f', data.find_links, '--no-index', '--user',
'--prefix', prefix_path, 'simple==1.0',
expect_error=True, quiet=True,
)
assert (
"Can not combine '--user' and '--prefix'" in result.stderr
)
def test_install_package_that_emits_unicode(script, data):
"""
Install a package with a setup.py that emits UTF-8 output and then fails.
Refs https://github.com/pypa/pip/issues/326
"""
to_install = data.packages.joinpath("BrokenEmitsUTF8")
result = script.pip(
'install', to_install, expect_error=True, expect_temp=True, quiet=True,
)
assert (
'FakeError: this package designed to fail on install' in result.stderr
), 'stderr: {}'.format(result.stderr)
assert 'UnicodeDecodeError' not in result.stderr
assert 'UnicodeDecodeError' not in result.stdout
def test_install_package_with_utf8_setup(script, data):
"""Install a package with a setup.py that declares a utf-8 encoding."""
to_install = data.packages.joinpath("SetupPyUTF8")
script.pip('install', to_install)
def test_install_package_with_latin1_setup(script, data):
"""Install a package with a setup.py that declares a latin-1 encoding."""
to_install = data.packages.joinpath("SetupPyLatin1")
script.pip('install', to_install)
def test_url_req_case_mismatch_no_index(script, data):
"""
tar ball url requirements (with no egg fragment), that happen to have upper
case project names, should be considered equal to later requirements that
reference the project name using lower case.
tests/data/packages contains Upper-1.0.tar.gz and Upper-2.0.tar.gz
'requiresupper' has install_requires = ['upper']
"""
Upper = '/'.join((data.find_links, 'Upper-1.0.tar.gz'))
result = script.pip(
'install', '--no-index', '-f', data.find_links, Upper, 'requiresupper'
)
# only Upper-1.0.tar.gz should get installed.
egg_folder = script.site_packages / 'Upper-1.0-py%s.egg-info' % pyversion
assert egg_folder in result.files_created, str(result)
egg_folder = script.site_packages / 'Upper-2.0-py%s.egg-info' % pyversion
assert egg_folder not in result.files_created, str(result)
def test_url_req_case_mismatch_file_index(script, data):
"""
tar ball url requirements (with no egg fragment), that happen to have upper
case project names, should be considered equal to later requirements that
reference the project name using lower case.
tests/data/packages3 contains Dinner-1.0.tar.gz and Dinner-2.0.tar.gz
'requiredinner' has install_requires = ['dinner']
This test is similar to test_url_req_case_mismatch_no_index; that test
tests behaviour when using "--no-index -f", while this one does the same
test when using "--index-url". Unfortunately this requires a different
set of packages as it requires a prepared index.html file and
subdirectory-per-package structure.
"""
Dinner = '/'.join((data.find_links3, 'dinner', 'Dinner-1.0.tar.gz'))
result = script.pip(
'install', '--index-url', data.find_links3, Dinner, 'requiredinner'
)
    # only Dinner-1.0.tar.gz should get installed.
egg_folder = script.site_packages / 'Dinner-1.0-py%s.egg-info' % pyversion
assert egg_folder in result.files_created, str(result)
egg_folder = script.site_packages / 'Dinner-2.0-py%s.egg-info' % pyversion
assert egg_folder not in result.files_created, str(result)
def test_url_incorrect_case_no_index(script, data):
"""
Same as test_url_req_case_mismatch_no_index, except testing for the case
where the incorrect case is given in the name of the package to install
rather than in a requirements file.
"""
result = script.pip(
'install', '--no-index', '-f', data.find_links, "upper",
)
# only Upper-2.0.tar.gz should get installed.
egg_folder = script.site_packages / 'Upper-1.0-py%s.egg-info' % pyversion
assert egg_folder not in result.files_created, str(result)
egg_folder = script.site_packages / 'Upper-2.0-py%s.egg-info' % pyversion
assert egg_folder in result.files_created, str(result)
def test_url_incorrect_case_file_index(script, data):
"""
Same as test_url_req_case_mismatch_file_index, except testing for the case
where the incorrect case is given in the name of the package to install
rather than in a requirements file.
"""
result = script.pip(
'install', '--index-url', data.find_links3, "dinner",
expect_stderr=True,
)
    # only Dinner-2.0.tar.gz should get installed.
egg_folder = script.site_packages / 'Dinner-1.0-py%s.egg-info' % pyversion
assert egg_folder not in result.files_created, str(result)
egg_folder = script.site_packages / 'Dinner-2.0-py%s.egg-info' % pyversion
assert egg_folder in result.files_created, str(result)
# Should show index-url location in output
assert "Looking in indexes: " in result.stdout
assert "Looking in links: " not in result.stdout
@pytest.mark.network
def test_compiles_pyc(script):
"""
Test installing with --compile on
"""
del script.environ["PYTHONDONTWRITEBYTECODE"]
script.pip("install", "--compile", "--no-binary=:all:", "INITools==0.2")
# There are many locations for the __init__.pyc file so attempt to find
# any of them
exists = [
os.path.exists(script.site_packages_path / "initools/__init__.pyc"),
]
exists += glob.glob(
script.site_packages_path / "initools/__pycache__/__init__*.pyc"
)
assert any(exists)
@pytest.mark.network
def test_no_compiles_pyc(script):
"""
    Test installing with --no-compile on
"""
del script.environ["PYTHONDONTWRITEBYTECODE"]
script.pip("install", "--no-compile", "--no-binary=:all:", "INITools==0.2")
# There are many locations for the __init__.pyc file so attempt to find
# any of them
exists = [
os.path.exists(script.site_packages_path / "initools/__init__.pyc"),
]
exists += glob.glob(
script.site_packages_path / "initools/__pycache__/__init__*.pyc"
)
assert not any(exists)
def test_install_upgrade_editable_depending_on_other_editable(script):
script.scratch_path.joinpath("pkga").mkdir()
pkga_path = script.scratch_path / 'pkga'
pkga_path.joinpath("setup.py").write_text(textwrap.dedent("""
from setuptools import setup
setup(name='pkga',
version='0.1')
"""))
script.pip('install', '--editable', pkga_path)
result = script.pip('list', '--format=freeze')
assert "pkga==0.1" in result.stdout
script.scratch_path.joinpath("pkgb").mkdir()
pkgb_path = script.scratch_path / 'pkgb'
pkgb_path.joinpath("setup.py").write_text(textwrap.dedent("""
from setuptools import setup
setup(name='pkgb',
version='0.1',
install_requires=['pkga'])
"""))
script.pip('install', '--upgrade', '--editable', pkgb_path, '--no-index')
result = script.pip('list', '--format=freeze')
assert "pkgb==0.1" in result.stdout
def test_install_subprocess_output_handling(script, data):
args = ['install', data.src.joinpath('chattymodule')]
# Regular install should not show output from the chatty setup.py
result = script.pip(*args)
assert 0 == result.stdout.count("HELLO FROM CHATTYMODULE")
script.pip("uninstall", "-y", "chattymodule")
# With --verbose we should show the output.
# Only count examples with sys.argv[1] == egg_info, because we call
# setup.py multiple times, which should not count as duplicate output.
result = script.pip(*(args + ["--verbose"]), expect_stderr=True)
assert 1 == result.stderr.count("HELLO FROM CHATTYMODULE egg_info")
script.pip("uninstall", "-y", "chattymodule")
# If the install fails, then we *should* show the output... but only once,
# even if --verbose is given.
result = script.pip(*(args + ["--global-option=--fail"]),
expect_error=True)
assert 1 == result.stderr.count("I DIE, I DIE")
result = script.pip(*(args + ["--global-option=--fail", "--verbose"]),
expect_error=True)
assert 1 == result.stderr.count("I DIE, I DIE")
def test_install_log(script, data, tmpdir):
# test that verbose logs go to "--log" file
f = tmpdir.joinpath("log.txt")
args = ['--log=%s' % f,
'install', data.src.joinpath('chattymodule')]
result = script.pip(*args)
assert 0 == result.stdout.count("HELLO FROM CHATTYMODULE")
with open(f, 'r') as fp:
# one from egg_info, one from install
assert 2 == fp.read().count("HELLO FROM CHATTYMODULE")
def test_install_topological_sort(script, data):
args = ['install', 'TopoRequires4', '--no-index', '-f', data.packages]
res = str(script.pip(*args, expect_error=False))
order1 = 'TopoRequires, TopoRequires2, TopoRequires3, TopoRequires4'
order2 = 'TopoRequires, TopoRequires3, TopoRequires2, TopoRequires4'
assert order1 in res or order2 in res, res
def test_install_wheel_broken(script, with_wheel):
res = script.pip_install_local('wheelbroken', expect_stderr=True)
assert "Successfully installed wheelbroken-0.1" in str(res), str(res)
def test_cleanup_after_failed_wheel(script, with_wheel):
res = script.pip_install_local('wheelbrokenafter', expect_stderr=True)
# One of the effects of not cleaning up is broken scripts:
script_py = script.bin_path / "script.py"
assert script_py.exists(), script_py
shebang = open(script_py, 'r').readline().strip()
assert shebang != '#!python', shebang
# OK, assert that we *said* we were cleaning up:
assert "Running setup.py clean for wheelbrokenafter" in str(res), str(res)
def test_install_builds_wheels(script, data, with_wheel):
# We need to use a subprocess to get the right value on Windows.
res = script.run('python', '-c', (
'from pip._internal.utils import appdirs; '
'print(appdirs.user_cache_dir("pip"))'
))
wheels_cache = os.path.join(res.stdout.rstrip('\n'), 'wheels')
# NB This incidentally tests a local tree + tarball inputs
# see test_install_editable_from_git_autobuild_wheel for editable
# vcs coverage.
to_install = data.packages.joinpath('requires_wheelbroken_upper')
res = script.pip(
'install', '--no-index', '-f', data.find_links,
to_install, expect_stderr=True)
expected = ("Successfully installed requires-wheelbroken-upper-0"
" upper-2.0 wheelbroken-0.1")
# Must have installed it all
assert expected in str(res), str(res)
wheels = []
for top, dirs, files in os.walk(wheels_cache):
wheels.extend(files)
# and built wheels for upper and wheelbroken
assert "Building wheel for upper" in str(res), str(res)
assert "Building wheel for wheelb" in str(res), str(res)
# Wheels are built for local directories, but not cached.
assert "Building wheel for requir" in str(res), str(res)
# wheelbroken has to run install
# into the cache
assert wheels != [], str(res)
# and installed from the wheel
assert "Running setup.py install for upper" not in str(res), str(res)
# Wheels are built for local directories, but not cached.
assert "Running setup.py install for requir" not in str(res), str(res)
# wheelbroken has to run install
assert "Running setup.py install for wheelb" in str(res), str(res)
# We want to make sure we used the correct implementation tag
assert wheels == [
"Upper-2.0-{}-none-any.whl".format(pep425tags.implementation_tag),
]
def test_install_no_binary_disables_building_wheels(script, data, with_wheel):
to_install = data.packages.joinpath('requires_wheelbroken_upper')
res = script.pip(
'install', '--no-index', '--no-binary=upper', '-f', data.find_links,
to_install, expect_stderr=True)
expected = ("Successfully installed requires-wheelbroken-upper-0"
" upper-2.0 wheelbroken-0.1")
# Must have installed it all
assert expected in str(res), str(res)
# and built wheels for wheelbroken only
assert "Building wheel for wheelb" in str(res), str(res)
# Wheels are built for local directories, but not cached across runs
assert "Building wheel for requir" in str(res), str(res)
# Don't build wheel for upper which was blacklisted
assert "Building wheel for upper" not in str(res), str(res)
# Wheels are built for local directories, but not cached across runs
assert "Running setup.py install for requir" not in str(res), str(res)
# And these two fell back to sdist based installed.
assert "Running setup.py install for wheelb" in str(res), str(res)
assert "Running setup.py install for upper" in str(res), str(res)
def test_install_no_binary_disables_cached_wheels(script, data, with_wheel):
# Seed the cache
script.pip(
'install', '--no-index', '-f', data.find_links,
'upper')
script.pip('uninstall', 'upper', '-y')
res = script.pip(
'install', '--no-index', '--no-binary=:all:', '-f', data.find_links,
'upper', expect_stderr=True)
assert "Successfully installed upper-2.0" in str(res), str(res)
# No wheel building for upper, which was blacklisted
assert "Building wheel for upper" not in str(res), str(res)
# Must have used source, not a cached wheel to install upper.
assert "Running setup.py install for upper" in str(res), str(res)
def test_install_editable_with_wrong_egg_name(script):
script.scratch_path.joinpath("pkga").mkdir()
pkga_path = script.scratch_path / 'pkga'
pkga_path.joinpath("setup.py").write_text(textwrap.dedent("""
from setuptools import setup
setup(name='pkga',
version='0.1')
"""))
result = script.pip(
'install', '--editable', 'file://%s#egg=pkgb' % pkga_path
)
assert ("Generating metadata for package pkgb produced metadata "
"for project name pkga. Fix your #egg=pkgb "
"fragments.") in result.stderr
assert "Successfully installed pkga" in str(result), str(result)
def test_install_tar_xz(script, data):
try:
import lzma # noqa
except ImportError:
pytest.skip("No lzma support")
res = script.pip('install', data.packages / 'singlemodule-0.0.1.tar.xz')
assert "Successfully installed singlemodule-0.0.1" in res.stdout, res
def test_install_tar_lzma(script, data):
try:
import lzma # noqa
except ImportError:
pytest.skip("No lzma support")
res = script.pip('install', data.packages / 'singlemodule-0.0.1.tar.lzma')
assert "Successfully installed singlemodule-0.0.1" in res.stdout, res
def test_double_install(script):
"""
Test double install passing with two same version requirements
"""
result = script.pip('install', 'pip', 'pip',
expect_error=False)
msg = "Double requirement given: pip (already in pip, name='pip')"
assert msg not in result.stderr
def test_double_install_fail(script):
"""
Test double install failing with two different version requirements
"""
result = script.pip('install', 'pip==*', 'pip==7.1.2', expect_error=True)
msg = ("Double requirement given: pip==7.1.2 (already in pip==*, "
"name='pip')")
assert msg in result.stderr
def _get_expected_error_text():
return (
"Package 'pkga' requires a different Python: {} not in '<1.0'"
).format('.'.join(map(str, sys.version_info[:3])))
def test_install_incompatible_python_requires(script):
script.scratch_path.joinpath("pkga").mkdir()
pkga_path = script.scratch_path / 'pkga'
pkga_path.joinpath("setup.py").write_text(textwrap.dedent("""
from setuptools import setup
setup(name='pkga',
python_requires='<1.0',
version='0.1')
"""))
result = script.pip('install', pkga_path, expect_error=True)
assert _get_expected_error_text() in result.stderr, str(result)
def test_install_incompatible_python_requires_editable(script):
script.scratch_path.joinpath("pkga").mkdir()
pkga_path = script.scratch_path / 'pkga'
pkga_path.joinpath("setup.py").write_text(textwrap.dedent("""
from setuptools import setup
setup(name='pkga',
python_requires='<1.0',
version='0.1')
"""))
result = script.pip(
'install', '--editable=%s' % pkga_path, expect_error=True)
assert _get_expected_error_text() in result.stderr, str(result)
def test_install_incompatible_python_requires_wheel(script, with_wheel):
script.scratch_path.joinpath("pkga").mkdir()
pkga_path = script.scratch_path / 'pkga'
pkga_path.joinpath("setup.py").write_text(textwrap.dedent("""
from setuptools import setup
setup(name='pkga',
python_requires='<1.0',
version='0.1')
"""))
script.run(
'python', 'setup.py', 'bdist_wheel', '--universal', cwd=pkga_path)
result = script.pip('install', './pkga/dist/pkga-0.1-py2.py3-none-any.whl',
expect_error=True)
assert _get_expected_error_text() in result.stderr, str(result)
def test_install_compatible_python_requires(script):
script.scratch_path.joinpath("pkga").mkdir()
pkga_path = script.scratch_path / 'pkga'
pkga_path.joinpath("setup.py").write_text(textwrap.dedent("""
from setuptools import setup
setup(name='pkga',
python_requires='>1.0',
version='0.1')
"""))
res = script.pip('install', pkga_path)
assert "Successfully installed pkga-0.1" in res.stdout, res
@pytest.mark.network
def test_install_pep508_with_url(script):
res = script.pip(
'install', '--no-index',
'packaging@https://files.pythonhosted.org/packages/2f/2b/'
'c681de3e1dbcd469537aefb15186b800209aa1f299d933d23b48d85c9d56/'
'packaging-15.3-py2.py3-none-any.whl#sha256='
'ce1a869fe039fbf7e217df36c4653d1dbe657778b2d41709593a0003584405f4'
)
assert "Successfully installed packaging-15.3" in str(res), str(res)
@pytest.mark.network
def test_install_pep508_with_url_in_install_requires(script):
pkga_path = create_test_package_with_setup(
script, name='pkga', version='1.0',
install_requires=[
'packaging@https://files.pythonhosted.org/packages/2f/2b/'
'c681de3e1dbcd469537aefb15186b800209aa1f299d933d23b48d85c9d56/'
'packaging-15.3-py2.py3-none-any.whl#sha256='
'ce1a869fe039fbf7e217df36c4653d1dbe657778b2d41709593a0003584405f4'
],
)
res = script.pip('install', pkga_path)
assert "Successfully installed packaging-15.3" in str(res), str(res)
def test_install_pep508_with_url_in_install_requires_url_change_wheel(script):
dep_v1_path = create_basic_wheel_for_package(
script, name='dep', version='1.0',
)
dep_v2_path = create_basic_wheel_for_package(
script, name='dep', version='2.0',
)
pkga_path = create_basic_wheel_for_package(
script, name='pkga', version='1.0',
depends=['dep @ ' + path_to_url(dep_v1_path)],
)
res = script.pip('install', pkga_path)
assert "Successfully installed dep-1.0" in str(res), str(res)
pkga_path.rmtree()
# Updating the URL to the dependency installs the updated dependency
pkga_path = create_basic_wheel_for_package(
script, name='pkga', version='2.0',
depends=['dep @ ' + path_to_url(dep_v2_path)],
)
res = script.pip('install', pkga_path)
assert "Successfully installed dep-2.0" in str(res), str(res)
res = script.pip('install', pkga_path)
# pip can determine the version from a wheel's filename, so the
# dependency is not reinstalled if the URL doesn't change
assert "Requirement already satisfied: dep==2.0" in str(res), str(res)
def test_install_pep508_with_url_in_install_requires_url_change_directory(
script):
dep_v1_path = create_test_package_with_setup(
script, name='dep', version='1.0',
)
# Rename the package directory so it doesn't get overwritten when
# creating the package for dep_v2
dep_v1_path.move(dep_v1_path.folder / 'dep_v1')
dep_v1_path = dep_v1_path.folder / 'dep_v1'
dep_v2_path = create_test_package_with_setup(
script, name='dep', version='2.0',
)
pkga_path = create_basic_wheel_for_package(
script, name='pkga', version='1.0',
depends=['dep @ ' + path_to_url(dep_v1_path)],
)
res = script.pip('install', pkga_path)
assert "Successfully installed dep-1.0" in str(res), str(res)
pkga_path.rmtree()
# Updating the URL to the dependency installs the updated dependency
pkga_path = create_basic_wheel_for_package(
script, name='pkga', version='2.0',
depends=['dep @ ' + path_to_url(dep_v2_path)],
)
res = script.pip('install', pkga_path)
assert "Successfully installed dep-2.0" in str(res), str(res)
res = script.pip('install', pkga_path)
# pip can't determine versions from a directory name, so it will always
# reinstall the dependency
assert "Successfully installed dep-2.0" in str(res), str(res)
@pytest.mark.network
@pytest.mark.parametrize('index', (PyPI.simple_url, TestPyPI.simple_url))
def test_install_from_test_pypi_with_ext_url_dep_is_blocked(script, index):
res = script.pip(
'install',
'--index-url',
index,
'pep-508-url-deps',
expect_error=True,
)
error_message = (
"Packages installed from PyPI cannot depend on packages "
"which are not also hosted on PyPI."
)
error_cause = (
"pep-508-url-deps depends on sampleproject@ "
"https://github.com/pypa/sampleproject/archive/master.zip"
)
assert res.returncode == 1
assert error_message in res.stderr, str(res)
assert error_cause in res.stderr, str(res)
def test_installing_scripts_outside_path_prints_warning(script):
result = script.pip_install_local(
"--prefix", script.scratch_path, "script_wheel1"
)
assert "Successfully installed script-wheel1" in result.stdout, str(result)
assert "--no-warn-script-location" in result.stderr
def test_installing_scripts_outside_path_can_suppress_warning(script):
result = script.pip_install_local(
"--prefix", script.scratch_path, "--no-warn-script-location",
"script_wheel1"
)
assert "Successfully installed script-wheel1" in result.stdout, str(result)
assert "--no-warn-script-location" not in result.stderr
def test_installing_scripts_on_path_does_not_print_warning(script):
result = script.pip_install_local("script_wheel1")
assert "Successfully installed script-wheel1" in result.stdout, str(result)
assert "--no-warn-script-location" not in result.stderr
def test_installed_files_recorded_in_deterministic_order(script, data):
"""
Ensure that we record the files installed by a package in a deterministic
order, to make installs reproducible.
"""
to_install = data.packages.joinpath("FSPkg")
result = script.pip('install', to_install, expect_error=False)
fspkg_folder = script.site_packages / 'fspkg'
egg_info = 'FSPkg-0.1.dev0-py%s.egg-info' % pyversion
installed_files_path = (
script.site_packages / egg_info / 'installed-files.txt'
)
assert fspkg_folder in result.files_created, str(result.stdout)
assert installed_files_path in result.files_created, str(result)
installed_files_path = result.files_created[installed_files_path].full
installed_files_lines = [
p for p in Path(installed_files_path).read_text().split('\n') if p
]
assert installed_files_lines == sorted(installed_files_lines)
def test_install_conflict_results_in_warning(script, data):
pkgA_path = create_test_package_with_setup(
script,
name='pkgA', version='1.0', install_requires=['pkgb == 1.0'],
)
pkgB_path = create_test_package_with_setup(
script,
name='pkgB', version='2.0',
)
# Install pkgA without its dependency
result1 = script.pip('install', '--no-index', pkgA_path, '--no-deps')
assert "Successfully installed pkgA-1.0" in result1.stdout, str(result1)
# Then install an incorrect version of the dependency
result2 = script.pip(
'install', '--no-index', pkgB_path, allow_stderr_error=True,
)
assert "pkga 1.0 has requirement pkgb==1.0" in result2.stderr, str(result2)
assert "Successfully installed pkgB-2.0" in result2.stdout, str(result2)
def test_install_conflict_warning_can_be_suppressed(script, data):
pkgA_path = create_test_package_with_setup(
script,
name='pkgA', version='1.0', install_requires=['pkgb == 1.0'],
)
pkgB_path = create_test_package_with_setup(
script,
name='pkgB', version='2.0',
)
# Install pkgA without its dependency
result1 = script.pip('install', '--no-index', pkgA_path, '--no-deps')
assert "Successfully installed pkgA-1.0" in result1.stdout, str(result1)
# Then install an incorrect version of the dependency; suppressing warning
result2 = script.pip(
'install', '--no-index', pkgB_path, '--no-warn-conflicts'
)
assert "Successfully installed pkgB-2.0" in result2.stdout, str(result2)
def test_target_install_ignores_distutils_config_install_prefix(script):
prefix = script.scratch_path / 'prefix'
distutils_config = Path(os.path.expanduser('~'),
'pydistutils.cfg' if sys.platform == 'win32'
else '.pydistutils.cfg')
distutils_config.write_text(textwrap.dedent(
'''
[install]
prefix=%s
''' % str(prefix)))
target = script.scratch_path / 'target'
result = script.pip_install_local('simplewheel', '-t', target)
assert "Successfully installed simplewheel" in result.stdout
relative_target = os.path.relpath(target, script.base_path)
relative_script_base = os.path.relpath(prefix, script.base_path)
assert relative_target in result.files_created
assert relative_script_base not in result.files_created
@pytest.mark.network
@pytest.mark.skipif("sys.platform != 'win32'")
@pytest.mark.parametrize('pip_name', [
'pip',
'pip{}'.format(sys.version_info[0]),
'pip{}.{}'.format(*sys.version_info[:2]),
'pip.exe',
'pip{}.exe'.format(sys.version_info[0]),
'pip{}.{}.exe'.format(*sys.version_info[:2])
])
def test_protect_pip_from_modification_on_windows(script, pip_name):
"""
Test that pip modification command using ``pip install ...``
raises an error on Windows.
"""
command = [pip_name, 'install', 'pip != {}'.format(pip_current_version)]
result = script.run(*command, expect_error=True)
new_command = [sys.executable, '-m', 'pip'] + command[1:]
expected_message = (
'To modify pip, please run the following command:\n{}'
.format(' '.join(new_command))
)
assert expected_message in result.stderr, str(result)
@pytest.mark.network
@pytest.mark.skipif("sys.platform != 'win32'")
def test_protect_pip_from_modification_via_deps_on_windows(script):
"""
Test ``pip install pkga`` raises an error on Windows
if `pkga` implicitly tries to upgrade pip.
"""
pkga_wheel_path = create_basic_wheel_for_package(
script,
'pkga', '0.1',
depends=['pip != {}'.format(pip_current_version)],
)
# Make sure pip install pkga raises an error
args = ['install', pkga_wheel_path]
result = script.pip(*args, expect_error=True, use_module=False)
new_command = [sys.executable, '-m', 'pip'] + args
expected_message = (
'To modify pip, please run the following command:\n{}'
.format(' '.join(new_command))
)
assert expected_message in result.stderr, str(result)
@pytest.mark.network
@pytest.mark.skipif("sys.platform != 'win32'")
def test_protect_pip_from_modification_via_sub_deps_on_windows(script):
"""
Test ``pip install pkga`` raises an error on Windows
    if sub-dependencies of `pkga` implicitly try to upgrade pip.
"""
# Make a wheel for pkga which requires pkgb
pkga_wheel_path = create_basic_wheel_for_package(
script,
'pkga', '0.1',
depends=['pkgb'],
)
# Make a wheel for pkgb which requires pip
pkgb_wheel_path = create_basic_wheel_for_package(
script,
'pkgb', '0.1',
depends=['pip != {}'.format(pip_current_version)],
)
# Make sure pip install pkga raises an error
args = [
'install', pkga_wheel_path, '--find-links', pkgb_wheel_path.parent
]
result = script.pip(*args, expect_error=True, use_module=False)
new_command = [sys.executable, '-m', 'pip'] + args
expected_message = (
'To modify pip, please run the following command:\n{}'
.format(' '.join(new_command))
)
assert expected_message in result.stderr, str(result)
@pytest.mark.parametrize(
'install_args, expected_message', [
([], 'Requirement already satisfied: pip in'),
(['--upgrade'], 'Requirement already up-to-date: pip in'),
]
)
@pytest.mark.parametrize("use_module", [True, False])
def test_install_pip_does_not_modify_pip_when_satisfied(
script, install_args, expected_message, use_module):
"""
    Test that pip is not upgraded if it already satisfies the requirement.
"""
result = script.pip_install_local(
'pip', *install_args, use_module=use_module
)
assert expected_message in result.stdout, str(result)
def test_ignore_yanked_file(script, data):
"""
Test ignore a "yanked" file.
"""
result = script.pip(
'install', 'simple',
'--index-url', data.index_url('yanked'),
)
# Make sure a "yanked" release is ignored
assert 'Successfully installed simple-2.0\n' in result.stdout, str(result)
def test_install_yanked_file_and_print_warning(script, data):
"""
Test install a "yanked" file and print a warning.
Yanked files are always ignored, unless they are the only file that
matches a version specifier that "pins" to an exact version (PEP 592).
"""
result = script.pip(
'install', 'simple==3.0',
'--index-url', data.index_url('yanked'),
expect_stderr=True,
)
expected_warning = 'Reason for being yanked: test reason message'
assert expected_warning in result.stderr, str(result)
# Make sure a "yanked" release is installed
assert 'Successfully installed simple-3.0\n' in result.stdout, str(result)
| mit | -1,385,187,613,620,103,400 | 35.920113 | 79 | 0.652461 | false |
IEEE-NITK/SummerProjects17 | Competitive-Coding/Assignment-2/Priyam/DFS.py | 1 | 1889 | class Vertex:
def __init__(self, n):
self.name = n
self.neighbors = list()
self.discovery = 0
self.finish = 0
self.color = 'black'
def add_neighbor(self, v):
if v not in self.neighbors:
self.neighbors.append(v)
self.neighbors.sort()
class Graph:
vertices = {}
time = 0
def add_vertex(self, vertex):
if isinstance(vertex, Vertex) and vertex.name not in self.vertices:
self.vertices[vertex.name] = vertex
return True
else:
return False
def add_edge(self, u, v):
if u in self.vertices and v in self.vertices:
for key, value in self.vertices.items():
if key == u:
value.add_neighbor(v)
if key == v:
value.add_neighbor(u)
return True
else:
return False
def print_graph(self):
for key in sorted(list(self.vertices.keys())):
print(key + str(self.vertices[key].neighbors) + " " + str(self.vertices[key].discovery) + "/" + str(
self.vertices[key].finish))
def _dfs(self, vertex):
global time
vertex.color = 'red'
vertex.discovery = time
time += 1
for v in vertex.neighbors:
if self.vertices[v].color == 'black':
self._dfs(self.vertices[v])
vertex.color = 'blue'
vertex.finish = time
time += 1
def dfs(self, vertex):
global time
time = 1
self._dfs(vertex)
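# Illustrative run (hypothetical input): given
#   4 3
#   1 2
#   2 3
#   3 4
# the driver below builds an undirected graph on vertices '1'..'4', runs a
# depth-first search from vertex '1', and print_graph() lists each vertex with
# its sorted neighbour names and its discovery/finish times; vertices that are
# not reachable from '1' keep the initial 0/0.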
n, m = map(int, raw_input().split())
g = Graph()
a = Vertex('1')
g.add_vertex(a)
for i in xrange(1, n+1):
g.add_vertex(Vertex(str(i)))
edges = []
for i in xrange(m):
    # keep each edge as a (u, v) pair of vertex names; slicing a joined
    # string only works for single-character vertex labels
    edges.append(raw_input().split())
for u, v in edges:
    g.add_edge(u, v)
g.dfs(a)
g.print_graph() | mit | 7,833,423,528,937,795,000 | 24.540541 | 113 | 0.523557 | false |
stscieisenhamer/ginga | ginga/examples/gtk/example2_gtk.py | 1 | 8631 | #! /usr/bin/env python
#
# example2_gtk.py -- Simple, configurable FITS viewer.
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
from __future__ import print_function
import sys, os
import logging, logging.handlers
from ginga import AstroImage
from ginga.gtkw import GtkHelp
from ginga.gtkw.ImageViewGtk import CanvasView
from ginga.canvas.CanvasObject import get_canvas_types
from ginga import colors
from ginga.misc import log
import gtk
STD_FORMAT = '%(asctime)s | %(levelname)1.1s | %(filename)s:%(lineno)d (%(funcName)s) | %(message)s'
class FitsViewer(object):
def __init__(self, logger):
self.logger = logger
self.drawcolors = colors.get_colors()
self.dc = get_canvas_types()
root = gtk.Window(gtk.WINDOW_TOPLEVEL)
root.set_title("Gtk2 CanvasView Example")
root.set_border_width(2)
root.connect("delete_event", lambda w, e: quit(w))
self.root = root
self.select = GtkHelp.FileSelection(root)
vbox = gtk.VBox(spacing=2)
fi = CanvasView(logger)
fi.enable_autocuts('on')
fi.set_autocut_params('zscale')
fi.enable_autozoom('on')
fi.set_zoom_algorithm('rate')
fi.set_zoomrate(1.4)
fi.show_pan_mark(True)
fi.set_callback('drag-drop', self.drop_file)
fi.set_callback('none-move', self.motion)
fi.set_bg(0.2, 0.2, 0.2)
fi.ui_setActive(True)
self.fitsimage = fi
bd = fi.get_bindings()
bd.enable_all(True)
# canvas that we will draw on
canvas = self.dc.DrawingCanvas()
canvas.enable_draw(True)
canvas.set_drawtype('rectangle', color='lightblue')
canvas.setSurface(fi)
self.canvas = canvas
# add canvas to view
private_canvas = fi.get_canvas()
private_canvas.register_for_cursor_drawing(fi)
private_canvas.add(canvas)
canvas.ui_setActive(True)
self.drawtypes = canvas.get_drawtypes()
self.drawtypes.sort()
# add a color bar
#fi.show_color_bar(True)
fi.show_focus_indicator(True)
# add little mode indicator that shows keyboard modal states
fi.show_mode_indicator(True, corner='ur')
w = fi.get_widget()
w.set_size_request(512, 512)
vbox.pack_start(w, fill=True, expand=True)
self.readout = gtk.Label("")
vbox.pack_start(self.readout, fill=True, expand=False)
hbox = gtk.HBox(spacing=5)
wdrawtype = GtkHelp.combo_box_new_text()
index = 0
for name in self.drawtypes:
wdrawtype.insert_text(index, name)
index += 1
index = self.drawtypes.index('rectangle')
wdrawtype.set_active(index)
wdrawtype.connect('changed', self.set_drawparams)
self.wdrawtype = wdrawtype
wdrawcolor = GtkHelp.combo_box_new_text()
index = 0
for name in self.drawcolors:
wdrawcolor.insert_text(index, name)
index += 1
index = self.drawcolors.index('lightblue')
wdrawcolor.set_active(index)
wdrawcolor.connect('changed', self.set_drawparams)
self.wdrawcolor = wdrawcolor
wfill = GtkHelp.CheckButton("Fill")
wfill.sconnect('toggled', self.set_drawparams)
self.wfill = wfill
walpha = GtkHelp.SpinButton()
adj = walpha.get_adjustment()
adj.configure(0.0, 0.0, 1.0, 0.1, 0.1, 0)
walpha.set_value(1.0)
walpha.set_digits(1)
walpha.sconnect('value-changed', self.set_drawparams)
self.walpha = walpha
wclear = gtk.Button("Clear Canvas")
wclear.connect('clicked', self.clear_canvas)
wopen = gtk.Button("Open File")
wopen.connect('clicked', self.open_file)
wquit = gtk.Button("Quit")
        wquit.connect('clicked', self.quit)
for w in (wquit, wclear, walpha, gtk.Label("Alpha:"),
wfill, wdrawcolor, wdrawtype, wopen):
hbox.pack_end(w, fill=False, expand=False)
vbox.pack_start(hbox, fill=False, expand=False)
root.add(vbox)
def get_widget(self):
return self.root
def set_drawparams(self, w):
index = self.wdrawtype.get_active()
kind = self.drawtypes[index]
index = self.wdrawcolor.get_active()
fill = self.wfill.get_active()
alpha = self.walpha.get_value()
params = { 'color': self.drawcolors[index],
'alpha': alpha,
#'cap': 'ball',
}
if kind in ('circle', 'rectangle', 'polygon', 'triangle',
'righttriangle', 'ellipse', 'square', 'box'):
params['fill'] = fill
params['fillalpha'] = alpha
self.canvas.set_drawtype(kind, **params)
def clear_canvas(self, w):
self.canvas.delete_all_objects()
def load_file(self, filepath):
image = AstroImage.AstroImage(logger=self.logger)
image.load_file(filepath)
self.fitsimage.set_image(image)
self.root.set_title(filepath)
def open_file(self, w):
self.select.popup("Open FITS file", self.load_file)
def drop_file(self, fitsimage, paths):
fileName = paths[0]
self.load_file(fileName)
def motion(self, fitsimage, button, data_x, data_y):
# Get the value under the data coordinates
try:
#value = fitsimage.get_data(data_x, data_y)
# We report the value across the pixel, even though the coords
# change halfway across the pixel
value = fitsimage.get_data(int(data_x+0.5), int(data_y+0.5))
except Exception:
value = None
fits_x, fits_y = data_x + 1, data_y + 1
# Calculate WCS RA
try:
# NOTE: image function operates on DATA space coords
image = fitsimage.get_image()
if image is None:
# No image loaded
return
ra_txt, dec_txt = image.pixtoradec(fits_x, fits_y,
format='str', coords='fits')
except Exception as e:
self.logger.warning("Bad coordinate conversion: %s" % (
str(e)))
ra_txt = 'BAD WCS'
dec_txt = 'BAD WCS'
text = "RA: %s DEC: %s X: %.2f Y: %.2f Value: %s" % (
ra_txt, dec_txt, fits_x, fits_y, value)
self.readout.set_text(text)
def quit(self, w):
gtk.main_quit()
return True
def main(options, args):
logger = log.get_logger("example2", options=options)
# Check whether user wants to use OpenCv
if options.opencv:
from ginga import trcalc
try:
trcalc.use('opencv')
except Exception as e:
logger.warning("failed to set OpenCv preference: %s" % (str(e)))
# Check whether user wants to use OpenCL
elif options.opencl:
from ginga import trcalc
try:
trcalc.use('opencl')
except Exception as e:
logger.warning("failed to set OpenCL preference: %s" % (str(e)))
fv = FitsViewer(logger)
root = fv.get_widget()
root.show_all()
if len(args) > 0:
fv.load_file(args[0])
gtk.main()
if __name__ == "__main__":
# Parse command line options with nifty optparse module
from optparse import OptionParser
usage = "usage: %prog [options] cmd [args]"
optprs = OptionParser(usage=usage, version=('%%prog'))
optprs.add_option("--debug", dest="debug", default=False, action="store_true",
help="Enter the pdb debugger on main()")
optprs.add_option("--opencv", dest="opencv", default=False,
action="store_true",
help="Use OpenCv acceleration")
optprs.add_option("--opencl", dest="opencl", default=False,
action="store_true",
help="Use OpenCL acceleration")
optprs.add_option("--profile", dest="profile", action="store_true",
default=False,
help="Run the profiler on main()")
log.addlogopts(optprs)
(options, args) = optprs.parse_args(sys.argv[1:])
# Are we debugging this?
if options.debug:
import pdb
pdb.run('main(options, args)')
# Are we profiling this?
elif options.profile:
import profile
print(("%s profile:" % sys.argv[0]))
profile.run('main(options, args)')
else:
main(options, args)
# END
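# Typical invocation (assuming ginga and the GTK2 bindings are installed):
#   python example2_gtk.py /path/to/image.fits
# The --opencv/--opencl switches defined above only select an optional
# acceleration backend; --debug and --profile wrap main() in pdb or the
# profiler.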
| bsd-3-clause | 7,025,757,477,024,090,000 | 29.715302 | 100 | 0.579771 | false |
rcosnita/fantastico | fantastico/contrib/dynamic_menu/models/menus.py | 1 | 3240 | '''
Copyright 2013 Cosnita Radu Viorel
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
.. codeauthor:: Radu Viorel Cosnita <[email protected]>
.. py:module:: fantastico.contrib.dynamic_menu.models.menu
'''
from fantastico.mvc import BASEMODEL
from sqlalchemy.schema import Column, ForeignKey
from sqlalchemy.types import Integer, String
from sqlalchemy.orm import relationship
class DynamicMenu(BASEMODEL):
'''This class defines supported attributes for a dynamic menu. In this version, users can easily define the following menu
attributes:
#. Menu unique identifier (read only).
#. Menu name.'''
__tablename__ = "menus"
id = Column("id", Integer, primary_key=True, autoincrement=True)
name = Column("name", String(150), nullable=False)
def __init__(self, name):
        '''This constructor initializes the menu with the desired name.
:param name: Dynamic menu friendly name.
:type name: string
'''
self.name = name
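# Hypothetical usage sketch (the literal values below are assumptions, only the
# two model classes come from this module):
#   menu = DynamicMenu(name="Main menu")
#   item = DynamicMenuItem(target="_self", url="/home", title="Home page",
#                          label="Home", menu_id=1)
# Both objects would then be persisted through a normal SQLAlchemy session
# (add() / commit()).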
class DynamicMenuItem(BASEMODEL):
'''This class defines supported attributes for a dynamic menu item. In this version, users can easily define the following
menu item attributes:
#. Item unique identifier.
#. A target which will display the menu url. Currently, all http targets are valid.
#. Item url where user will be redirected after click / tap.
#. Item friendly title which will be displayed in environments which support tooltips.
#. Item label displayed to user.
'''
__tablename__ = "menu_items"
id = Column("id", Integer, primary_key=True, autoincrement=True)
target = Column("target", String(50), nullable=False)
url = Column("url", String(255), nullable=False)
title = Column("title", String(255), nullable=False)
label = Column("label", String(255), nullable=False)
menu_id = Column("menu_id", Integer, ForeignKey(DynamicMenu.id))
menu = relationship(DynamicMenu, primaryjoin=menu_id == DynamicMenu.id)
def __init__(self, target, url, title, label, menu_id):
        '''This constructor initializes all mandatory attributes of a dynamic menu item.'''
self.target = target
self.url = url
self.title = title
self.label = label
self.menu_id = menu_id
| mit | -8,364,358,867,303,549,000 | 42.783784 | 126 | 0.723765 | false |
ddietze/pyFSRS | core/FilePickerCtrl.py | 1 | 5078 | """
.. module:: FilePickerCtrl
:platform: Windows
.. moduleauthor:: Daniel R. Dietze <[email protected]>
A simple file and path picker widget consisting of a text field and a button.
..
This file is part of the pyFSRS app.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Copyright 2014-2016 Daniel Dietze <[email protected]>.
"""
import wx
import wx.lib.newevent
import os
FileSelectEvent, EVT_FILE_SELECT = wx.lib.newevent.NewEvent()
class FilePickerCtrl(wx.Panel):
"""FilePickerCtrl is a simple file picker widget for wxPython consisting of a text-field and a
'select file'-button.
:param wxWindow parent: Parent window.
:param int id: Widget it (default=-1).
:param str startDir: Initial directory to display in search dialog (default='' for current).
:param str mode: Type of dialog ('open', 'save', 'path' = default).
:param kwargs: Other parameters that are passed on to the wxPanel constructor.
"""
def __init__(self, parent, id=-1, startDir='', mode='path', **kwargs):
wx.Panel.__init__(self, parent, id, **kwargs)
self.startDir = startDir
if mode not in ["open", "save", "path"]:
raise ValueError("Mode must be 'path', 'open' or 'save'!")
if mode == "open":
self.mode = wx.FD_OPEN
self.dlgt = "Choose file to open.."
elif mode == "save":
            self.mode = wx.FD_SAVE
self.dlgt = "Choose file to save.."
else:
self.mode = None
self.dlgt = "Choose path.."
# create widget
self.rborder = 0
self.outersizer = wx.BoxSizer(wx.VERTICAL)
self.sizer = wx.BoxSizer(wx.HORIZONTAL)
self.input = wx.TextCtrl(self, -1, "")
self.button = wx.Button(self, -1, "...", style=wx.BU_EXACTFIT)
self.sizer.Add(self.input, 1, wx.EXPAND | wx.RIGHT, 3)
self.sizer.Add(self.button, 0)
self.outersizer.Add(self.sizer, 1, wx.EXPAND | wx.RIGHT, self.rborder)
self.SetSizer(self.outersizer)
self.Fit()
self.SetAutoLayout(True)
if 'size' in kwargs:
self.SetSize(kwargs['size'])
self.button.Bind(wx.EVT_BUTTON, self.onBrowse)
def GetSize(self):
"""Return wxSize-object containing widget dimensions.
"""
size = wx.Panel.GetSize(self)
return wx.Size(size[0] - self.rborder, size[1])
def SetSize(self, w, h=None):
"""Set widget size.
        :param mixed w: Width in pixels if h is not None. Otherwise a tuple containing width and height.
        :param int h: Height in pixels (default=None).
"""
if h is None:
wx.Panel.SetSize(self, (w[0] + self.rborder, w[1]))
else:
wx.Panel.SetSize(self, (w + self.rborder, h))
def GetValue(self):
"""Return selected path.
"""
return self.input.GetValue()
def SetValue(self, value):
"""Set current path.
"""
self.input.SetValue(str(value))
def Enable(self, value=True):
"""Enable or disable widget when value is True (default) or False.
"""
self.input.Enable(value)
return self.button.Enable(value)
def Disable(self):
"""Disable widget.
"""
self.Enable(False)
def onBrowse(self, event):
"""Handle the button click event by displaying a file or path dialog.
Emits a `FileSelectEvent` when the user clicks OK.
"""
current = self.GetValue()
directory = os.path.split(current)
if os.path.isdir(current):
directory = current
current = ''
elif directory and os.path.isdir(directory[0]):
current = directory[1]
directory = directory[0]
else:
directory = self.startDir
if self.mode is None:
dlg = wx.DirDialog(self, self.dlgt, directory, wx.DD_DIR_MUST_EXIST)
else:
dlg = wx.FileDialog(self, self.dlgt, directory, current, "*.*", self.mode)
if dlg.ShowModal() == wx.ID_OK:
self.SetValue(dlg.GetPath())
# Create the event
evt = FileSelectEvent(filename=self.GetValue())
# Post the event
wx.PostEvent(self, evt)
dlg.Destroy()
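if __name__ == '__main__':
    # Minimal usage sketch (not part of the original module): embed the picker
    # in a frame and listen for its EVT_FILE_SELECT event. The frame title and
    # layout below are illustrative assumptions.
    app = wx.App(False)
    frame = wx.Frame(None, title="FilePickerCtrl demo")
    picker = FilePickerCtrl(frame, mode='open')

    def on_select(event):
        print("selected:", event.filename)

    picker.Bind(EVT_FILE_SELECT, on_select)
    frame.Show()
    app.MainLoop()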
| gpl-3.0 | -5,094,602,708,831,065,000 | 30.551282 | 101 | 0.589011 | false |
flashdagger/commandeur | commandeur/cmdtools.py | 1 | 1552 | """
Helpers for parsing Google-style docstrings into sections and argument annotations.
"""
from collections import defaultdict
import re
def parse_doc_arg(line):
"""
parse a section of argument annotations
returns a dict with all values found
"""
assert isinstance(line, str)
info = dict()
pattern = re.compile(r"(.*?)(?:\((.*)\))?\s*:((?:.*\n?.+)+)")
match = re.match(pattern, line)
if match is not None:
groups = match.groups()
for item, value in zip(['name', 'metavar', 'desc'], groups):
if value is not None:
value = value.strip()
info[item] = value
return info
def parse_doc(doc):
"""
    parse a docstring with Google style
returns a dict with all sections found
each section contains a list with all
paragraphs found
"""
info = defaultdict(list)
if doc is None:
return info
assert isinstance(doc, str)
lines = doc.splitlines()
section = 'header'
lastindent = -1
mayconnect = False
for line in lines:
indent, text = re.match(r"^(\s*)(.*?)\s*$", line).groups()
match = re.match("([A-Z][a-z]+):", text)
if match is not None:
section, = match.groups()
elif text:
if len(indent) > lastindent and mayconnect:
text = info[section].pop() + "\n" + text
if section != 'header':
lastindent = len(indent)
mayconnect = True
info[section].append(text)
elif text == "":
mayconnect = False
return info
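if __name__ == '__main__':
    # Illustrative usage sketch (not part of the original module): run the two
    # parsers over a small, made-up Google-style docstring.
    sample = """Say hello.

    Args:
        name (str): who to greet
    """
    print(dict(parse_doc(sample)))
    print(parse_doc_arg("name (str): who to greet"))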
| mit | -2,259,209,158,375,754,200 | 21.171429 | 68 | 0.541237 | false |
devlights/try-python | trypython/stdlib/re_/re04.py | 1 | 5007 | """
Regular expression samples.
About atomic groups.
REFERENCES:: http://bit.ly/2O3jVNn
http://bit.ly/2NXqocl
http://bit.ly/2NVGi71
http://bit.ly/2NXEg6m
"""
import re
import regex
from trypython.common.commoncls import SampleBase
from trypython.common.commonfunc import stopwatch, pr
_message = '1' * 300000000
# noinspection PyMethodMayBeStatic
class Sample(SampleBase):
def exec(self):
# ------------------------------------------------------------------------
        # About atomic groups
        # -----------------------------------------
        # When processing with regular expressions, the part that makes the
        # difference in speed is:
        #   - how often backtracking occurs
        # Performance differs depending on how well backtracking is avoided.
        # A normal regular expression matches greedily ("longest match") and
        # advances as far as it can with the given pattern. For example, with
        # the string "123456", the pattern "\d*" swallows the entire run of
        # digits. Even with the pattern "\d*:", the leading "\d*" first swallows
        # all of the digits, and only then is the remaining ":" checked for a
        # match. Since it does not match, the regex engine backtracks and looks
        # for a new position. The more backtracking occurs, the longer the
        # processing takes.
        #
        # In such cases you can tell the engine up front that "if we got this
        # far and failed, there is no need to backtrack".
        # That is exactly what an "atomic group" specifies.
        # The behaviour of an atomic group is: once the pattern inside the group
        # has matched, all of its internal states are discarded so that no
        # backtracking happens inside the group.
        #
        # An atomic group is written with the following syntax:
        #   (?>pattern)
        # Rewriting the pattern above as an atomic group gives:
        #   (?>\d*):
        #
        # There is one problem, however: Python's standard module re does not
        # support atomic groups.
        # It seems possible to force the same behaviour anyway
        # (see http://bit.ly/2O3jVNn).
        #
        # Although not in the standard library, there is a module called regex
        # on PyPI which does support atomic groups (see http://bit.ly/2NXqocl).
        # To install it, run:
        #   $ python -m pip install regex
        #
        # When an atomic group is specified, no backtracking occurs, so deciding
        # that there is no match becomes very fast. (In normal mode the engine
        # has to keep backtracking before it can finally conclude that there is
        # no match.)
# ------------------------------------------------------------------------
        # Without an atomic group
        stopwatch(self.normal)()
        # Forcing the same behaviour with the standard module re
        stopwatch(self.atomic_stdlib_re)()
        # Atomic group via the external regex library
        stopwatch(self.atomic_regex_module)()
def normal(self):
global _message
m = re.match(r'\d*:', _message)
if m:
            pr('normal', 'matched')
        else:
            pr('normal', 'no match')
def atomic_stdlib_re(self):
global _message
m = re.match(r'(?=(?P<tmp>\d*:))(?P=tmp)', _message)
if m:
            pr('atomic_stdlib_re', 'matched')
        else:
            pr('atomic_stdlib_re', 'no match')
def atomic_regex_module(self):
"""Python の 標準モジュール re は、アトミックグループをサポートしていない。
http://bit.ly/2O3jVNn に記載されているように、無理やり評価させることも可能
らしいのだが、「regex」モジュールを pip でインストールして利用する方が楽。
「regex」モジュールは、(?>pattern)の書式をサポートしている。
"""
global _message
m = regex.match(r'(?>\d*):', _message)
if m:
            pr('atomic_regex_module', 'matched')
        else:
            pr('atomic_regex_module', 'no match')
def go():
obj = Sample()
obj.exec()
| mit | 7,720,292,043,671,647,000 | 28.567308 | 82 | 0.546341 | false |
Tehsmash/networking-cisco | networking_cisco/plugins/ml2/drivers/cisco/nexus/constants.py | 1 | 1927 | # Copyright (c) 2011-2016 Cisco Systems, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
RESERVED_PORT_HOST_ID = 'reserved_port'
CREDENTIAL_USERNAME = 'user_name'
CREDENTIAL_PASSWORD = 'password'
VPCPOOL = 'vpc_pool'
USERNAME = 'username'
PASSWORD = 'password'
SSHPORT = 'ssh_port'
PHYSNET = 'physnet'
IF_PC_DEPRECATE = 'intfcfg.portchannel'
IF_PC = 'intfcfg_portchannel'
HTTPS_VERIFY = 'https_verify'
HTTPS_CERT = 'https_local_certificate'
NVE_SRC_INTF = 'nve_src_intf'
UNAME_TUPLE = 0
PW_TUPLE = 1
HTTPS_VERIFY_TUPLE = 2
HTTPS_CERT_TUPLE = 3
COOKIE_TUPLE = 4
NETWORK_ADMIN = 'network_admin'
CISCO_NEXUS_ML2_MECH_DRIVER_V2 = 'cisco_nexus'
TYPE_NEXUS_VXLAN = 'nexus_vxlan'
NVE_INT_NUM = '1'
NEXUS_MAX_VLAN_NAME_LEN = 32
NO_DUPLICATE = 0
DUPLICATE_VLAN = 1
DUPLICATE_PORT = 2
NEXUS_TYPE_INVALID = -1
NEXUS_3K = 3
NEXUS_5K = 5
NEXUS_7K = 7
NEXUS_9K = 9
MAX_NEXUS_SSH_SESSIONS = 8
REPLAY_FAILURES = '_replay_failures'
FAIL_CONTACT = '_contact'
FAIL_CONFIG = '_config'
RESERVED_NEXUS_SWITCH_DEVICE_ID_R1 = "RESERVED_NEXUS_SWITCH_DEVICE_ID_R1"
NO_PORT_ID = "NONE"
NO_VLAN_OR_VNI_ID = '0'
SWITCH_ACTIVE = "ACTIVE"
SWITCH_RESTORE_S1 = "RESTORE_S1"
SWITCH_RESTORE_S2 = "RESTORE_S2"
SWITCH_INACTIVE = "INACTIVE"
CREATE_VLAN_SEND_SIZE = 20
CREATE_VLAN_BATCH = 200
CREATE_PORT_VLAN_LENGTH = 20
NOT_NATIVE = False
UNCONFIGURED_VLAN = "1-4094"
MINVPC = 1
MAXVPC = 4096
| apache-2.0 | -4,350,822,669,582,887,400 | 23.392405 | 78 | 0.71562 | false |
w0pke/oppgavegenerator | oppgavegen/views/game_views.py | 1 | 7835 | from django.contrib.auth.decorators import login_required
from django.shortcuts import render_to_response, redirect
from django.http import HttpResponseForbidden
from django.template import RequestContext
from oppgavegen.generation_folder.generation import generate_level
from oppgavegen.view_logic.rating import *
from oppgavegen.view_logic.progress import *
from oppgavegen.models import Set, Chapter, Level
from oppgavegen.forms import QuestionForm
from oppgavegen.view_logic.submit_and_answer import *
@login_required
def game(request, set_id):
context = RequestContext(request)
set_title = Set.objects.get(pk=set_id).name
return render_to_response('game/screen.html', {'set_id': set_id, 'set_title': set_title}, context)
def chapters(request, set_id):
game_set = Set.objects.get(pk=set_id)
set_title = game_set.name
is_requirement = game_set.is_requirement
set_chapters = game_set.chapters.all()
context = RequestContext(request)
medals = [] # Both lists get updated in chapter_progress
completed = []
if is_requirement:
# In case we want to do something special if the set is a requirement type set
progress_number = chapter_progress(request.user, game_set, medals, completed)
else:
progress_number = chapter_progress(request.user, game_set, medals, completed)
order = game_set.order
set_chapters_ordered = []
for x in order.split(','):
for chapter in set_chapters:
if chapter.pk == int(x):
set_chapters_ordered.append(chapter)
break
if request.is_ajax():
response = render_to_response('game/chapters.html',
{'chapters': set_chapters_ordered, 'medals': json.dumps(medals),
'completed': json.dumps(completed), 'progress_number': progress_number,
'set_id': set_id, "is_requirement": is_requirement}, context)
else:
response = render_to_response('game/chapters_noajax.html',
{'chapters': set_chapters_ordered, 'medals': json.dumps(medals),
'completed': json.dumps(completed), 'progress_number': progress_number,
'set_id': set_id, "set_title": set_title, "is_requirement": is_requirement}, context)
return response
def levels(request, chapter_id):
game_chapter = Chapter.objects.get(pk=chapter_id)
in_requirement_set = game_chapter.in_requirement_set
chapter_levels = game_chapter.levels.all()
chapter_title = game_chapter.name
context = RequestContext(request)
if in_requirement_set:
progress_number = len(chapter_levels)
else:
progress_number = calculate_progress(request.user, game_chapter)
star_per_level = get_stars_per_level(request.user, game_chapter)
order = game_chapter.order
chapter_levels_ordered = []
for x in order.split(','):
for chapter in chapter_levels:
if chapter.pk == int(x):
chapter_levels_ordered.append(chapter)
break
if request.is_ajax():
return render_to_response('game/levels.html',
{'levels': chapter_levels_ordered, 'chapter_title': chapter_title,
'progress_number': progress_number, 'spl': star_per_level, 'chapter_id': chapter_id,
'in_requirement_set':in_requirement_set},
context)
else:
return render_to_response('game/levels_noajax.html',
{'levels': chapter_levels_ordered, 'chapter_title': chapter_title,
'progress_number': progress_number, 'spl': star_per_level, 'chapter_id': chapter_id,
'in_requirement_set':in_requirement_set},
context)
@login_required
def get_template(request):
"""Gets a template for a given level"""
context = RequestContext(request)
#if request.method == 'POST':
context_dict = {'message': 'Noe har gått feil.'}
form = request.POST
    if form.get('level_id') is None:
return redirect('/')
level_id = int(form.get('level_id'))
chapter_id = int(form.get('chapter_id'))
set_id = int(form.get('set_id'))
set = Set.objects.get(pk=set_id)
#if check_for_level_skip(request.user, Chapter.objects.get(pk=chapter_id), level_id):
# return render_to_response('game/template.html', context_dict, context)
context['set_title'] = set.name
context['set_id'] = set_id
context['chapter_id'] = chapter_id
context['chapter_title'] = Chapter.objects.get(pk=chapter_id).name
context['level_title'] = Level.objects.get(pk=level_id).name
context['level_id'] = level_id
context_dict = generate_level(request.user, level_id)
context_dict['rating'] = get_user_rating(request.user)
level = Level.objects.get(pk=level_id)
context_dict['stars'] = get_user_stars_for_level(request.user, level)
context_dict['ulp'] = get_user_rating_for_level(request.user, level)
if request.is_ajax():
return render_to_response('game/template.html', context_dict, context)
else:
return render_to_response('game/template_noajax.html', context_dict, context)
def get_solution(request, level=1):
"""Returns a render of answers.html"""
context = RequestContext(request)
cheat_message = '\\text{Ulovlig tegn har blitt brukt i svar}'
required_message = '\\text{Svaret ditt har ikke utfylt alle krav}'
render_to = 'game/answer.html'
if request.method == 'POST':
form = QuestionForm(request.POST)
if form.is_valid():
form_values = form.process()
template = Template.objects.get(pk=form_values['primary_key'])
user_answer = form_values['user_answer']
try:
disallowed = json.loads(template.disallowed)
except ValueError:
disallowed = []
try:
required = json.loads(template.required)
except ValueError:
required = []
context_dict = make_answer_context_dict(form_values)
if (cheat_check(user_answer, disallowed, form_values['variable_dictionary'].split('§'))) and\
(form_values['template_type'] == 'normal') and (context_dict['user_won']):
context_dict['answer'] = cheat_message
return render_to_response(render_to, context_dict, context)
elif (required_check(user_answer, required, form_values['variable_dictionary'].split('§'))) and \
(form_values['template_type'] == 'normal') and (context_dict['user_won']):
context_dict['answer'] = required_message
return render_to_response(render_to, context_dict, context)
if request.is_ajax():
new_user_rating, new_star = change_level_rating(template, request.user, context_dict['user_won'],
form_values['template_type'], level)
context_dict['chapter_id'] = request.POST['chapter_id']
context_dict['ulp'] = int(new_user_rating)
context_dict['new_star'] = new_star
context_dict['stars'] = get_user_stars_for_level(request.user, Level.objects.get(pk=level))
return render_to_response(render_to, context_dict, context)
else:
change_elo(template, request.user, context_dict['user_won'], form_values['template_type'])
                return render_to_response(render_to, context_dict, context)
else:
print(form.errors) | bsd-3-clause | 2,966,183,766,129,237,500 | 44.017241 | 120 | 0.607635 | false |
sebastianwebber/pgconfig-api | common/bytes.py | 1 | 3943 | #!/usr/bin/env python
import math
"""
Bytes-to-human / human-to-bytes converter.
Based on: http://goo.gl/kTQMs
Working with Python 2.x and 3.x.
Author: Giampaolo Rodola' <g.rodola [AT] gmail [DOT] com>
License: MIT
"""
# see: http://goo.gl/kTQMs
SYMBOLS = {
'customary' : ('B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB'),
'customary_ext' : ('byte', 'kilo', 'mega', 'giga', 'tera', 'peta', 'exa',
'zetta', 'iotta'),
'iec' : ('Bi', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi', 'Yi'),
'iec_ext' : ('byte', 'kibi', 'mebi', 'gibi', 'tebi', 'pebi', 'exbi',
'zebi', 'yobi'),
}
def sizeof_fmt(num):
for x in ['bytes','KB','MB','GB','TB']:
if num < 1024.0 :
# always rounding
return "%.0f%s" % (num, x)
num /= 1024.0
def sizeof_fmt2(num, suffix='B'):
for unit in ['','Ki','Mi','Gi','Ti','Pi','Ei','Zi']:
if abs(num) < 1024.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f%s%s" % (num, 'Yi', suffix)
def bytes2human(n, format='%(value).0f%(symbol)s', symbols='customary'):
"""
Convert n bytes into a human readable string based on format.
symbols can be either "customary", "customary_ext", "iec" or "iec_ext",
see: http://goo.gl/kTQMs
>>> bytes2human(0)
'0.0 B'
>>> bytes2human(0.9)
'0.0 B'
>>> bytes2human(1)
'1.0 B'
>>> bytes2human(1.9)
'1.0 B'
>>> bytes2human(1024)
'1.0 K'
>>> bytes2human(1048576)
'1.0 M'
>>> bytes2human(1099511627776127398123789121)
'909.5 Y'
>>> bytes2human(9856, symbols="customary")
'9.6 K'
>>> bytes2human(9856, symbols="customary_ext")
'9.6 kilo'
>>> bytes2human(9856, symbols="iec")
'9.6 Ki'
>>> bytes2human(9856, symbols="iec_ext")
'9.6 kibi'
>>> bytes2human(10000, "%(value).1f %(symbol)s/sec")
'9.8 K/sec'
>>> # precision can be adjusted by playing with %f operator
>>> bytes2human(10000, format="%(value).5f %(symbol)s")
'9.76562 K'
"""
n = int(n)
if n < 0:
raise ValueError("n < 0")
symbols = SYMBOLS[symbols]
prefix = {}
for i, s in enumerate(symbols[1:]):
prefix[s] = 1 << (i+1)*10
    ## FORCE round up less than KB
for symbol in reversed(symbols[1:]):
if n >= prefix[symbol]:
if (int(n) % prefix[symbol]) == 0:
value = float(n) / prefix[symbol]
return format % locals()
return format % dict(symbol=symbols[0], value=n)
def human2bytes(s):
"""
Attempts to guess the string format based on default symbols
set and return the corresponding bytes as an integer.
When unable to recognize the format ValueError is raised.
>>> human2bytes('0 B')
0
>>> human2bytes('1 K')
1024
>>> human2bytes('1 M')
1048576
>>> human2bytes('1 Gi')
1073741824
>>> human2bytes('1 tera')
1099511627776
>>> human2bytes('0.5kilo')
512
>>> human2bytes('0.1 byte')
0
>>> human2bytes('1 k') # k is an alias for K
1024
>>> human2bytes('12 foo')
Traceback (most recent call last):
...
ValueError: can't interpret '12 foo'
"""
init = s
num = ""
while s and s[0:1].isdigit() or s[0:1] == '.':
num += s[0]
s = s[1:]
num = float(num)
letter = s.strip()
for name, sset in SYMBOLS.items():
if letter in sset:
break
else:
if letter == 'k':
# treat 'k' as an alias for 'K' as per: http://goo.gl/kTQMs
sset = SYMBOLS['customary']
letter = letter.upper()
else:
raise ValueError("can't interpret %r" % init)
prefix = {sset[0]:1}
for i, s in enumerate(sset[1:]):
prefix[s] = 1 << (i+1)*10
return int(num * prefix[letter]) | bsd-2-clause | 5,092,341,793,858,649,000 | 26.774648 | 78 | 0.516104 | false |
keras-team/keras-io | guides/intro_to_keras_for_engineers.py | 1 | 31765 | """
Title: Introduction to Keras for Engineers
Author: [fchollet](https://twitter.com/fchollet)
Date created: 2020/04/01
Last modified: 2020/04/28
Description: Everything you need to know to use Keras to build real-world machine learning solutions.
"""
"""
## Setup
"""
import numpy as np
import tensorflow as tf
from tensorflow import keras
"""
## Introduction
Are you a machine learning engineer looking to use Keras
to ship deep-learning powered features in real products? This guide will serve
as your first introduction to core Keras API concepts.
In this guide, you will learn how to:
- Prepare your data before training a model (by turning it into either NumPy
arrays or `tf.data.Dataset` objects).
- Do data preprocessing, for instance feature normalization or vocabulary
indexing.
- Build a model that turns your data into useful predictions,
using the Keras Functional API.
- Train your model with the built-in Keras `fit()` method, while being
mindful of checkpointing, metrics monitoring, and fault tolerance.
- Evaluate your model on a test data and how to use it for inference on new data.
- Customize what `fit()` does, for instance to build a GAN.
- Speed up training by leveraging multiple GPUs.
- Refine your model through hyperparameter tuning.
At the end of this guide, you will get pointers to end-to-end examples to solidify
these concepts:
- Image classification
- Text classification
- Credit card fraud detection
"""
"""
## Data loading & preprocessing
Neural networks don't process raw data, like text files, encoded JPEG image files, or
CSV files. They process **vectorized** & **standardized** representations.
- Text files need to be read into string tensors, then split into words. Finally, the
words need to be indexed & turned into integer tensors.
- Images need to be read and decoded into integer tensors, then converted to floating
point and normalized to small values (usually between 0 and 1).
- CSV data needs to be parsed, with numerical features converted to floating point
tensors and categorical features indexed and converted to integer tensors.
Then each feature typically needs to be normalized to zero-mean and unit-variance.
- Etc.
Let's start with data loading.
## Data loading
Keras models accept three types of inputs:
- **NumPy arrays**, just like Scikit-Learn and many other Python-based libraries. This
is a good option if your data fits in memory.
- **[TensorFlow `Dataset` objects](https://www.tensorflow.org/guide/data)**. This is a
high-performance option that is more suitable for datasets that do not fit in memory
and that are streamed from disk or from a distributed filesystem.
- **Python generators** that yield batches of data (such as custom subclasses of
the `keras.utils.Sequence` class).
Before you start training a model, you will need to make your data available as one of
these formats. If you have a large dataset and you are training on GPU(s), consider
using `Dataset` objects, since they will take care of performance-critical details,
such as:
- Asynchronously preprocessing your data on CPU while your GPU is busy, and buffering
it into a queue.
- Prefetching data on GPU memory so it's immediately available when the GPU has
finished processing the previous batch, so you can reach full GPU utilization.
Keras features a range of utilities to help you turn raw data on disk into a `Dataset`:
- `tf.keras.preprocessing.image_dataset_from_directory` turns image files sorted into
class-specific folders into a labeled dataset of image tensors.
- `tf.keras.preprocessing.text_dataset_from_directory` does the same for text files.
In addition, the TensorFlow `tf.data` includes other similar utilities, such as
`tf.data.experimental.make_csv_dataset` to load structured data from CSV files.
**Example: obtaining a labeled dataset from image files on disk**
Suppose you have image files sorted by class in different folders, like this:
```
main_directory/
...class_a/
......a_image_1.jpg
......a_image_2.jpg
...class_b/
......b_image_1.jpg
......b_image_2.jpg
```
Then you can do:
```python
# Create a dataset.
dataset = keras.preprocessing.image_dataset_from_directory(
'path/to/main_directory', batch_size=64, image_size=(200, 200))
# For demonstration, iterate over the batches yielded by the dataset.
for data, labels in dataset:
print(data.shape) # (64, 200, 200, 3)
print(data.dtype) # float32
print(labels.shape) # (64,)
print(labels.dtype) # int32
```
The label of a sample is the rank of its folder in alphanumeric order. Naturally, this
can also be configured explicitly by passing, e.g.
`class_names=['class_a', 'class_b']`, in which case label `0` will be `class_a` and
`1` will be `class_b`.
**Example: obtaining a labeled dataset from text files on disk**
Likewise for text: if you have `.txt` documents sorted by class in different folders,
you can do:
```python
dataset = keras.preprocessing.text_dataset_from_directory(
'path/to/main_directory', batch_size=64)
# For demonstration, iterate over the batches yielded by the dataset.
for data, labels in dataset:
print(data.shape) # (64,)
print(data.dtype) # string
print(labels.shape) # (64,)
print(labels.dtype) # int32
```
"""
"""
## Data preprocessing with Keras
Once your data is in the form of string/int/float NumPy arrays, or a `Dataset` object
(or Python generator) that yields batches of string/int/float tensors,
it is time to **preprocess** the data. This can mean:
- Tokenization of string data, followed by token indexing.
- Feature normalization.
- Rescaling the data to small values (in general, input values to a neural network
should be close to zero -- typically we expect either data with zero-mean and
unit-variance, or data in the `[0, 1]` range).
### The ideal machine learning model is end-to-end
In general, you should seek to do data preprocessing **as part of your model** as much
as possible, not via an external data preprocessing pipeline. That's because external
data preprocessing makes your models less portable when it's time to use them in
production. Consider a model that processes text: it uses a specific tokenization
algorithm and a specific vocabulary index. When you want to ship your model to a
mobile app or a JavaScript app, you will need to recreate the exact same preprocessing
setup in the target language. This can get very tricky: any small discrepancy between
the original pipeline and the one you recreate has the potential to completely
invalidate your model, or at least severely degrade its performance.
It would be much easier to be able to simply export an end-to-end model that already
includes preprocessing. **The ideal model should expect as input something as close as
possible to raw data: an image model should expect RGB pixel values in the `[0, 255]`
range, and a text model should accept strings of `utf-8` characters.** That way, the
consumer of the exported model doesn't have
to know about the preprocessing pipeline.
### Using Keras preprocessing layers
In Keras, you do in-model data preprocessing via **preprocessing layers**. This
includes:
- Vectorizing raw strings of text via the `TextVectorization` layer
- Feature normalization via the `Normalization` layer
- Image rescaling, cropping, or image data augmentation
The key advantage of using Keras preprocessing layers is that **they can be included
directly into your model**, either during training or after training,
which makes your models portable.
Some preprocessing layers have a state:
- `TextVectorization` holds an index mapping words or tokens to integer indices
- `Normalization` holds the mean and variance of your features
The state of a preprocessing layer is obtained by calling `layer.adapt(data)` on a
sample of the training data (or all of it).
**Example: turning strings into sequences of integer word indices**
"""
from tensorflow.keras.layers.experimental.preprocessing import TextVectorization
# Example training data, of dtype `string`.
training_data = np.array([["This is the 1st sample."], ["And here's the 2nd sample."]])
# Create a TextVectorization layer instance. It can be configured to either
# return integer token indices, or a dense token representation (e.g. multi-hot
# or TF-IDF). The text standardization and text splitting algorithms are fully
# configurable.
vectorizer = TextVectorization(output_mode="int")
# Calling `adapt` on an array or dataset makes the layer generate a vocabulary
# index for the data, which can then be reused when seeing new data.
vectorizer.adapt(training_data)
# After calling adapt, the layer is able to encode any n-gram it has seen before
# in the `adapt()` data. Unknown n-grams are encoded via an "out-of-vocabulary"
# token.
integer_data = vectorizer(training_data)
print(integer_data)
"""
**Example: turning strings into sequences of one-hot encoded bigrams**
"""
from tensorflow.keras.layers.experimental.preprocessing import TextVectorization
# Example training data, of dtype `string`.
training_data = np.array([["This is the 1st sample."], ["And here's the 2nd sample."]])
# Create a TextVectorization layer instance. It can be configured to either
# return integer token indices, or a dense token representation (e.g. multi-hot
# or TF-IDF). The text standardization and text splitting algorithms are fully
# configurable.
vectorizer = TextVectorization(output_mode="binary", ngrams=2)
# Calling `adapt` on an array or dataset makes the layer generate a vocabulary
# index for the data, which can then be reused when seeing new data.
vectorizer.adapt(training_data)
# After calling adapt, the layer is able to encode any n-gram it has seen before
# in the `adapt()` data. Unknown n-grams are encoded via an "out-of-vocabulary"
# token.
integer_data = vectorizer(training_data)
print(integer_data)
"""
**Example: normalizing features**
"""
from tensorflow.keras.layers.experimental.preprocessing import Normalization
# Example image data, with values in the [0, 255] range
training_data = np.random.randint(0, 256, size=(64, 200, 200, 3)).astype("float32")
normalizer = Normalization(axis=-1)
normalizer.adapt(training_data)
normalized_data = normalizer(training_data)
print("var: %.4f" % np.var(normalized_data))
print("mean: %.4f" % np.mean(normalized_data))
"""
**Example: rescaling & center-cropping images**
Both the `Rescaling` layer and the `CenterCrop` layer are stateless, so it isn't
necessary to call `adapt()` in this case.
"""
from tensorflow.keras.layers.experimental.preprocessing import CenterCrop
from tensorflow.keras.layers.experimental.preprocessing import Rescaling
# Example image data, with values in the [0, 255] range
training_data = np.random.randint(0, 256, size=(64, 200, 200, 3)).astype("float32")
cropper = CenterCrop(height=150, width=150)
scaler = Rescaling(scale=1.0 / 255)
output_data = scaler(cropper(training_data))
print("shape:", output_data.shape)
print("min:", np.min(output_data))
print("max:", np.max(output_data))
"""
## Building models with the Keras Functional API
A "layer" is a simple input-output transformation (such as the scaling &
center-cropping transformations above). For instance, here's a linear projection layer
that maps its inputs to a 16-dimensional feature space:
```python
dense = keras.layers.Dense(units=16)
```
A "model" is a directed acyclic graph of layers. You can think of a model as a
"bigger layer" that encompasses multiple sublayers and that can be trained via exposure
to data.
The most common and most powerful way to build Keras models is the Functional API. To
build models with the Functional API, you start by specifying the shape (and
optionally the dtype) of your inputs. If any dimension of your input can vary, you can
specify it as `None`. For instance, an input for 200x200 RGB image would have shape
`(200, 200, 3)`, but an input for RGB images of any size would have shape `(None,
None, 3)`.
"""
# Let's say we expect our inputs to be RGB images of arbitrary size
inputs = keras.Input(shape=(None, None, 3))
"""
After defining your input(s), you can chain layer transformations on top of your inputs,
until your final output:
"""
from tensorflow.keras import layers
# Center-crop images to 150x150
x = CenterCrop(height=150, width=150)(inputs)
# Rescale images to [0, 1]
x = Rescaling(scale=1.0 / 255)(x)
# Apply some convolution and pooling layers
x = layers.Conv2D(filters=32, kernel_size=(3, 3), activation="relu")(x)
x = layers.MaxPooling2D(pool_size=(3, 3))(x)
x = layers.Conv2D(filters=32, kernel_size=(3, 3), activation="relu")(x)
x = layers.MaxPooling2D(pool_size=(3, 3))(x)
x = layers.Conv2D(filters=32, kernel_size=(3, 3), activation="relu")(x)
# Apply global average pooling to get flat feature vectors
x = layers.GlobalAveragePooling2D()(x)
# Add a dense classifier on top
num_classes = 10
outputs = layers.Dense(num_classes, activation="softmax")(x)
"""
Once you have defined the directed acyclic graph of layers that turns your input(s) into
your outputs, instantiate a `Model` object:
"""
model = keras.Model(inputs=inputs, outputs=outputs)
"""
This model behaves basically like a bigger layer. You can call it on batches of data, like
this:
"""
data = np.random.randint(0, 256, size=(64, 200, 200, 3)).astype("float32")
processed_data = model(data)
print(processed_data.shape)
"""
You can print a summary of how your data gets transformed at each stage of the model.
This is useful for debugging.
Note that the output shape displayed for each layer includes the **batch size**. Here
the batch size is None, which indicates our model can process batches of any size.
"""
model.summary()
"""
The Functional API also makes it easy to build models that have multiple inputs (for
instance, an image *and* its metadata) or multiple outputs (for instance, predicting
the class of the image *and* the likelihood that a user will click on it). For a
deeper dive into what you can do, see our
[guide to the Functional API](/guides/functional_api/).
"""
"""
## Training models with `fit()`
At this point, you know:
- How to prepare your data (e.g. as a NumPy array or a `tf.data.Dataset` object)
- How to build a model that will process your data
The next step is to train your model on your data. The `Model` class features a
built-in training loop, the `fit()` method. It accepts `Dataset` objects, Python
generators that yield batches of data, or NumPy arrays.
Before you can call `fit()`, you need to specify an optimizer and a loss function (we
assume you are already familiar with these concepts). This is the `compile()` step:
```python
model.compile(optimizer=keras.optimizers.RMSprop(learning_rate=1e-3),
loss=keras.losses.CategoricalCrossentropy())
```
Loss and optimizer can be specified via their string identifiers (in this case
their default constructor argument values are used):
```python
model.compile(optimizer='rmsprop', loss='categorical_crossentropy')
```
Once your model is compiled, you can start "fitting" the model to the data.
Here's what fitting a model looks like with NumPy data:
```python
model.fit(numpy_array_of_samples, numpy_array_of_labels,
batch_size=32, epochs=10)
```
Besides the data, you have to specify two key parameters: the `batch_size` and
the number of epochs (iterations on the data). Here our data will get sliced into batches
of 32 samples, and the model will iterate 10 times over the data during training.
Here's what fitting a model looks like with a dataset:
```python
model.fit(dataset_of_samples_and_labels, epochs=10)
```
Since the data yielded by a dataset is expected to be already batched, you don't need to
specify the batch size here.
Let's look at it in practice with a toy example model that learns to classify MNIST
digits:
"""
# Get the data as Numpy arrays
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
# Build a simple model
inputs = keras.Input(shape=(28, 28))
x = layers.experimental.preprocessing.Rescaling(1.0 / 255)(inputs)
x = layers.Flatten()(x)
x = layers.Dense(128, activation="relu")(x)
x = layers.Dense(128, activation="relu")(x)
outputs = layers.Dense(10, activation="softmax")(x)
model = keras.Model(inputs, outputs)
model.summary()
# Compile the model
model.compile(optimizer="adam", loss="sparse_categorical_crossentropy")
# Train the model for 1 epoch from Numpy data
batch_size = 64
print("Fit on NumPy data")
history = model.fit(x_train, y_train, batch_size=batch_size, epochs=1)
# Train the model for 1 epoch using a dataset
dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train)).batch(batch_size)
print("Fit on Dataset")
history = model.fit(dataset, epochs=1)
"""
The `fit()` call returns a "history" object which records what happened over the course
of training. The `history.history` dict contains per-epoch timeseries of metrics
values (here we have only one metric, the loss, and one epoch, so we only get a single
scalar):
"""
print(history.history)
"""
For a detailed overview of how to use `fit()`, see the
[guide to training & evaluation with the built-in Keras methods](
/guides/training_with_built_in_methods/).
"""
"""
### Keeping track of performance metrics
As you're training a model, you want to keep track of metrics such as classification
accuracy, precision, recall, AUC, etc. Besides, you want to monitor these metrics not
only on the training data, but also on a validation set.
**Monitoring metrics**
You can pass a list of metric objects to `compile()`, like this:
"""
model.compile(
optimizer="adam",
loss="sparse_categorical_crossentropy",
metrics=[keras.metrics.SparseCategoricalAccuracy(name="acc")],
)
history = model.fit(dataset, epochs=1)
"""
**Passing validation data to `fit()`**
You can pass validation data to `fit()` to monitor your validation loss & validation
metrics. Validation metrics get reported at the end of each epoch.
"""
val_dataset = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(batch_size)
history = model.fit(dataset, epochs=1, validation_data=val_dataset)
"""
### Using callbacks for checkpointing (and more)
If training goes on for more than a few minutes, it's important to save your model at
regular intervals during training. You can then use your saved models
to restart training in case your training process crashes (this is important for
multi-worker distributed training, since with many workers at least one of them is
bound to fail at some point).
An important feature of Keras is **callbacks**, configured in `fit()`. Callbacks are
objects that get called by the model at different point during training, in particular:
- At the beginning and end of each batch
- At the beginning and end of each epoch
Callbacks are a way to make model training entirely scriptable.
You can use callbacks to periodically save your model. Here's a simple example: a
`ModelCheckpoint` callback
configured to save the model at the end of every epoch. The filename will include the
current epoch.
```python
callbacks = [
keras.callbacks.ModelCheckpoint(
filepath='path/to/my/model_{epoch}',
save_freq='epoch')
]
model.fit(dataset, epochs=2, callbacks=callbacks)
```
"""
"""
You can also use callbacks to do things like periodically changing the learning rate of your
optimizer, streaming metrics to a Slack bot, sending yourself an email notification
when training is complete, etc.
For a detailed overview of what callbacks are available and how to write your own, see
the [callbacks API documentation](/api/callbacks/) and the
[guide to writing custom callbacks](/guides/writing_your_own_callbacks/).
"""
"""
### Monitoring training progress with TensorBoard
Staring at the Keras progress bar isn't the most ergonomic way to monitor how your loss
and metrics are evolving over time. There's a better solution:
[TensorBoard](https://www.tensorflow.org/tensorboard),
a web application that can display real-time graphs of your metrics (and more).
To use TensorBoard with `fit()`, simply pass a `keras.callbacks.TensorBoard` callback
specifying the directory where to store TensorBoard logs:
```python
callbacks = [
keras.callbacks.TensorBoard(log_dir='./logs')
]
model.fit(dataset, epochs=2, callbacks=callbacks)
```
You can then launch a TensorBoard instance that you can open in your browser to monitor
the logs getting written to this location:
```
tensorboard --logdir=./logs
```
What's more, you can launch an in-line TensorBoard tab when training models in Jupyter
/ Colab notebooks.
[Here's more information](https://www.tensorflow.org/tensorboard/tensorboard_in_notebooks).
"""
"""
### After `fit()`: evaluating test performance & generating predictions on new data
Once you have a trained model, you can evaluate its loss and metrics on new data via
`evaluate()`:
"""
loss, acc = model.evaluate(val_dataset) # returns loss and metrics
print("loss: %.2f" % loss)
print("acc: %.2f" % acc)
"""
You can also generate NumPy arrays of predictions (the activations of the output
layer(s) in the model) via `predict()`:
"""
predictions = model.predict(val_dataset)
print(predictions.shape)
"""
## Using `fit()` with a custom training step
By default, `fit()` is configured for **supervised learning**. If you need a different
kind of training loop (for instance, a GAN training loop), you
can provide your own implementation of the `Model.train_step()` method. This is the
method that is repeatedly called during `fit()`.
Metrics, callbacks, etc. will work as usual.
Here's a simple example that reimplements what `fit()` normally does:
```python
class CustomModel(keras.Model):
def train_step(self, data):
# Unpack the data. Its structure depends on your model and
# on what you pass to `fit()`.
x, y = data
with tf.GradientTape() as tape:
y_pred = self(x, training=True) # Forward pass
# Compute the loss value
# (the loss function is configured in `compile()`)
loss = self.compiled_loss(y, y_pred,
regularization_losses=self.losses)
# Compute gradients
trainable_vars = self.trainable_variables
gradients = tape.gradient(loss, trainable_vars)
# Update weights
self.optimizer.apply_gradients(zip(gradients, trainable_vars))
# Update metrics (includes the metric that tracks the loss)
self.compiled_metrics.update_state(y, y_pred)
# Return a dict mapping metric names to current value
return {m.name: m.result() for m in self.metrics}
# Construct and compile an instance of CustomModel
inputs = keras.Input(shape=(32,))
outputs = keras.layers.Dense(1)(inputs)
model = CustomModel(inputs, outputs)
model.compile(optimizer='adam', loss='mse', metrics=[...])
# Just use `fit` as usual
model.fit(dataset, epochs=3, callbacks=...)
```
For a detailed overview of how you customize the built-in training & evaluation loops,
see the guide:
["Customizing what happens in `fit()`"](/guides/customizing_what_happens_in_fit/).
"""
"""
## Debugging your model with eager execution
If you write custom training steps or custom layers, you will need to debug them. The
debugging experience is an integral part of a framework: with Keras, the debugging
workflow is designed with the user in mind.
By default, your Keras models are compiled to highly-optimized computation graphs that
deliver fast execution times. That means that the Python code you write (e.g. in a
custom `train_step`) is not the code you are actually executing. This introduces a
layer of indirection that can make debugging hard.
Debugging is best done step by step. You want to be able to sprinkle your code with
`print()` statements to see what your data looks like after every operation, and you want
to be able to use `pdb`. You can achieve this by **running your model eagerly**. With
eager execution, the Python code you write is the code that gets executed.
Simply pass `run_eagerly=True` to `compile()`:
```python
model.compile(optimizer='adam', loss='mse', run_eagerly=True)
```
Of course, the downside is that it makes your model significantly slower. Make sure to
switch it back off to get the benefits of compiled computation graphs once you are
done debugging!
In general, you will use `run_eagerly=True` every time you need to debug what's
happening inside your `fit()` call.
"""
"""
## Speeding up training with multiple GPUs
Keras has built-in industry-strength support for multi-GPU training and distributed
multi-worker training, via the `tf.distribute` API.
If you have multiple GPUs on your machine, you can train your model on all of them by:
- Creating a `tf.distribute.MirroredStrategy` object
- Building & compiling your model inside the strategy's scope
- Calling `fit()` and `evaluate()` on a dataset as usual
```python
# Create a MirroredStrategy.
strategy = tf.distribute.MirroredStrategy()
# Open a strategy scope.
with strategy.scope():
# Everything that creates variables should be under the strategy scope.
# In general this is only model construction & `compile()`.
model = Model(...)
model.compile(...)
# Train the model on all available devices.
train_dataset, val_dataset, test_dataset = get_dataset()
model.fit(train_dataset, epochs=2, validation_data=val_dataset)
# Test the model on all available devices.
model.evaluate(test_dataset)
```
For a detailed introduction to multi-GPU & distributed training, see
[this guide](/guides/distributed_training/).
"""
"""
## Doing preprocessing synchronously on-device vs. asynchronously on host CPU
You've learned about preprocessing, and you've seen an example where we put image
preprocessing layers (`CenterCrop` and `Rescaling`) directly inside our model.
Having preprocessing happen as part of the model during training
is great if you want to do on-device preprocessing, for instance, GPU-accelerated
feature normalization or image augmentation. But there are kinds of preprocessing that
are not suited to this setup: in particular, text preprocessing with the
`TextVectorization` layer. Due to its sequential nature and due to the fact that it
can only run on CPU, it's often a good idea to do **asynchronous preprocessing**.
With asynchronous preprocessing, your preprocessing operations will run on CPU, and the
preprocessed samples will be buffered into a queue while your GPU is busy with
previous batch of data. The next batch of preprocessed samples will then be fetched
from the queue to the GPU memory right before the GPU becomes available again
(prefetching). This ensures that preprocessing will not be blocking and that your GPU
can run at full utilization.
To do asynchronous preprocessing, simply use `dataset.map` to inject a preprocessing
operation into your data pipeline:
"""
# Example training data, of dtype `string`.
samples = np.array([["This is the 1st sample."], ["And here's the 2nd sample."]])
labels = [[0], [1]]
# Prepare a TextVectorization layer.
vectorizer = TextVectorization(output_mode="int")
vectorizer.adapt(samples)
# Asynchronous preprocessing: the text vectorization is part of the tf.data pipeline.
# First, create a dataset
dataset = tf.data.Dataset.from_tensor_slices((samples, labels)).batch(2)
# Apply text vectorization to the samples
dataset = dataset.map(lambda x, y: (vectorizer(x), y))
# Prefetch with a buffer size of 2 batches
dataset = dataset.prefetch(2)
# Our model should expect sequences of integers as inputs
inputs = keras.Input(shape=(None,), dtype="int64")
x = layers.Embedding(input_dim=10, output_dim=32)(inputs)
outputs = layers.Dense(1)(x)
model = keras.Model(inputs, outputs)
model.compile(optimizer="adam", loss="mse", run_eagerly=True)
model.fit(dataset)
"""
Compare this to doing text vectorization as part of the model:
"""
# Our dataset will yield samples that are strings
dataset = tf.data.Dataset.from_tensor_slices((samples, labels)).batch(2)
# Our model should expect strings as inputs
inputs = keras.Input(shape=(1,), dtype="string")
x = vectorizer(inputs)
x = layers.Embedding(input_dim=10, output_dim=32)(x)
outputs = layers.Dense(1)(x)
model = keras.Model(inputs, outputs)
model.compile(optimizer="adam", loss="mse", run_eagerly=True)
model.fit(dataset)
"""
When training text models on CPU, you will generally not see any performance difference
between the two setups. When training on GPU, however, doing asynchronous buffered
preprocessing on the host CPU while the GPU is running the model itself can result in
a significant speedup.
After training, if you want to export an end-to-end model that includes the preprocessing
layer(s), this is easy to do, since `TextVectorization` is a layer:
```python
inputs = keras.Input(shape=(1,), dtype='string')
x = vectorizer(inputs)
outputs = trained_model(x)
end_to_end_model = keras.Model(inputs, outputs)
```
"""
"""
## Finding the best model configuration with hyperparameter tuning
Once you have a working model, you're going to want to optimize its configuration --
architecture choices, layer sizes, etc. Human intuition can only go so far, so you'll
want to leverage a systematic approach: hyperparameter search.
You can use
[Keras Tuner](https://keras-team.github.io/keras-tuner/documentation/tuners/) to find
the best hyperparameters for your Keras models. It's as easy as calling `fit()`.
Here's how it works.
First, place your model definition in a function that takes a single `hp` argument.
Inside this function, replace any value you want to tune with a call to hyperparameter
sampling methods, e.g. `hp.Int()` or `hp.Choice()`:
```python
def build_model(hp):
inputs = keras.Input(shape=(784,))
x = layers.Dense(
units=hp.Int('units', min_value=32, max_value=512, step=32),
        activation='relu')(inputs)
outputs = layers.Dense(10, activation='softmax')(x)
model = keras.Model(inputs, outputs)
model.compile(
optimizer=keras.optimizers.Adam(
hp.Choice('learning_rate',
values=[1e-2, 1e-3, 1e-4])),
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
return model
```
The function should return a compiled model.
Next, instantiate a tuner object specifying your optimization objective and other search
parameters:
```python
import keras_tuner
tuner = keras_tuner.tuners.Hyperband(
build_model,
objective='val_loss',
max_epochs=100,
max_trials=200,
executions_per_trial=2,
directory='my_dir')
```
Finally, start the search with the `search()` method, which takes the same arguments as
`Model.fit()`:
```python
tuner.search(dataset, validation_data=val_dataset)
```
When search is over, you can retrieve the best model(s):
```python
models = tuner.get_best_models(num_models=2)
```
Or print a summary of the results:
```python
tuner.results_summary()
```
"""
"""
## End-to-end examples
To familiarize yourself with the concepts in this introduction, see the following
end-to-end examples:
- [Text classification](/examples/nlp/text_classification_from_scratch/)
- [Image classification](/examples/vision/image_classification_from_scratch/)
- [Credit card fraud detection](/examples/structured_data/imbalanced_classification/)
"""
"""
## What to learn next
- Learn more about the
[Functional API](/guides/functional_api/).
- Learn more about the
[features of `fit()` and `evaluate()`](/guides/training_with_built_in_methods/).
- Learn more about
[callbacks](/guides/writing_your_own_callbacks/).
- Learn more about
[creating your own custom training steps](/guides/customizing_what_happens_in_fit/).
- Learn more about
[multi-GPU and distributed training](/guides/distributed_training/).
- Learn how to do [transfer learning](/guides/transfer_learning/).
"""
| apache-2.0 | 7,729,671,323,865,348,000 | 34.412486 | 101 | 0.74815 | false |
openstack/zaqar | zaqar/transport/validation.py | 1 | 29503 | # Copyright (c) 2013 Rackspace, Inc.
# Copyright (c) 2015 Catalyst IT Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import re
import uuid
from oslo_utils import timeutils
import six
from zaqar.common import consts
from zaqar.conf import transport
from zaqar.i18n import _
MIN_MESSAGE_TTL = 60
MIN_CLAIM_TTL = 60
MIN_CLAIM_GRACE = 60
MIN_DELAY_TTL = 0
MIN_SUBSCRIPTION_TTL = 60
_PURGBLE_RESOURCE_TYPES = {'messages', 'subscriptions'}
# NOTE(kgriffs): Don't use \w because it isn't guaranteed to match
# only ASCII characters.
QUEUE_NAME_REGEX = re.compile(r'^[a-zA-Z0-9_\-.]+$')
QUEUE_NAME_MAX_LEN = 64
PROJECT_ID_MAX_LEN = 256
class ValidationFailed(ValueError):
"""User input did not follow API restrictions."""
def __init__(self, msg, *args, **kwargs):
msg = msg.format(*args, **kwargs)
super(ValidationFailed, self).__init__(msg)
class Validator(object):
def __init__(self, conf):
self._conf = conf
self._conf.register_opts(transport.ALL_OPTS,
group=transport.GROUP_NAME)
self._limits_conf = self._conf[transport.GROUP_NAME]
self._supported_operations = ('add', 'remove', 'replace')
def queue_identification(self, queue, project):
"""Restrictions on a project id & queue name pair.
:param queue: Name of the queue
:param project: Project id
        :raises ValidationFailed: if the queue name is longer than 64
characters or contains anything other than ASCII digits and
letters, underscores, and dashes. Also raises if `project`
is not None but longer than 256 characters.
"""
if project is not None and len(project) > PROJECT_ID_MAX_LEN:
msg = _(u'Project ids may not be more than {0} characters long.')
raise ValidationFailed(msg, PROJECT_ID_MAX_LEN)
if len(queue) > QUEUE_NAME_MAX_LEN:
msg = _(u'Queue names may not be more than {0} characters long.')
raise ValidationFailed(msg, QUEUE_NAME_MAX_LEN)
if not QUEUE_NAME_REGEX.match(queue):
raise ValidationFailed(
_(u'Queue names may only contain ASCII letters, digits, '
'underscores, and dashes.'))
def _get_change_operation_d10(self, raw_change):
op = raw_change.get('op')
if op is None:
msg = (_('Unable to find `op` in JSON Schema change. '
'It must be one of the following: %(available)s.') %
{'available': ', '.join(self._supported_operations)})
raise ValidationFailed(msg)
if op not in self._supported_operations:
msg = (_('Invalid operation: `%(op)s`. '
'It must be one of the following: %(available)s.') %
{'op': op,
'available': ', '.join(self._supported_operations)})
raise ValidationFailed(msg)
return op
def _get_change_path_d10(self, raw_change):
try:
return raw_change['path']
except KeyError:
msg = _("Unable to find '%s' in JSON Schema change") % 'path'
raise ValidationFailed(msg)
def _decode_json_pointer(self, pointer):
"""Parse a json pointer.
Json Pointers are defined in
http://tools.ietf.org/html/draft-pbryan-zyp-json-pointer .
The pointers use '/' for separation between object attributes, such
that '/A/B' would evaluate to C in {"A": {"B": "C"}}. A '/' character
in an attribute name is encoded as "~1" and a '~' character is encoded
as "~0".
"""
self._validate_json_pointer(pointer)
ret = []
for part in pointer.lstrip('/').split('/'):
ret.append(part.replace('~1', '/').replace('~0', '~').strip())
return ret
def _validate_json_pointer(self, pointer):
"""Validate a json pointer.
We only accept a limited form of json pointers.
"""
if not pointer.startswith('/'):
msg = _('Pointer `%s` does not start with "/".') % pointer
raise ValidationFailed(msg)
if re.search(r'/\s*?/', pointer[1:]):
msg = _('Pointer `%s` contains adjacent "/".') % pointer
raise ValidationFailed(msg)
if len(pointer) > 1 and pointer.endswith('/'):
            msg = _('Pointer `%s` ends with "/".') % pointer
raise ValidationFailed(msg)
if pointer[1:].strip() == '/':
            msg = _('Pointer `%s` does not contain a valid token.') % pointer
raise ValidationFailed(msg)
if re.search(r'~[^01]', pointer) or pointer.endswith('~'):
msg = _('Pointer `%s` contains "~" not part of'
' a recognized escape sequence.') % pointer
raise ValidationFailed(msg)
def _get_change_value(self, raw_change, op):
if 'value' not in raw_change:
msg = _('Operation "{0}" requires a member named "value".')
raise ValidationFailed(msg, op)
return raw_change['value']
def _validate_change(self, change):
if change['op'] == 'remove':
return
path_root = change['path'][0]
if len(change['path']) >= 1 and path_root.lower() != 'metadata':
msg = _("The root of path must be metadata, e.g /metadata/key.")
raise ValidationFailed(msg)
def _validate_path(self, op, path):
limits = {'add': 2, 'remove': 2, 'replace': 2}
if len(path) != limits.get(op, 2):
msg = _("Invalid JSON pointer for this resource: "
"'/%s, e.g /metadata/key'") % '/'.join(path)
raise ValidationFailed(msg)
def _parse_json_schema_change(self, raw_change, draft_version):
if draft_version == 10:
op = self._get_change_operation_d10(raw_change)
path = self._get_change_path_d10(raw_change)
else:
msg = _('Unrecognized JSON Schema draft version')
raise ValidationFailed(msg)
path_list = self._decode_json_pointer(path)
return op, path_list
def _validate_retry_policy(self, metadata):
retry_policy = metadata.get('_retry_policy') if metadata else None
if retry_policy and not isinstance(retry_policy, dict):
msg = _('retry_policy must be a dict.')
raise ValidationFailed(msg)
if retry_policy:
valid_keys = ['retries_with_no_delay', 'minimum_delay_retries',
'minimum_delay', 'maximum_delay',
'maximum_delay_retries', 'retry_backoff_function',
'ignore_subscription_override']
for key in valid_keys:
retry_value = retry_policy.get(key)
if key == 'retry_backoff_function':
if retry_value and not isinstance(retry_value, str):
msg = _('retry_backoff_function must be a string.')
raise ValidationFailed(msg)
# Now we support linear, arithmetic, exponential
# and geometric retry backoff function.
fun = {'linear', 'arithmetic', 'exponential', 'geometric'}
if retry_value and retry_value not in fun:
msg = _('invalid retry_backoff_function.')
raise ValidationFailed(msg)
elif key == 'ignore_subscription_override':
if retry_value and not isinstance(retry_value, bool):
msg = _('ignore_subscription_override must be a '
'boolean.')
raise ValidationFailed(msg)
else:
if retry_value and not isinstance(retry_value, int):
                        msg = _('Retry policy: %s must be an integer.') % key
raise ValidationFailed(msg)
min_delay = retry_policy.get('minimum_delay',
consts.MINIMUM_DELAY)
max_delay = retry_policy.get('maximum_delay',
consts.MAXIMUM_DELAY)
if max_delay < min_delay:
                msg = _('minimum_delay must be less than maximum_delay.')
raise ValidationFailed(msg)
if ((max_delay - min_delay) < 2*consts.LINEAR_INTERVAL):
                msg = _('invalid minimum_delay and maximum_delay: their '
                        'difference must be at least twice the linear interval.')
raise ValidationFailed(msg)
def queue_patching(self, request, changes):
washed_changes = []
content_types = {
'application/openstack-messaging-v2.0-json-patch': 10,
}
json_schema_version = content_types[request.content_type]
if not isinstance(changes, list):
msg = _('Request body must be a JSON array of operation objects.')
raise ValidationFailed(msg)
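        # For illustration, a hypothetical request body might be
        # [{'op': 'replace', 'path': '/metadata/key', 'value': 'new-value'},
        #  {'op': 'remove', 'path': '/metadata/other-key'}]; each entry is
        # validated individually below.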
for raw_change in changes:
if not isinstance(raw_change, dict):
msg = _('Operations must be JSON objects.')
raise ValidationFailed(msg)
(op, path) = self._parse_json_schema_change(raw_change,
json_schema_version)
# NOTE(flwang): Now the 'path' is a list.
self._validate_path(op, path)
change = {'op': op, 'path': path,
'json_schema_version': json_schema_version}
if not op == 'remove':
change['value'] = self._get_change_value(raw_change, op)
self._validate_change(change)
washed_changes.append(change)
return washed_changes
def queue_listing(self, limit=None, **kwargs):
"""Restrictions involving a list of queues.
:param limit: The expected number of queues in the list
:param kwargs: Ignored arguments passed to storage API
:raises ValidationFailed: if the limit is exceeded
"""
uplimit = self._limits_conf.max_queues_per_page
if limit is not None and not (0 < limit <= uplimit):
msg = _(u'Limit must be at least 1 and no greater than {0}.')
raise ValidationFailed(msg, self._limits_conf.max_queues_per_page)
    def queue_metadata_length(self, content_length):
        """Restrictions on queue metadata length.
:param content_length: Queue request's length.
:raises ValidationFailed: if the metadata is oversize.
"""
if content_length is None:
return
if content_length > self._limits_conf.max_queue_metadata:
msg = _(u'Queue metadata is too large. Max size: {0}')
raise ValidationFailed(msg, self._limits_conf.max_queue_metadata)
def queue_metadata_putting(self, queue_metadata):
"""Checking if the reserved attributes of the queue are valid.
:param queue_metadata: Queue's metadata.
:raises ValidationFailed: if any reserved attribute is invalid.
"""
if not queue_metadata:
return
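        # The reserved attributes validated below are _default_message_ttl,
        # _max_messages_post_size, _max_claim_count,
        # _dead_letter_queue_messages_ttl, _default_message_delay,
        # _enable_encrypt_messages and the optional _retry_policy dict.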
queue_default_ttl = queue_metadata.get('_default_message_ttl')
if queue_default_ttl and not isinstance(queue_default_ttl, int):
            msg = _(u'_default_message_ttl must be an integer.')
raise ValidationFailed(msg)
if queue_default_ttl is not None:
if not (MIN_MESSAGE_TTL <= queue_default_ttl <=
self._limits_conf.max_message_ttl):
msg = _(u'_default_message_ttl can not exceed {0} '
'seconds, and must be at least {1} seconds long.')
raise ValidationFailed(
msg, self._limits_conf.max_message_ttl, MIN_MESSAGE_TTL)
queue_max_msg_size = queue_metadata.get('_max_messages_post_size',
None)
if queue_max_msg_size and not isinstance(queue_max_msg_size, int):
            msg = _(u'_max_messages_post_size must be an integer.')
raise ValidationFailed(msg)
if queue_max_msg_size is not None:
if not (0 < queue_max_msg_size <=
self._limits_conf.max_messages_post_size):
raise ValidationFailed(
_(u'_max_messages_post_size can not exceed {0}, '
                      'and must be greater than 0.'),
self._limits_conf.max_messages_post_size)
max_claim_count = queue_metadata.get('_max_claim_count', None)
if max_claim_count and not isinstance(max_claim_count, int):
            msg = _(u'_max_claim_count must be an integer.')
raise ValidationFailed(msg)
dlq_ttl = queue_metadata.get('_dead_letter_queue_messages_ttl', None)
if dlq_ttl and not isinstance(dlq_ttl, int):
            msg = _(u'_dead_letter_queue_messages_ttl must be an integer.')
raise ValidationFailed(msg)
if dlq_ttl is not None and not (MIN_MESSAGE_TTL <= dlq_ttl <=
self._limits_conf.max_message_ttl):
msg = _(u'The TTL for a message may not exceed {0} seconds, '
'and must be at least {1} seconds long.')
raise ValidationFailed(msg, self._limits_conf.max_message_ttl,
MIN_MESSAGE_TTL)
queue_delay = queue_metadata.get('_default_message_delay',
None)
if queue_delay and not isinstance(queue_delay, int):
            msg = _(u'_default_message_delay must be an integer.')
raise ValidationFailed(msg)
if queue_delay is not None:
if not (MIN_DELAY_TTL <= queue_delay <=
self._limits_conf.max_message_delay):
                msg = _(u'The default message delay can not exceed {0} '
                        'seconds, and must be at least {1} seconds long.')
raise ValidationFailed(
msg, self._limits_conf.max_message_delay,
MIN_DELAY_TTL)
encrypted_queue = queue_metadata.get('_enable_encrypt_messages', False)
if encrypted_queue and not isinstance(encrypted_queue, bool):
            msg = _(u'_enable_encrypt_messages must be a boolean.')
raise ValidationFailed(msg)
self._validate_retry_policy(queue_metadata)
    def queue_purging(self, document):
        """Restricts the resource types that may be purged for a queue.
        :param document: request body containing a "resource_types" list
            of the resource types to purge under the queue
        :raises ValidationFailed: if the resource types are invalid
        """
if 'resource_types' not in document:
msg = _(u'Post body must contain key "resource_types".')
raise ValidationFailed(msg)
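        # For illustration, a hypothetical purge request body could be
        # {'resource_types': ['messages']}, assuming 'messages' is one of
        # the types listed in _PURGBLE_RESOURCE_TYPES.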
if (not set(document['resource_types']).issubset(
_PURGBLE_RESOURCE_TYPES)):
msg = _(u'Resource types must be a sub set of {0}.')
raise ValidationFailed(msg, _PURGBLE_RESOURCE_TYPES)
def message_posting(self, messages):
"""Restrictions on a list of messages.
:param messages: A list of messages
        :raises ValidationFailed: if any message has an out-of-range
TTL.
"""
if not messages:
            raise ValidationFailed(_(u'No messages to enqueue.'))
for msg in messages:
self.message_content(msg)
def message_length(self, content_length, max_msg_post_size=None):
"""Restrictions on message post length.
        :param content_length: Message post request's length.
        :raises ValidationFailed: if the message post body is too large.
"""
if content_length is None:
return
if max_msg_post_size:
try:
min_max_size = min(max_msg_post_size,
self._limits_conf.max_messages_post_size)
if content_length > min_max_size:
raise ValidationFailed(
_(u'Message collection size is too large. The max '
'size for current queue is {0}. It is calculated '
'by max size = min(max_messages_post_size_config: '
'{1}, max_messages_post_size_queue: {2}).'),
min_max_size,
self._limits_conf.max_messages_post_size,
max_msg_post_size)
except TypeError:
# NOTE(flwang): If there is a type error when using min(),
# it only happens in py3.x, it will be skipped and compare
# the message length with the size defined in config file.
pass
if content_length > self._limits_conf.max_messages_post_size:
raise ValidationFailed(
_(u'Message collection size is too large. Max size {0}'),
self._limits_conf.max_messages_post_size)
def message_content(self, message):
"""Restrictions on each message."""
ttl = message['ttl']
if not (MIN_MESSAGE_TTL <= ttl <= self._limits_conf.max_message_ttl):
msg = _(u'The TTL for a message may not exceed {0} seconds, and '
'must be at least {1} seconds long.')
raise ValidationFailed(
msg, self._limits_conf.max_message_ttl, MIN_MESSAGE_TTL)
delay = message.get('delay', 0)
if not (MIN_DELAY_TTL <= delay <=
self._limits_conf.max_message_delay):
            msg = _(u'The Delay TTL for a message may not exceed {0} seconds, '
'and must be at least {1} seconds long.')
raise ValidationFailed(
msg, self._limits_conf.max_message_delay,
MIN_DELAY_TTL)
def message_listing(self, limit=None, **kwargs):
"""Restrictions involving a list of messages.
:param limit: The expected number of messages in the list
:param kwargs: Ignored arguments passed to storage API
:raises ValidationFailed: if the limit is exceeded
"""
uplimit = self._limits_conf.max_messages_per_page
if limit is not None and not (0 < limit <= uplimit):
msg = _(u'Limit must be at least 1 and may not '
'be greater than {0}.')
raise ValidationFailed(
msg, self._limits_conf.max_messages_per_page)
def message_deletion(self, ids=None, pop=None, claim_ids=None):
"""Restrictions involving deletion of messages.
:param ids: message ids passed in by the delete request
:param pop: count of messages to be POPped
:param claim_ids: claim ids passed in by the delete request
:raises ValidationFailed: if,
pop AND id params are present together
            neither pop nor id params are present
message count to be popped > maximum allowed
"""
if pop is not None and ids is not None:
msg = _(u'pop and id params cannot be present together in the '
'delete request.')
raise ValidationFailed(msg)
if pop is None and ids is None:
msg = _(u'The request should have either "ids" or "pop" '
'parameter in the request, to be able to delete.')
raise ValidationFailed(msg)
if self._limits_conf.message_delete_with_claim_id:
if (ids and claim_ids is None) or (ids is None and claim_ids):
msg = _(u'The request should have both "ids" and "claim_ids" '
'parameter in the request when '
'message_delete_with_claim_id is True.')
raise ValidationFailed(msg)
pop_uplimit = self._limits_conf.max_messages_per_claim_or_pop
if pop is not None and not (0 < pop <= pop_uplimit):
msg = _(u'Pop value must be at least 1 and may not '
'be greater than {0}.')
raise ValidationFailed(msg, pop_uplimit)
delete_uplimit = self._limits_conf.max_messages_per_page
if ids is not None and not (0 < len(ids) <= delete_uplimit):
msg = _(u'ids parameter should have at least 1 and not '
'greater than {0} values.')
raise ValidationFailed(msg, delete_uplimit)
def claim_creation(self, metadata, limit=None):
"""Restrictions on the claim parameters upon creation.
:param metadata: The claim metadata
:param limit: The number of messages to claim
:raises ValidationFailed: if either TTL or grace is out of range,
or the expected number of messages exceed the limit.
"""
self.claim_updating(metadata)
uplimit = self._limits_conf.max_messages_per_claim_or_pop
if limit is not None and not (0 < limit <= uplimit):
msg = _(u'Limit must be at least 1 and may not '
'be greater than {0}.')
raise ValidationFailed(
msg, self._limits_conf.max_messages_per_claim_or_pop)
grace = metadata['grace']
if not (MIN_CLAIM_GRACE <= grace <= self._limits_conf.max_claim_grace):
msg = _(u'The grace for a claim may not exceed {0} seconds, and '
'must be at least {1} seconds long.')
raise ValidationFailed(
msg, self._limits_conf.max_claim_grace, MIN_CLAIM_GRACE)
def claim_updating(self, metadata):
"""Restrictions on the claim TTL.
:param metadata: The claim metadata
:raises ValidationFailed: if the TTL is out of range
"""
ttl = metadata['ttl']
if not (MIN_CLAIM_TTL <= ttl <= self._limits_conf.max_claim_ttl):
msg = _(u'The TTL for a claim may not exceed {0} seconds, and '
'must be at least {1} seconds long.')
raise ValidationFailed(
msg, self._limits_conf.max_claim_ttl, MIN_CLAIM_TTL)
def subscription_posting(self, subscription):
"""Restrictions on a creation of subscription.
:param subscription: dict of subscription
:raises ValidationFailed: if the subscription is invalid.
"""
for p in ('subscriber',):
if p not in subscription.keys():
raise ValidationFailed(_(u'Missing parameter %s in body.') % p)
self.subscription_patching(subscription)
def subscription_patching(self, subscription):
"""Restrictions on an update of subscription.
:param subscription: dict of subscription
:raises ValidationFailed: if the subscription is invalid.
"""
if not subscription:
raise ValidationFailed(_(u'No subscription to create.'))
if not isinstance(subscription, dict):
msg = _('Subscriptions must be a dict.')
raise ValidationFailed(msg)
subscriber = subscription.get('subscriber')
subscriber_type = None
if subscriber:
parsed_uri = six.moves.urllib_parse.urlparse(subscriber)
subscriber_type = parsed_uri.scheme
if subscriber_type not in self._limits_conf.subscriber_types:
msg = _(u'The subscriber type of subscription must be '
u'supported in the list {0}.')
raise ValidationFailed(msg, self._limits_conf.subscriber_types)
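        # For illustration, subscribers are URIs such as
        # 'http://example.com/callback' or 'mailto:[email protected]'
        # (hypothetical values); only schemes listed in the configured
        # subscriber_types are accepted.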
options = subscription.get('options')
if options and not isinstance(options, dict):
msg = _(u'Options must be a dict.')
raise ValidationFailed(msg)
self._validate_retry_policy(options)
ttl = subscription.get('ttl')
if ttl:
if not isinstance(ttl, int):
msg = _(u'TTL must be an integer.')
raise ValidationFailed(msg)
if ttl < MIN_SUBSCRIPTION_TTL:
msg = _(u'The TTL for a subscription '
'must be at least {0} seconds long.')
raise ValidationFailed(msg, MIN_SUBSCRIPTION_TTL)
# NOTE(flwang): By this change, technically, user can set a very
# big TTL so as to get a very long subscription.
now = timeutils.utcnow_ts()
now_dt = datetime.datetime.utcfromtimestamp(now)
msg = _(u'The TTL seconds for a subscription plus current time'
' must be less than {0}.')
try:
# NOTE(flwang): If below expression works, then we believe the
# ttl is acceptable otherwise it exceeds the max time of
# python.
now_dt + datetime.timedelta(seconds=ttl)
except OverflowError:
raise ValidationFailed(msg, datetime.datetime.max)
def subscription_confirming(self, confirmed):
confirmed = confirmed.get('confirmed')
if not isinstance(confirmed, bool):
            msg = _(u"The 'confirmed' value must be a boolean.")
raise ValidationFailed(msg)
def subscription_listing(self, limit=None, **kwargs):
"""Restrictions involving a list of subscriptions.
:param limit: The expected number of subscriptions in the list
:param kwargs: Ignored arguments passed to storage API
:raises ValidationFailed: if the limit is exceeded
"""
uplimit = self._limits_conf.max_subscriptions_per_page
if limit is not None and not (0 < limit <= uplimit):
msg = _(u'Limit must be at least 1 and may not '
'be greater than {0}.')
raise ValidationFailed(
msg, self._limits_conf.max_subscriptions_per_page)
def get_limit_conf_value(self, limit_conf_name=None):
"""Return the value of limit configuration.
:param limit_conf_name: configuration name
"""
return self._limits_conf[limit_conf_name]
    def flavor_listing(self, limit=None, **kwargs):
        """Restrictions involving a list of flavors.
:param limit: The expected number of flavors in the list
:param kwargs: Ignored arguments passed to storage API
:raises ValidationFailed: if the limit is exceeded
"""
uplimit = self._limits_conf.max_flavors_per_page
if limit is not None and not (0 < limit <= uplimit):
msg = _(u'Limit must be at least 1 and no greater than {0}.')
raise ValidationFailed(msg, self._limits_conf.max_flavors_per_page)
def pool_listing(self, limit=None, **kwargs):
"""Restrictions involving a list of pools.
        :param limit: The expected number of pools in the list
:param kwargs: Ignored arguments passed to storage API
:raises ValidationFailed: if the limit is exceeded
"""
uplimit = self._limits_conf.max_pools_per_page
if limit is not None and not (0 < limit <= uplimit):
msg = _(u'Limit must be at least 1 and no greater than {0}.')
raise ValidationFailed(msg, self._limits_conf.max_pools_per_page)
    def client_id_uuid_safe(self, client_id):
        """Restricts the format of the client id.
        :param client_id: the client id of the request
        :raises ValidationFailed: if the client id has an invalid length,
            or is not a valid UUID when client_id_uuid_safe is 'strict'
"""
if self._limits_conf.client_id_uuid_safe == 'off':
if (len(client_id) < self._limits_conf.min_length_client_id) or \
(len(client_id) > self._limits_conf.max_length_client_id):
msg = _(u'Length of client id must be at least {0} and no '
'greater than {1}.')
raise ValidationFailed(msg,
self._limits_conf.min_length_client_id,
self._limits_conf.max_length_client_id)
if self._limits_conf.client_id_uuid_safe == 'strict':
uuid.UUID(client_id)
def topic_identification(self, topic, project):
"""Restrictions on a project id & topic name pair.
        :param topic: Name of the topic
:param project: Project id
:raises ValidationFailed: if the `name` is longer than 64
characters or contains anything other than ASCII digits and
letters, underscores, and dashes. Also raises if `project`
is not None but longer than 256 characters.
"""
if project is not None and len(project) > PROJECT_ID_MAX_LEN:
msg = _(u'Project ids may not be more than {0} characters long.')
raise ValidationFailed(msg, PROJECT_ID_MAX_LEN)
if len(topic) > QUEUE_NAME_MAX_LEN:
msg = _(u'Topic names may not be more than {0} characters long.')
raise ValidationFailed(msg, QUEUE_NAME_MAX_LEN)
if not QUEUE_NAME_REGEX.match(topic):
raise ValidationFailed(
_(u'Topic names may only contain ASCII letters, digits, '
'underscores, and dashes.'))
| apache-2.0 | 3,048,376,038,165,915,600 | 40.729844 | 79 | 0.574789 | false |
FabriceSalvaire/Musica | Musica/MusicXML/Pyxb/_xlink.py | 1 | 5773 | # ./_xlink.py
# -*- coding: utf-8 -*-
# PyXB bindings for NM:b43cd366527ddb6a0e58594876e07421e0148f30
# Generated 2017-10-09 18:48:41.313719 by PyXB version 1.2.6 using Python 3.6.2.final.0
# Namespace http://www.w3.org/1999/xlink [xmlns:xlink]
from __future__ import unicode_literals
import pyxb
from pyxb.binding.datatypes import positiveInteger as p_positiveInteger
from pyxb.binding.basis import complexTypeDefinition as p_complexTypeDefinition
from pyxb.binding.basis import element as p_element
from pyxb.binding.basis import enumeration_mixin as p_enumeration_mixin
from pyxb.binding.content import AttributeUse as p_AttributeUse
from pyxb.binding.content import ElementDeclaration as p_ElementDeclaration
from pyxb.binding.content import ElementUse as p_ElementUse
from pyxb.binding.datatypes import ID as p_ID
from pyxb.binding.datatypes import IDREF as p_IDREF
from pyxb.binding.datatypes import NMTOKEN as p_NMTOKEN
from pyxb.binding.datatypes import anySimpleType as p_anySimpleType
from pyxb.binding.datatypes import anyType as p_anyType
from pyxb.binding.datatypes import decimal as p_decimal
from pyxb.binding.datatypes import integer as p_integer
from pyxb.binding.datatypes import nonNegativeInteger as p_nonNegativeInteger
from pyxb.binding.datatypes import string as p_string
from pyxb.binding.datatypes import token as p_token
from pyxb.binding.facets import CF_enumeration as p_CF_enumeration
from pyxb.binding.facets import CF_maxInclusive as p_CF_maxInclusive
from pyxb.binding.facets import CF_minExclusive as p_CF_minExclusive
from pyxb.binding.facets import CF_minInclusive as p_CF_minInclusive
from pyxb.namespace import ExpandedName as p_ExpandedName
from pyxb.utils.utility import Location as p_Location
import pyxb.binding
import pyxb.binding.saxer
import io
import pyxb.utils.utility
import pyxb.utils.domutils
import sys
import pyxb.utils.six as _six
# Unique identifier for bindings created at the same time
_GenerationUID = pyxb.utils.utility.UniqueIdentifier('urn:uuid:b41e9400-ad11-11e7-be22-185e0f77ec0a')
# Version of PyXB used to generate the bindings
_PyXBVersion = '1.2.6'
# Generated bindings are not compatible across PyXB versions
if pyxb.__version__ != _PyXBVersion:
raise pyxb.PyXBVersionError(_PyXBVersion)
# A holder for module-level binding classes so we can access them from
# inside class definitions where property names may conflict.
_module_typeBindings = pyxb.utils.utility.Object()
# Import bindings for namespaces imported into schema
import pyxb.binding.datatypes
# NOTE: All namespace declarations are reserved within the binding
Namespace = pyxb.namespace.NamespaceForURI('http://www.w3.org/1999/xlink', create_if_missing=True)
Namespace.configureCategories(['typeBinding', 'elementBinding'])
def CreateFromDocument (xml_text, default_namespace=None, location_base=None):
if pyxb.XMLStyle_saxer != pyxb._XMLStyle:
dom = pyxb.utils.domutils.StringToDOM(xml_text)
return CreateFromDOM(dom.documentElement, default_namespace=default_namespace)
if default_namespace is None:
default_namespace = Namespace.fallbackNamespace()
saxer = pyxb.binding.saxer.make_parser(fallback_namespace=default_namespace, location_base=location_base)
handler = saxer.getContentHandler()
xmld = xml_text
if isinstance(xmld, _six.text_type):
xmld = xmld.encode(pyxb._InputEncoding)
saxer.parse(io.BytesIO(xmld))
instance = handler.rootObject()
return instance
def CreateFromDOM (node, default_namespace=None):
if default_namespace is None:
default_namespace = Namespace.fallbackNamespace()
return p_element.AnyCreateFromDOM(node, default_namespace)
# Atomic simple type: [anonymous]
class STD_ANON (p_NMTOKEN, p_enumeration_mixin):
_ExpandedName = None
_XSDLocation = p_Location('xlink.xsd', 23, 2)
_Documentation = ''
STD_ANON._CF_enumeration = p_CF_enumeration(value_datatype=STD_ANON, enum_prefix=None)
STD_ANON.simple = STD_ANON._CF_enumeration.addEnumeration(unicode_value='simple', tag='simple')
STD_ANON._InitializeFacetMap(STD_ANON._CF_enumeration)
_module_typeBindings.STD_ANON = STD_ANON
# Atomic simple type: [anonymous]
class STD_ANON_ (p_NMTOKEN, p_enumeration_mixin):
_ExpandedName = None
_XSDLocation = p_Location('xlink.xsd', 35, 2)
_Documentation = ''
STD_ANON_._CF_enumeration = p_CF_enumeration(value_datatype=STD_ANON_, enum_prefix=None)
STD_ANON_.new = STD_ANON_._CF_enumeration.addEnumeration(unicode_value='new', tag='new')
STD_ANON_.replace = STD_ANON_._CF_enumeration.addEnumeration(unicode_value='replace', tag='replace')
STD_ANON_.embed = STD_ANON_._CF_enumeration.addEnumeration(unicode_value='embed', tag='embed')
STD_ANON_.other = STD_ANON_._CF_enumeration.addEnumeration(unicode_value='other', tag='other')
STD_ANON_.none = STD_ANON_._CF_enumeration.addEnumeration(unicode_value='none', tag='none')
STD_ANON_._InitializeFacetMap(STD_ANON_._CF_enumeration)
_module_typeBindings.STD_ANON_ = STD_ANON_
# Atomic simple type: [anonymous]
class STD_ANON_2 (p_NMTOKEN, p_enumeration_mixin):
_ExpandedName = None
_XSDLocation = p_Location('xlink.xsd', 47, 2)
_Documentation = ''
STD_ANON_2._CF_enumeration = p_CF_enumeration(value_datatype=STD_ANON_2, enum_prefix=None)
STD_ANON_2.onRequest = STD_ANON_2._CF_enumeration.addEnumeration(unicode_value='onRequest', tag='onRequest')
STD_ANON_2.onLoad = STD_ANON_2._CF_enumeration.addEnumeration(unicode_value='onLoad', tag='onLoad')
STD_ANON_2.other = STD_ANON_2._CF_enumeration.addEnumeration(unicode_value='other', tag='other')
STD_ANON_2.none = STD_ANON_2._CF_enumeration.addEnumeration(unicode_value='none', tag='none')
STD_ANON_2._InitializeFacetMap(STD_ANON_2._CF_enumeration)
_module_typeBindings.STD_ANON_2 = STD_ANON_2
| gpl-3.0 | -5,277,354,413,599,819,000 | 46.319672 | 109 | 0.774467 | false |
BrendanLeber/adventofcode | 2019/12-n-body_problem/tests.py | 1 | 2247 | # -*- coding: utf-8 -*-
import unittest
from solve1 import Moon, Vector, apply_gravity
class VectorUnitTests(unittest.TestCase):
def test_constructor(self):
v = Vector()
self.assertEqual(v.x, 0)
self.assertEqual(v.y, 0)
self.assertEqual(v.z, 0)
v = Vector.parse("<x=-1, y=0, z=2>")
self.assertEqual(v, Vector(-1, 0, 2))
v = Vector.parse("<x=2, y=-10, z=-7>")
self.assertEqual(v, Vector(2, -10, -7))
v = Vector.parse("<x=4, y=-8, z=8>")
self.assertEqual(v, Vector(4, -8, 8))
v = Vector.parse("<x=3, y=5, z=-1>")
self.assertEqual(v, Vector(3, 5, -1))
v = Vector.parse("<x=128, y=256, z=-512>")
self.assertEqual(v, Vector(128, 256, -512))
class MoonUnitTests(unittest.TestCase):
def test_constructor(self):
m = Moon.parse("<x=-1, y=0, z=2>")
self.assertEqual(m, Moon(Vector(-1, 0, 2), Vector(0, 0, 0)))
def test_apply_gravity(self):
ganymede = Moon.parse("<x=3, y=3, z=10>")
callisto = Moon.parse("<x=5, y=3, z=1>")
ganymede.apply_gravity(callisto)
self.assertEqual(ganymede.vel, Vector(1, 0, -1))
callisto.apply_gravity(ganymede)
self.assertEqual(callisto.vel, Vector(-1, 0, 1))
def test_apply_velocity(self):
europa = Moon(Vector(1, 2, 3), Vector(-2, 0, 3))
europa.apply_velocity()
self.assertEqual(europa.pos, Vector(-1, 2, 6))
class SolutionUnitTests(unittest.TestCase):
def test_apply_gravity_2(self):
moons = [Moon(Vector(0, 0, 0), Vector(0, 0, 0)), Moon(Vector(9, 9, 9), Vector(3, 3, 3))]
apply_gravity(moons)
self.assertEqual(moons[0].vel, Vector(1, 1, 1))
self.assertEqual(moons[1].vel, Vector(2, 2, 2))
def test_apply_gravity_3(self):
moons = [
Moon(Vector(0, 0, 0), Vector(0, 0, 0)),
Moon(Vector(4, 4, 4), Vector(2, 2, 2)),
Moon(Vector(9, 9, 9), Vector(3, 3, 3)),
]
apply_gravity(moons)
self.assertEqual(moons[0].vel, Vector(2, 2, 2))
self.assertEqual(moons[1].vel, Vector(2, 2, 2))
self.assertEqual(moons[2].vel, Vector(1, 1, 1))
if __name__ == "__main__":
unittest.main()
| mit | -798,287,704,878,660,100 | 29.780822 | 96 | 0.554962 | false |
Wiredcraft/maestro-ng | maestro/lifecycle.py | 1 | 6661 | # Copyright (C) 2014 SignalFuse, Inc.
#
# Docker container orchestration utility.
import socket
import subprocess
import time
import requests
import re
from . import exceptions
class BaseLifecycleHelper:
"""Base class for lifecycle helpers."""
def test(self):
"""State helpers must implement this method to perform the state test.
The method must return True if the test succeeds, False otherwise."""
raise NotImplementedError
class TCPPortPinger(BaseLifecycleHelper):
"""
Lifecycle state helper that "pings" a particular TCP port.
"""
DEFAULT_MAX_WAIT = 300
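    # For illustration, a hypothetical check configuration handled by
    # from_config() below could be {'type': 'tcp', 'port': 'server',
    # 'max_wait': 30}, where 'port' must name a port defined by the
    # container and 'max_wait' is used as the number of ping attempts.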
def __init__(self, host, port, attempts=1):
"""Create a new TCP port pinger for the given host and port. The given
number of attempts will be made, until the port is open or we give
up."""
self.host = host
self.port = int(port)
self.attempts = int(attempts)
def __repr__(self):
return 'PortPing(tcp://{}:{}, {} attempts)'.format(
self.host, self.port, self.attempts)
def __ping_port(self):
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(1)
s.connect((self.host, self.port))
s.close()
return True
except Exception:
return False
def test(self):
retries = self.attempts
while retries > 0:
if self.__ping_port():
return True
retries -= 1
if retries > 0:
time.sleep(1)
return False
@staticmethod
def from_config(container, config):
if config['port'] not in container.ports:
raise exceptions.InvalidLifecycleCheckConfigurationException(
'Port {} is not defined by {}!'.format(
config['port'], container.name))
parts = container.ports[config['port']]['external'][1].split('/')
if parts[1] == 'udp':
raise exceptions.InvalidLifecycleCheckConfigurationException(
'Port {} is not TCP!'.format(config['port']))
return TCPPortPinger(
container.ship.ip, int(parts[0]),
attempts=config.get('max_wait', TCPPortPinger.DEFAULT_MAX_WAIT))
class ScriptExecutor(BaseLifecycleHelper):
"""
Lifecycle state helper that executes a script and uses the exit code as the
success value.
"""
def __init__(self, command):
self.command = command
def __repr__(self):
return 'ScriptExec({})'.format(self.command)
def test(self):
return subprocess.call(self.command, shell=True) == 0
@staticmethod
def from_config(container, config):
return ScriptExecutor(config['command'])
class Sleep(BaseLifecycleHelper):
"""
Lifecycle state helper that simply sleeps for a given amount of time (in
seconds).
"""
def __init__(self, wait):
self.wait = wait
def __repr__(self):
return 'Sleep({}s)'.format(self.wait)
def test(self):
while self.wait > 0:
time.sleep(1)
self.wait -= 1
return True
@staticmethod
def from_config(container, config):
return Sleep(config['wait'])
class HttpRequestLifecycle(BaseLifecycleHelper):
"""
    Lifecycle state helper that makes a web request and checks for a given response.
"""
DEFAULT_MAX_WAIT = 300
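    # For illustration, a hypothetical check configuration handled by
    # from_config() below could be {'type': 'http', 'port': 'web',
    # 'path': '/status', 'match_regex': 'OK', 'max_wait': 60}; keys other
    # than 'type' and 'port' are forwarded as constructor arguments.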
def __init__(self, host, port, match_regex=None, path='/', scheme='http',
method='get', max_wait=DEFAULT_MAX_WAIT, requests_options={}):
self.host = host
self.port = port
self.match_regex = match_regex
if self.match_regex:
try:
self.match_regex = re.compile(match_regex, re.DOTALL)
except:
raise exceptions.InvalidLifecycleCheckConfigurationException(
'Bad regex for {}: {}'.format(self.__class__.__name__,
match_regex)
)
self.path = path
if not self.path.startswith('/'):
self.path = '/'+self.path
self.scheme = scheme
self.method = method.lower()
self.max_wait = int(max_wait)
# Extra options passed directly to the requests library
self.requests_options = requests_options
def test(self):
start = time.time()
end_by = start+self.max_wait
url = '{}://{}:{}{}'.format(self.scheme, self.host, self.port,
self.path)
while time.time() < end_by:
try:
response = requests.request(self.method, url,
**self.requests_options)
if self._test_response(response):
return True
except:
pass
time.sleep(1)
return False
def _test_response(self, response):
if self.match_regex:
if getattr(response, 'text', None) and \
self.match_regex.search(response.text):
return True
else:
if response.status_code == requests.codes.ok:
return True
return False
@staticmethod
def from_config(container, config):
host = container.ship.ip
if config.get('host'):
host = config.get('host')
del config['host']
port = None
if config['port'] not in container.ports:
try:
# accept a numbered port
port = int(config['port'])
except:
raise exceptions.InvalidLifecycleCheckConfigurationException(
'Port {} is not defined by {}!'.format(
config['port'], container.name))
if port is None:
parts = container.ports[config['port']]['external'][1].split('/')
if parts[1] == 'udp':
raise exceptions.InvalidLifecycleCheckConfigurationException(
'Port {} is not TCP!'.format(config['port']))
port = int(parts[0])
opts = {}
opts.update(**config)
del opts['port']
del opts['type']
return HttpRequestLifecycle(host, port, **opts)
class LifecycleHelperFactory:
HELPERS = {
'tcp': TCPPortPinger,
'exec': ScriptExecutor,
'sleep': Sleep,
'http': HttpRequestLifecycle
}
@staticmethod
def from_config(container, config):
return (LifecycleHelperFactory.HELPERS[config['type']]
.from_config(container, config))
| lgpl-3.0 | 8,331,319,784,645,058,000 | 28.604444 | 79 | 0.556823 | false |
TopherGopher/aws-infra.jenkins-scripts | jenkins-callback-wrapper.py | 1 | 3552 | #!/usr/bin/python
#from jenkinsapi.jenkins import Jenkins
#from jenkinsapi.custom_exceptions import JenkinsAPIException
#from jenkinsapi.constants import STATUS_SUCCESS
import jenkins
import jenkinspoll
import click
import os
import requests
@click.command()
@click.option('--jenkins-url', help="The URL of the Jenkins instance to connect to")
@click.option('--environment', prompt='Environment', help='Environment e.g. production, staging')
@click.option('--chef-action', prompt='Chef action', help='e.g. update, backup', type=click.Choice(['update', 'delete', 'backup']))
@click.option('--bag-name', default=None, help="If set, will only trigger the job on the single bag specified")
def trigger_web_jobs(jenkins_url, environment, chef_action, bag_name):
"""
Trigger a mass web job based on environment
:param environment - Which environment would you like to try executing this job on?
:param chef_action - What would you like to perform on this environment?
  :param bag_name - Optional - what bag would you specifically like to work on in this environment?
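  Example invocation (hypothetical values):
      python jenkins-callback-wrapper.py --jenkins-url http://jenkins.example.com:8080 \
          --environment staging --chef-action update --bag-name mysite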
"""
print "Connecting to Jenkins..."
try:
# Preumably username / password will come back as a ConfigMap.
J = jenkins.Jenkins(jenkins_url, username=os.environ.get('JENKINS_SERVICE_USERNAME'), password=os.environ.get('JENKINS_SERVICE_PASSWORD'))
print "Connection successful"
except jenkins.JenkinsException as e:
print "Could not establish connection to Jenkins server at {jenkins_url}".format(jenkins_url=jenkins_url)
exit(1)
jenkins_job_list = J.get_jobs()
jenkins_job_list = [str(x["name"]) for x in jenkins_job_list]
if bag_name is not None:
job_name = "{environment}-{bag_name}".format(environment=environment, bag_name=bag_name)
# Make sure the job exists
if job_name not in jenkins_job_list:
print "Job '{job_name}' could not be found in the jenkins job list. Available jobs are:\n\t{jobs}".format(job_name=job_name,jobs=",".join(jenkins_job_list))
exit(1)
# Set build parameters, kick off a new build, and block until complete.
params = {'CHEF_ACTION': chef_action }
# Block so the jobs execute one-at-a-time
try:
print "Invoking job {job_name}...".format(job_name=job_name)
J.build_job(job_name, params)
jenkinspoll.wait_for_job_to_finish(job_name, jenkins_connection=J)
print "Done!"
exit(0)
except Exception as e:
print e
exit(1)
else:
print "Looking for jobs that contain '{environment}'".format(environment=environment)
for job_name in jenkins_job_list:
if job_name != "" and job_name != "{environment}-".format(environment=environment) and job_name.startswith(environment):
print "Invoking {job_name}".format(job_name=job_name)
# Set build parameters, kick off a new build, and block until complete.
params = {'CHEF_ACTION': chef_action }
# Block so the jobs execute one-at-a-time
try:
J.build_job(job_name, params)
jenkinspoll.wait_for_job_to_finish(job_name, jenkins_connection=J)
except Exception as e:
print e
# Determine if the job already exists on this jenkins instance. If not, clone
# a new job from the 'sitepack' seed job so each build has its own history.
if __name__ == '__main__':
trigger_web_jobs() | mit | -7,938,456,413,074,036,000 | 48.347222 | 168 | 0.653435 | false |
cvxgrp/cvxpylayers | cvxpylayers/tensorflow/cvxpylayer.py | 1 | 12651 | import cvxpy as cp
from cvxpy.reductions.solvers.conic_solvers.scs_conif import \
dims_to_solver_dict
import diffcp
import numpy as np
try:
import tensorflow as tf
except ImportError:
raise ImportError("Unable to import tensorflow. Please install "
"TensorFlow >= 2.0 (https://tensorflow.org).")
tf_major_version = int(tf.__version__.split('.')[0])
if tf_major_version < 2:
raise ImportError("cvxpylayers requires TensorFlow >= 2.0; please "
"upgrade your installation of TensorFlow, which is "
"version %s." % tf.__version__)
class CvxpyLayer(object):
"""A differentiable convex optimization layer
A CvxpyLayer solves a parametrized convex optimization problem given by a
CVXPY problem. It solves the problem in its forward pass, and it computes
the derivative of problem's solution map with respect to the parameters in
    its backward pass. The CVXPY problem must be a disciplined parametrized
program.
Example usage
```
import cvxpy as cp
import tensorflow as tf
from cvxpylayers.tensorflow import CvxpyLayer
n, m = 2, 3
x = cp.Variable(n)
A = cp.Parameter((m, n))
b = cp.Parameter(m)
constraints = [x >= 0]
objective = cp.Minimize(0.5 * cp.pnorm(A @ x - b, p=1))
problem = cp.Problem(objective, constraints)
assert problem.is_dpp()
cvxpylayer = CvxpyLayer(problem, parameters=[A, b], variables=[x])
A_tf = tf.Variable(tf.random.normal((m, n)))
b_tf = tf.Variable(tf.random.normal((m,)))
with tf.GradientTape() as tape:
# solve the problem, setting the values of A and b to A_tf and b_tf
solution, = cvxpylayer(A_tf, b_tf)
summed_solution = tf.math.reduce_sum(solution)
gradA, gradb = tape.gradient(summed_solution, [A_tf, b_tf])
```
"""
def __init__(self, problem, parameters, variables, gp=False):
"""Construct a CvxpyLayer
Args:
problem: The CVXPY problem; must be DPP.
parameters: A list of CVXPY Parameters in the problem; the order
of the Parameters determines the order in which parameter
values must be supplied in the forward pass.
variables: A list of CVXPY Variables in the problem; the order of the
Variables determines the order of the optimal variable
values returned from the forward pass.
gp: Whether to parse the problem using DGP (True or False).
"""
if gp:
if not problem.is_dgp(dpp=True):
raise ValueError('Problem must be DPP.')
else:
if not problem.is_dcp(dpp=True):
raise ValueError('Problem must be DPP.')
if set(parameters) != set(problem.parameters()):
raise ValueError("The layer's parameters must exactly match "
"problem.parameters")
if not set(variables).issubset(set(problem.variables())):
raise ValueError('Argument `variables` must be a subset of '
'`problem.variables()`')
self.params = parameters
self.gp = gp
if self.gp:
for param in parameters:
if param.value is None:
raise ValueError("An initial value for each parameter is "
"required when gp=True.")
data, solving_chain, _ = (
problem.get_problem_data(solver=cp.SCS, gp=True)
)
self.asa_maps = data[cp.settings.PARAM_PROB]
self.dgp2dcp = solving_chain.get(cp.reductions.Dgp2Dcp)
self.param_ids = [p.id for p in self.asa_maps.parameters]
else:
data, _, _ = problem.get_problem_data(solver=cp.SCS)
self.asa_maps = data[cp.settings.PARAM_PROB]
self.param_ids = [p.id for p in self.params]
self.cones = dims_to_solver_dict(data['dims'])
self.vars = variables
def __call__(self, *parameters, solver_args={}):
"""Solve problem (or a batch of problems) corresponding to `parameters`
Args:
parameters: a sequence of tf.Tensors; the n-th Tensor specifies
the value for the n-th CVXPY Parameter. These Tensors
can be batched: if a Tensor has 3 dimensions, then its
first dimension is interpreted as the batch size.
solver_args: a dict of optional arguments, to send to `diffcp`. Keys
should be the names of keyword arguments.
Returns:
a list of optimal variable values, one for each CVXPY Variable
supplied to the constructor.
"""
if len(parameters) != len(self.params):
raise ValueError('A tensor must be provided for each CVXPY '
'parameter; received %d tensors, expected %d' % (
len(parameters), len(self.params)))
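        # For illustration, with a CVXPY Parameter of shape (m, n) a tensor
        # of shape (m, n) is treated as a single problem, while a tensor of
        # shape (B, m, n) is treated as a batch of B problems solved in one
        # call.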
compute = tf.custom_gradient(
lambda *parameters: self._compute(parameters, solver_args))
return compute(*parameters)
def _dx_from_dsoln(self, dsoln):
dvars_numpy = list(map(lambda x: x.numpy(), dsoln))
del_vars = {}
for v, dv in zip(self.vars, dvars_numpy):
del_vars[v.id] = dv
return self.asa_maps.split_adjoint(del_vars)
def _problem_data_from_params(self, params):
c, _, A, b = self.asa_maps.apply_parameters(
dict(zip(self.param_ids, params)), keep_zeros=True)
A = -A
return A, b, c
def _restrict_DT_to_dx(self, DT, batch_size, s_shape):
if batch_size > 0:
zeros = [np.zeros(s_shape) for _ in range(batch_size)]
else:
zeros = np.zeros(s_shape)
return lambda dxs: DT(dxs, zeros, zeros)
def _split_solution(self, x):
soln = self.asa_maps.split_solution(x, {v.id: v for v in self.vars})
return tuple([tf.constant(soln[v.id]) for v in self.vars])
def _compute(self, params, solver_args={}):
tf_params = params
params = [p.numpy() for p in params]
# infer whether params are batched
batch_sizes = []
for i, (p_in, p_signature) in enumerate(zip(params, self.params)):
# check and extract the batch size for the parameter
# 0 means there is no batch dimension for this parameter
# and we assume the batch dimension is non-zero
if p_in.ndim == p_signature.ndim:
batch_size = 0
elif p_in.ndim == p_signature.ndim + 1:
batch_size = p_in.shape[0]
if batch_size == 0:
raise ValueError(
"Invalid parameter size passed in. "
"Parameter {} appears to be batched, but the leading "
"dimension is 0".format(i))
else:
                raise ValueError(
                    "Invalid parameter size passed in. Expected "
                    "parameter {} to have {} or {} dimensions "
"but got {} dimensions".format(
i, p_signature.ndim, p_signature.ndim + 1,
p_in.ndim))
batch_sizes.append(batch_size)
# validate the parameter shape
p_shape = p_in.shape if batch_size == 0 else p_in.shape[1:]
if not np.all(p_shape == p_signature.shape):
raise ValueError(
"Inconsistent parameter shapes passed in. "
"Expected parameter {} to have non-batched shape of "
"{} but got {}.".format(
i,
p_signature.shape,
                            p_shape))
batch_sizes = np.array(batch_sizes)
any_batched = np.any(batch_sizes > 0)
if any_batched:
nonzero_batch_sizes = batch_sizes[batch_sizes > 0]
batch_size = int(nonzero_batch_sizes[0])
if np.any(nonzero_batch_sizes != batch_size):
raise ValueError(
"Inconsistent batch sizes passed in. Expected "
"parameters to have no batch size or all the same "
"batch size but got sizes: {}.".format(batch_sizes))
else:
batch_size = 1
if self.gp:
old_params_to_new_params = self.dgp2dcp.canon_methods._parameters
param_map = {}
# construct a list of params for the DCP problem
for param, value in zip(self.params, params):
if param in old_params_to_new_params:
new_id = old_params_to_new_params[param].id
param_map[new_id] = np.log(value)
else:
new_id = param.id
param_map[new_id] = value
params = [param_map[pid] for pid in self.param_ids]
As, bs, cs = [], [], []
for i in range(batch_size):
params_i = [
p if sz == 0 else p[i] for p, sz in zip(params, batch_sizes)]
A, b, c = self._problem_data_from_params(params_i)
As.append(A)
bs.append(b)
cs.append(c)
try:
xs, _, ss, _, DT = diffcp.solve_and_derivative_batch(
As=As, bs=bs, cs=cs, cone_dicts=[self.cones] * batch_size,
**solver_args)
except diffcp.SolverError as e:
print(
"Please consider re-formulating your problem so that "
"it is always solvable or increasing the number of "
"solver iterations.")
raise e
DT = self._restrict_DT_to_dx(DT, batch_size, ss[0].shape)
solns = [self._split_solution(x) for x in xs]
# soln[i] is a tensor with first dimension equal to batch_size, holding
# the optimal values for variable i
solution = [
tf.stack([s[i] for s in solns]) for i in range(len(self.vars))]
if not any_batched:
solution = [tf.squeeze(s, 0) for s in solution]
if self.gp:
solution = [tf.exp(s) for s in solution]
def gradient_function(*dsoln):
if self.gp:
dsoln = [dsoln*s for dsoln, s in zip(dsoln, solution)]
if not any_batched:
dsoln = [tf.expand_dims(dvar, 0) for dvar in dsoln]
# split the batched dsoln tensors into lists, with one list
# corresponding to each problem in the batch
dsoln_lists = [[] for _ in range(batch_size)]
for value in dsoln:
tensors = tf.split(value, batch_size)
for dsoln_list, t in zip(dsoln_lists, tensors):
dsoln_list.append(tf.squeeze(t))
dxs = [self._dx_from_dsoln(dsoln_list)
for dsoln_list in dsoln_lists]
dAs, dbs, dcs = DT(dxs)
dparams_dict_unbatched = [
self.asa_maps.apply_param_jac(dc, -dA, db) for
(dA, db, dc) in zip(dAs, dbs, dcs)]
dparams = []
for pid in self.param_ids:
dparams.append(
tf.constant([d[pid] for d in dparams_dict_unbatched]))
if not any_batched:
dparams = tuple(tf.squeeze(dparam, 0) for dparam in dparams)
else:
for i, sz in enumerate(batch_sizes):
if sz == 0:
dparams[i] = tf.reduce_sum(dparams[i], axis=0)
if self.gp:
# differentiate through the log transformation of params
dcp_dparams = dparams
dparams = []
grads = {pid: g for pid, g in zip(self.param_ids, dcp_dparams)}
old_params_to_new_params = (
self.dgp2dcp.canon_methods._parameters
)
for param, value in zip(self.params, tf_params):
g = 0.0 if param.id not in grads else grads[param.id]
if param in old_params_to_new_params:
dcp_param_id = old_params_to_new_params[param].id
# new_param.value == log(param), apply chain rule
g = g + (1.0 / value) * grads[dcp_param_id]
dparams.append(g)
return tuple(dparams)
return solution, gradient_function
| apache-2.0 | -8,604,243,101,993,928,000 | 41.311037 | 79 | 0.539957 | false |
evilhero/mylar | lib/comictaggerlib/autotagmatchwindow.py | 1 | 8516 | """A PyQT4 dialog to select from automated issue matches"""
# Copyright 2012-2014 Anthony Beville
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
#import sys
from PyQt4 import QtCore, QtGui, uic
#from PyQt4.QtCore import QUrl, pyqtSignal, QByteArray
from settings import ComicTaggerSettings
from comicarchive import MetaDataStyle
from coverimagewidget import CoverImageWidget
from comictaggerlib.ui.qtutils import reduceWidgetFontSize
#from imagefetcher import ImageFetcher
#from comicvinetalker import ComicVineTalker
#import utils
class AutoTagMatchWindow(QtGui.QDialog):
volume_id = 0
def __init__(self, parent, match_set_list, style, fetch_func):
super(AutoTagMatchWindow, self).__init__(parent)
uic.loadUi(
ComicTaggerSettings.getUIFile('matchselectionwindow.ui'), self)
self.altCoverWidget = CoverImageWidget(
self.altCoverContainer, CoverImageWidget.AltCoverMode)
gridlayout = QtGui.QGridLayout(self.altCoverContainer)
gridlayout.addWidget(self.altCoverWidget)
gridlayout.setContentsMargins(0, 0, 0, 0)
self.archiveCoverWidget = CoverImageWidget(
self.archiveCoverContainer, CoverImageWidget.ArchiveMode)
gridlayout = QtGui.QGridLayout(self.archiveCoverContainer)
gridlayout.addWidget(self.archiveCoverWidget)
gridlayout.setContentsMargins(0, 0, 0, 0)
reduceWidgetFontSize(self.twList)
reduceWidgetFontSize(self.teDescription, 1)
self.setWindowFlags(self.windowFlags() |
QtCore.Qt.WindowSystemMenuHint |
QtCore.Qt.WindowMaximizeButtonHint)
self.skipButton = QtGui.QPushButton(self.tr("Skip to Next"))
self.buttonBox.addButton(
self.skipButton, QtGui.QDialogButtonBox.ActionRole)
self.buttonBox.button(QtGui.QDialogButtonBox.Ok).setText(
"Accept and Write Tags")
self.match_set_list = match_set_list
self.style = style
self.fetch_func = fetch_func
self.current_match_set_idx = 0
self.twList.currentItemChanged.connect(self.currentItemChanged)
self.twList.cellDoubleClicked.connect(self.cellDoubleClicked)
self.skipButton.clicked.connect(self.skipToNext)
self.updateData()
def updateData(self):
self.current_match_set = self.match_set_list[
self.current_match_set_idx]
if self.current_match_set_idx + 1 == len(self.match_set_list):
self.buttonBox.button(
QtGui.QDialogButtonBox.Cancel).setDisabled(True)
# self.buttonBox.button(QtGui.QDialogButtonBox.Ok).setText("Accept")
self.skipButton.setText(self.tr("Skip"))
self.setCoverImage()
self.populateTable()
self.twList.resizeColumnsToContents()
self.twList.selectRow(0)
path = self.current_match_set.ca.path
self.setWindowTitle(
u"Select correct match or skip ({0} of {1}): {2}".format(
self.current_match_set_idx + 1,
len(self.match_set_list),
os.path.split(path)[1])
)
def populateTable(self):
while self.twList.rowCount() > 0:
self.twList.removeRow(0)
self.twList.setSortingEnabled(False)
row = 0
for match in self.current_match_set.matches:
self.twList.insertRow(row)
item_text = match['series']
item = QtGui.QTableWidgetItem(item_text)
item.setData(QtCore.Qt.ToolTipRole, item_text)
item.setData(QtCore.Qt.UserRole, (match,))
item.setFlags(QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled)
self.twList.setItem(row, 0, item)
if match['publisher'] is not None:
item_text = u"{0}".format(match['publisher'])
else:
item_text = u"Unknown"
item = QtGui.QTableWidgetItem(item_text)
item.setData(QtCore.Qt.ToolTipRole, item_text)
item.setFlags(QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled)
self.twList.setItem(row, 1, item)
month_str = u""
year_str = u"????"
if match['month'] is not None:
month_str = u"-{0:02d}".format(int(match['month']))
if match['year'] is not None:
year_str = u"{0}".format(match['year'])
item_text = year_str + month_str
item = QtGui.QTableWidgetItem(item_text)
item.setData(QtCore.Qt.ToolTipRole, item_text)
item.setFlags(QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled)
self.twList.setItem(row, 2, item)
item_text = match['issue_title']
if item_text is None:
item_text = ""
item = QtGui.QTableWidgetItem(item_text)
item.setData(QtCore.Qt.ToolTipRole, item_text)
item.setFlags(QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled)
self.twList.setItem(row, 3, item)
row += 1
self.twList.resizeColumnsToContents()
self.twList.setSortingEnabled(True)
self.twList.sortItems(2, QtCore.Qt.AscendingOrder)
self.twList.selectRow(0)
self.twList.resizeColumnsToContents()
self.twList.horizontalHeader().setStretchLastSection(True)
def cellDoubleClicked(self, r, c):
self.accept()
def currentItemChanged(self, curr, prev):
if curr is None:
return
if prev is not None and prev.row() == curr.row():
return
self.altCoverWidget.setIssueID(self.currentMatch()['issue_id'])
if self.currentMatch()['description'] is None:
self.teDescription.setText("")
else:
self.teDescription.setText(self.currentMatch()['description'])
def setCoverImage(self):
ca = self.current_match_set.ca
self.archiveCoverWidget.setArchive(ca)
def currentMatch(self):
row = self.twList.currentRow()
match = self.twList.item(row, 0).data(
QtCore.Qt.UserRole).toPyObject()[0]
return match
def accept(self):
self.saveMatch()
self.current_match_set_idx += 1
if self.current_match_set_idx == len(self.match_set_list):
# no more items
QtGui.QDialog.accept(self)
else:
self.updateData()
def skipToNext(self):
self.current_match_set_idx += 1
if self.current_match_set_idx == len(self.match_set_list):
# no more items
QtGui.QDialog.reject(self)
else:
self.updateData()
def reject(self):
reply = QtGui.QMessageBox.question(
self,
self.tr("Cancel Matching"),
self.tr("Are you sure you wish to cancel the matching process?"),
QtGui.QMessageBox.Yes,
QtGui.QMessageBox.No)
if reply == QtGui.QMessageBox.No:
return
QtGui.QDialog.reject(self)
def saveMatch(self):
match = self.currentMatch()
ca = self.current_match_set.ca
md = ca.readMetadata(self.style)
if md.isEmpty:
md = ca.metadataFromFilename()
# now get the particular issue data
cv_md = self.fetch_func(match)
if cv_md is None:
QtGui.QMessageBox.critical(self, self.tr("Network Issue"), self.tr(
"Could not connect to Comic Vine to get issue details!"))
return
QtGui.QApplication.setOverrideCursor(
QtGui.QCursor(QtCore.Qt.WaitCursor))
md.overlay(cv_md)
success = ca.writeMetadata(md, self.style)
ca.loadCache([MetaDataStyle.CBI, MetaDataStyle.CIX])
QtGui.QApplication.restoreOverrideCursor()
if not success:
QtGui.QMessageBox.warning(self, self.tr("Write Error"), self.tr(
"Saving the tags to the archive seemed to fail!"))
| gpl-3.0 | -8,156,623,721,833,608,000 | 33.759184 | 80 | 0.62776 | false |
xen0n/moehime | moehime/saimoe/datasource/livedoor_thread.py | 1 | 3107 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# moehime / saimoe / data source - livedoor - thread
#
# Copyright (C) 2012 Wang Xuerui <[email protected]>
#
# This file is part of moehime.
#
# moehime is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the
# Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# moehime is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public
# License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with moehime. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals, division
import re
import datetime
from pyquery import PyQuery as pq
from .base import PostThreadInfoBase
from ..exc import ThreadBadTitleError
# RE_NUM_REPLIES = re.compile(r'^\((\d+)\)$')
RE_SIMPLE_TID = re.compile(r'read.cgi/anime/10101/(\d+)/$')
RE_MTIME = re.compile(
r'投稿日:\s+'
r'(?P<Y>\d{4})/(?P<m>\d{2})/(?P<d>\d{2})\([日月水火木金土]\)\s+'
r'(?P<H>\d{2}):(?P<M>\d{2}):(?P<S>\d{2})'
)
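# For illustration, RE_MTIME expects post dates of the form
# 'YYYY/MM/DD(weekday) HH:MM:SS' following the 投稿日 marker, e.g.
# 2012/10/14(日) 12:34:56 (a made-up example consistent with the pattern).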
VOTE_THREAD_SIGNATURE = '投票スレ'
class PostThreadInfo_livedoor(PostThreadInfoBase):
def __init__(self, raw_thread, datasource):
super(PostThreadInfo_livedoor, self).__init__(raw_thread, datasource)
dl = pq(raw_thread)
# header
hdr_elem = dl(b'table:first')
title = hdr_elem(b'font[color = "#FF0000"]').text()
        # only interested in vote threads
if VOTE_THREAD_SIGNATURE not in title:
raise ThreadBadTitleError
# this is of format "(Res:\d+)"
num_replies_txt = hdr_elem(b'tr>td:first>b + font').text()
num_replies = int(num_replies_txt[5:-1])
# again walking the table layout
href = hdr_elem(b'tr>td:last>font>a:first').attr('href')
# extract thread id from href
# 从链接地址里提取线索 id
tid = RE_SIMPLE_TID.search(href).group(1)
        # extract last reply time from <dt>
lastreply_text = dl(b'dt:last').text()
mtime_match = RE_MTIME.search(lastreply_text)
if mtime_match is None:
raise RuntimeError('mtime RE match failed')
m_grp = lambda x: int(mtime_match.group(x))
mtime = datetime.datetime(
m_grp('Y'),
m_grp('m'),
m_grp('d'),
m_grp('H'),
m_grp('M'),
m_grp('S'),
)
        # populate self
self.title = title
self.tid = tid
self.num_replies = num_replies
self.mtime = mtime
        # range information
self.start = 1
self.end = None
# vim:ai:et:ts=4:sw=4:sts=4:ff=unix:fenc=utf-8:
| agpl-3.0 | -5,313,366,697,778,717,000 | 29.373737 | 77 | 0.597938 | false |
pgjones/flake8-sql | flake8_sql/parser.py | 1 | 3054 | from typing import Any, Generator, List, Tuple
import sqlparse
from .keywords import ROOT_KEYWORDS
class Token:
def __init__(self, token: sqlparse.sql.Token, row: int, col: int, depth: int) -> None:
self._token = token
self.row = row
self.col = col
self.depth = depth
@property
def is_whitespace(self) -> bool:
return self._token.is_whitespace
@property
def is_keyword(self) -> bool:
return self._token.is_keyword
@property
def is_root_keyword(self) -> bool:
if not self.is_keyword:
return False
value = self.value.split()[-1].upper()
if value == "FROM" and isinstance(self._token.parent.parent, sqlparse.sql.Function):
return False
return value in ROOT_KEYWORDS
@property
def is_function_name(self) -> bool:
# Note the only name-token who's grandparent is a function is
# the function identifier.
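        # For illustration, in a statement like "SELECT MAX(price) FROM t",
        # the Name token for "MAX" sits inside a Function group whose parent
        # chain marks it as the function identifier (provided sqlparse also
        # recognizes "MAX" as a keyword), whereas "price" is not.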
return (
self._token.ttype == sqlparse.tokens.Name and
self._token.within(sqlparse.sql.Function) and
isinstance(self._token.parent.parent, sqlparse.sql.Function) and
sqlparse.keywords.is_keyword(self._token.value)[0] == sqlparse.tokens.Token.Keyword
)
@property
def is_name(self) -> bool:
return self._token.ttype == sqlparse.tokens.Name and not self.is_keyword
@property
def is_punctuation(self) -> bool:
return self._token.ttype == sqlparse.tokens.Punctuation
@property
def is_comparison(self) -> bool:
return self._token.ttype == sqlparse.tokens.Comparison
@property
def is_newline(self) -> bool:
return self._token.ttype == sqlparse.tokens.Text.Whitespace.Newline
@property
def value(self) -> str:
return self._token.value
class Parser:
def __init__(self, sql: str, initial_offset: int) -> None:
self._initial_offset = initial_offset
        self._tokens = []  # type: List[Tuple[sqlparse.sql.Token, int]]
depth = 0
for statement in sqlparse.parse(sql):
for token in statement.tokens:
if token.is_group:
self._tokens.extend(_flatten_group(token, depth))
else:
self._tokens.append((token, depth))
def __iter__(self) -> Generator[Token, Any, None]:
row = 0
col = self._initial_offset
for sql_token, depth in self._tokens:
token = Token(sql_token, row, col, depth)
yield token
if token.is_newline:
row += 1
col = 0
else:
col += len(token.value)
def _flatten_group(token: sqlparse.sql.Token, depth: int = 0) -> List[sqlparse.sql.Token]:
tokens = []
for item in token.tokens:
if item.ttype == sqlparse.tokens.DML:
depth += 1
if item.is_group:
tokens.extend(_flatten_group(item, depth))
else:
tokens.append((item, depth))
return tokens
| mit | 4,637,596,389,222,792,000 | 29.54 | 95 | 0.584152 | false |
gkc1000/pyscf | pyscf/pbc/dft/test/test_kuks.py | 1 | 2244 | #!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <[email protected]>
#
import unittest
import numpy as np
from pyscf.pbc import gto as pbcgto
from pyscf.pbc import dft as pbcdft
L = 4.
cell = pbcgto.Cell()
cell.verbose = 0
cell.a = np.eye(3)*L
cell.atom =[['He' , ( L/2+0., L/2+0. , L/2+1.)],]
cell.basis = {'He': [[0, (4.0, 1.0)], [0, (1.0, 1.0)]]}
cell.build()
def tearDownModule():
global cell
del cell
class KnownValues(unittest.TestCase):
def test_klda8_cubic_kpt_222_high_cost(self):
cell = pbcgto.Cell()
cell.unit = 'A'
cell.a = '''3.5668 0. 0.
0. 3.5668 0.
0. 0. 3.5668'''
cell.mesh = np.array([17]*3)
cell.atom ='''
C, 0., 0., 0.
C, 0.8917, 0.8917, 0.8917
C, 1.7834, 1.7834, 0.
C, 2.6751, 2.6751, 0.8917
C, 1.7834, 0. , 1.7834
C, 2.6751, 0.8917, 2.6751
C, 0. , 1.7834, 1.7834
C, 0.8917, 2.6751, 2.6751'''
cell.basis = 'gth-szv'
cell.pseudo = 'gth-pade'
cell.verbose = 5
cell.output = '/dev/null'
cell.build()
kpts = cell.make_kpts((2,2,2), with_gamma_point=False)
mf = pbcdft.KUKS(cell, kpts)
mf.conv_tol = 1e-9
mf.xc = 'lda,vwn'
e1 = mf.scf()
self.assertAlmostEqual(e1, -45.42583489512954, 8)
def test_rsh_df(self):
mf = pbcdft.KUKS(cell).density_fit()
mf.xc = 'camb3lyp'
mf.omega = .15
mf.kernel()
self.assertAlmostEqual(mf.e_tot, -2.399571378419408, 7)
if __name__ == '__main__':
print("Full Tests for pbc.dft.kuks")
unittest.main()
| apache-2.0 | -1,019,804,087,863,395,600 | 27.405063 | 74 | 0.593583 | false |
pizzathief/numpy | numpy/distutils/misc_util.py | 1 | 84661 | from __future__ import division, absolute_import, print_function
import os
import re
import sys
import copy
import glob
import atexit
import tempfile
import subprocess
import shutil
import multiprocessing
import textwrap
import distutils
from distutils.errors import DistutilsError
try:
from threading import local as tlocal
except ImportError:
from dummy_threading import local as tlocal
# stores temporary directory of each thread to only create one per thread
_tdata = tlocal()
# store all created temporary directories so they can be deleted on exit
_tmpdirs = []
def clean_up_temporary_directory():
if _tmpdirs is not None:
for d in _tmpdirs:
try:
shutil.rmtree(d)
except OSError:
pass
atexit.register(clean_up_temporary_directory)
from numpy.distutils.compat import get_exception
from numpy.compat import basestring
from numpy.compat import npy_load_module
__all__ = ['Configuration', 'get_numpy_include_dirs', 'default_config_dict',
'dict_append', 'appendpath', 'generate_config_py',
'get_cmd', 'allpath', 'get_mathlibs',
'terminal_has_colors', 'red_text', 'green_text', 'yellow_text',
'blue_text', 'cyan_text', 'cyg2win32', 'mingw32', 'all_strings',
'has_f_sources', 'has_cxx_sources', 'filter_sources',
'get_dependencies', 'is_local_src_dir', 'get_ext_source_files',
'get_script_files', 'get_lib_source_files', 'get_data_files',
'dot_join', 'get_frame', 'minrelpath', 'njoin',
'is_sequence', 'is_string', 'as_list', 'gpaths', 'get_language',
'quote_args', 'get_build_architecture', 'get_info', 'get_pkg_info',
'get_num_build_jobs']
class InstallableLib(object):
"""
Container to hold information on an installable library.
Parameters
----------
name : str
Name of the installed library.
build_info : dict
Dictionary holding build information.
target_dir : str
Absolute path specifying where to install the library.
See Also
--------
Configuration.add_installed_library
Notes
-----
The three parameters are stored as attributes with the same names.
"""
def __init__(self, name, build_info, target_dir):
self.name = name
self.build_info = build_info
self.target_dir = target_dir
def get_num_build_jobs():
"""
Get number of parallel build jobs set by the --parallel command line
argument of setup.py
If the command did not receive a setting the environment variable
NPY_NUM_BUILD_JOBS is checked. If that is unset, return the number of
processors on the system, with a maximum of 8 (to prevent
    overloading the system if there are a lot of CPUs).
Returns
-------
out : int
number of parallel jobs that can be run
"""
from numpy.distutils.core import get_distribution
try:
cpu_count = len(os.sched_getaffinity(0))
except AttributeError:
cpu_count = multiprocessing.cpu_count()
cpu_count = min(cpu_count, 8)
envjobs = int(os.environ.get("NPY_NUM_BUILD_JOBS", cpu_count))
dist = get_distribution()
# may be None during configuration
if dist is None:
return envjobs
# any of these three may have the job set, take the largest
cmdattr = (getattr(dist.get_command_obj('build'), 'parallel', None),
getattr(dist.get_command_obj('build_ext'), 'parallel', None),
getattr(dist.get_command_obj('build_clib'), 'parallel', None))
if all(x is None for x in cmdattr):
return envjobs
else:
return max(x for x in cmdattr if x is not None)
def quote_args(args):
# don't used _nt_quote_args as it does not check if
# args items already have quotes or not.
args = list(args)
for i in range(len(args)):
a = args[i]
if ' ' in a and a[0] not in '"\'':
args[i] = '"%s"' % (a)
return args
def allpath(name):
"Convert a /-separated pathname to one using the OS's path separator."
splitted = name.split('/')
return os.path.join(*splitted)
def rel_path(path, parent_path):
"""Return path relative to parent_path."""
# Use realpath to avoid issues with symlinked dirs (see gh-7707)
pd = os.path.realpath(os.path.abspath(parent_path))
apath = os.path.realpath(os.path.abspath(path))
if len(apath) < len(pd):
return path
if apath == pd:
return ''
if pd == apath[:len(pd)]:
assert apath[len(pd)] in [os.sep], repr((path, apath[len(pd)]))
path = apath[len(pd)+1:]
return path
def get_path_from_frame(frame, parent_path=None):
"""Return path of the module given a frame object from the call stack.
Returned path is relative to parent_path when given,
otherwise it is absolute path.
"""
# First, try to find if the file name is in the frame.
try:
caller_file = eval('__file__', frame.f_globals, frame.f_locals)
d = os.path.dirname(os.path.abspath(caller_file))
except NameError:
# __file__ is not defined, so let's try __name__. We try this second
# because setuptools spoofs __name__ to be '__main__' even though
# sys.modules['__main__'] might be something else, like easy_install(1).
caller_name = eval('__name__', frame.f_globals, frame.f_locals)
__import__(caller_name)
mod = sys.modules[caller_name]
if hasattr(mod, '__file__'):
d = os.path.dirname(os.path.abspath(mod.__file__))
else:
# we're probably running setup.py as execfile("setup.py")
# (likely we're building an egg)
d = os.path.abspath('.')
# hmm, should we use sys.argv[0] like in __builtin__ case?
if parent_path is not None:
d = rel_path(d, parent_path)
return d or '.'
def njoin(*path):
"""Join two or more pathname components +
- convert a /-separated pathname to one using the OS's path separator.
- resolve `..` and `.` from path.
Either passing n arguments as in njoin('a','b'), or a sequence
of n names as in njoin(['a','b']) is handled, or a mixture of such arguments.
"""
paths = []
for p in path:
if is_sequence(p):
# njoin(['a', 'b'], 'c')
paths.append(njoin(*p))
else:
assert is_string(p)
paths.append(p)
path = paths
if not path:
# njoin()
joined = ''
else:
# njoin('a', 'b')
joined = os.path.join(*path)
if os.path.sep != '/':
joined = joined.replace('/', os.path.sep)
return minrelpath(joined)
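# Illustrative examples (doctest style, as used elsewhere in this module;
# output shown for a POSIX os.sep):
#
#     >>> njoin('a', 'b', '..', 'c')
#     'a/c'
#     >>> njoin(['a', 'b'], 'c')
#     'a/b/c'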
def get_mathlibs(path=None):
"""Return the MATHLIB line from numpyconfig.h
"""
if path is not None:
config_file = os.path.join(path, '_numpyconfig.h')
else:
# Look for the file in each of the numpy include directories.
dirs = get_numpy_include_dirs()
for path in dirs:
fn = os.path.join(path, '_numpyconfig.h')
if os.path.exists(fn):
config_file = fn
break
else:
raise DistutilsError('_numpyconfig.h not found in numpy include '
'dirs %r' % (dirs,))
with open(config_file) as fid:
mathlibs = []
s = '#define MATHLIB'
for line in fid:
if line.startswith(s):
value = line[len(s):].strip()
if value:
mathlibs.extend(value.split(','))
return mathlibs
def minrelpath(path):
"""Resolve `..` and '.' from path.
"""
if not is_string(path):
return path
if '.' not in path:
return path
l = path.split(os.sep)
while l:
try:
i = l.index('.', 1)
except ValueError:
break
del l[i]
j = 1
while l:
try:
i = l.index('..', j)
except ValueError:
break
if l[i-1]=='..':
j += 1
else:
del l[i], l[i-1]
j = 1
if not l:
return ''
return os.sep.join(l)
def sorted_glob(fileglob):
"""sorts output of python glob for https://bugs.python.org/issue30461
to allow extensions to have reproducible build results"""
return sorted(glob.glob(fileglob))
def _fix_paths(paths, local_path, include_non_existing):
assert is_sequence(paths), repr(type(paths))
new_paths = []
assert not is_string(paths), repr(paths)
for n in paths:
if is_string(n):
if '*' in n or '?' in n:
p = sorted_glob(n)
p2 = sorted_glob(njoin(local_path, n))
if p2:
new_paths.extend(p2)
elif p:
new_paths.extend(p)
else:
if include_non_existing:
new_paths.append(n)
print('could not resolve pattern in %r: %r' %
(local_path, n))
else:
n2 = njoin(local_path, n)
if os.path.exists(n2):
new_paths.append(n2)
else:
if os.path.exists(n):
new_paths.append(n)
elif include_non_existing:
new_paths.append(n)
if not os.path.exists(n):
print('non-existing path in %r: %r' %
(local_path, n))
elif is_sequence(n):
new_paths.extend(_fix_paths(n, local_path, include_non_existing))
else:
new_paths.append(n)
return [minrelpath(p) for p in new_paths]
def gpaths(paths, local_path='', include_non_existing=True):
"""Apply glob to paths and prepend local_path if needed.
"""
if is_string(paths):
paths = (paths,)
return _fix_paths(paths, local_path, include_non_existing)
def make_temp_file(suffix='', prefix='', text=True):
if not hasattr(_tdata, 'tempdir'):
_tdata.tempdir = tempfile.mkdtemp()
_tmpdirs.append(_tdata.tempdir)
fid, name = tempfile.mkstemp(suffix=suffix,
prefix=prefix,
dir=_tdata.tempdir,
text=text)
fo = os.fdopen(fid, 'w')
return fo, name
# Hooks for colored terminal output.
# See also https://web.archive.org/web/20100314204946/http://www.livinglogic.de/Python/ansistyle
def terminal_has_colors():
if sys.platform=='cygwin' and 'USE_COLOR' not in os.environ:
# Avoid importing curses that causes illegal operation
# with a message:
# PYTHON2 caused an invalid page fault in
# module CYGNURSES7.DLL as 015f:18bbfc28
# Details: Python 2.3.3 [GCC 3.3.1 (cygming special)]
# ssh to Win32 machine from debian
# curses.version is 2.2
# CYGWIN_98-4.10, release 1.5.7(0.109/3/2))
return 0
if hasattr(sys.stdout, 'isatty') and sys.stdout.isatty():
try:
import curses
curses.setupterm()
if (curses.tigetnum("colors") >= 0
and curses.tigetnum("pairs") >= 0
and ((curses.tigetstr("setf") is not None
and curses.tigetstr("setb") is not None)
or (curses.tigetstr("setaf") is not None
and curses.tigetstr("setab") is not None)
or curses.tigetstr("scp") is not None)):
return 1
except Exception:
pass
return 0
if terminal_has_colors():
_colour_codes = dict(black=0, red=1, green=2, yellow=3,
blue=4, magenta=5, cyan=6, white=7, default=9)
def colour_text(s, fg=None, bg=None, bold=False):
seq = []
if bold:
seq.append('1')
if fg:
fgcode = 30 + _colour_codes.get(fg.lower(), 0)
seq.append(str(fgcode))
if bg:
            bgcode = 40 + _colour_codes.get(bg.lower(), 7)
seq.append(str(bgcode))
if seq:
return '\x1b[%sm%s\x1b[0m' % (';'.join(seq), s)
else:
return s
else:
def colour_text(s, fg=None, bg=None):
return s
def default_text(s):
return colour_text(s, 'default')
def red_text(s):
return colour_text(s, 'red')
def green_text(s):
return colour_text(s, 'green')
def yellow_text(s):
return colour_text(s, 'yellow')
def cyan_text(s):
return colour_text(s, 'cyan')
def blue_text(s):
return colour_text(s, 'blue')
#########################
def cyg2win32(path):
if sys.platform=='cygwin' and path.startswith('/cygdrive'):
path = path[10] + ':' + os.path.normcase(path[11:])
return path
def mingw32():
"""Return true when using mingw32 environment.
"""
if sys.platform=='win32':
if os.environ.get('OSTYPE', '')=='msys':
return True
if os.environ.get('MSYSTEM', '')=='MINGW32':
return True
return False
def msvc_runtime_version():
"Return version of MSVC runtime library, as defined by __MSC_VER__ macro"
msc_pos = sys.version.find('MSC v.')
if msc_pos != -1:
msc_ver = int(sys.version[msc_pos+6:msc_pos+10])
else:
msc_ver = None
return msc_ver
def msvc_runtime_library():
"Return name of MSVC runtime library if Python was built with MSVC >= 7"
ver = msvc_runtime_major ()
if ver:
if ver < 140:
return "msvcr%i" % ver
else:
return "vcruntime%i" % ver
else:
return None
def msvc_runtime_major():
"Return major version of MSVC runtime coded like get_build_msvc_version"
major = {1300: 70, # MSVC 7.0
1310: 71, # MSVC 7.1
1400: 80, # MSVC 8
1500: 90, # MSVC 9 (aka 2008)
1600: 100, # MSVC 10 (aka 2010)
1900: 140, # MSVC 14 (aka 2015)
}.get(msvc_runtime_version(), None)
return major
#########################
#XXX need support for .C that is also C++
cxx_ext_match = re.compile(r'.*[.](cpp|cxx|cc)\Z', re.I).match
fortran_ext_match = re.compile(r'.*[.](f90|f95|f77|for|ftn|f)\Z', re.I).match
f90_ext_match = re.compile(r'.*[.](f90|f95)\Z', re.I).match
f90_module_name_match = re.compile(r'\s*module\s*(?P<name>[\w_]+)', re.I).match
def _get_f90_modules(source):
"""Return a list of Fortran f90 module names that
given source file defines.
"""
if not f90_ext_match(source):
return []
modules = []
with open(source, 'r') as f:
for line in f:
m = f90_module_name_match(line)
if m:
name = m.group('name')
modules.append(name)
# break # XXX can we assume that there is one module per file?
return modules
def is_string(s):
return isinstance(s, basestring)
def all_strings(lst):
"""Return True if all items in lst are string objects. """
for item in lst:
if not is_string(item):
return False
return True
def is_sequence(seq):
if is_string(seq):
return False
try:
len(seq)
except Exception:
return False
return True
def is_glob_pattern(s):
return is_string(s) and ('*' in s or '?' in s)
def as_list(seq):
if is_sequence(seq):
return list(seq)
else:
return [seq]
def get_language(sources):
# not used in numpy/scipy packages, use build_ext.detect_language instead
"""Determine language value (c,f77,f90) from sources """
language = None
for source in sources:
if isinstance(source, str):
if f90_ext_match(source):
language = 'f90'
break
elif fortran_ext_match(source):
language = 'f77'
return language
def has_f_sources(sources):
"""Return True if sources contains Fortran files """
for source in sources:
if fortran_ext_match(source):
return True
return False
def has_cxx_sources(sources):
"""Return True if sources contains C++ files """
for source in sources:
if cxx_ext_match(source):
return True
return False
def filter_sources(sources):
"""Return four lists of filenames containing
C, C++, Fortran, and Fortran 90 module sources,
respectively.
"""
c_sources = []
cxx_sources = []
f_sources = []
fmodule_sources = []
for source in sources:
if fortran_ext_match(source):
modules = _get_f90_modules(source)
if modules:
fmodule_sources.append(source)
else:
f_sources.append(source)
elif cxx_ext_match(source):
cxx_sources.append(source)
else:
c_sources.append(source)
return c_sources, cxx_sources, f_sources, fmodule_sources
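# Illustrative sketch (file names are hypothetical): splitting a mixed source
# list into the four categories returned above.  Note that classifying
# .f90/.f95 files requires reading them, since only files that define a
# Fortran module end up in the fourth list.
#
#     c, cxx, f77, f90mod = filter_sources(
#         ['core.c', 'wrap.cpp', 'legacy.f', 'modern.f90'])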
def _get_headers(directory_list):
# get *.h files from list of directories
headers = []
for d in directory_list:
head = sorted_glob(os.path.join(d, "*.h")) #XXX: *.hpp files??
headers.extend(head)
return headers
def _get_directories(list_of_sources):
# get unique directories from list of sources.
direcs = []
for f in list_of_sources:
d = os.path.split(f)
if d[0] != '' and not d[0] in direcs:
direcs.append(d[0])
return direcs
def _commandline_dep_string(cc_args, extra_postargs, pp_opts):
"""
Return commandline representation used to determine if a file needs
to be recompiled
"""
cmdline = 'commandline: '
cmdline += ' '.join(cc_args)
cmdline += ' '.join(extra_postargs)
cmdline += ' '.join(pp_opts) + '\n'
return cmdline
def get_dependencies(sources):
#XXX scan sources for include statements
return _get_headers(_get_directories(sources))
def is_local_src_dir(directory):
"""Return true if directory is local directory.
"""
if not is_string(directory):
return False
abs_dir = os.path.abspath(directory)
c = os.path.commonprefix([os.getcwd(), abs_dir])
new_dir = abs_dir[len(c):].split(os.sep)
if new_dir and not new_dir[0]:
new_dir = new_dir[1:]
if new_dir and new_dir[0]=='build':
return False
new_dir = os.sep.join(new_dir)
return os.path.isdir(new_dir)
def general_source_files(top_path):
pruned_directories = {'CVS':1, '.svn':1, 'build':1}
prune_file_pat = re.compile(r'(?:[~#]|\.py[co]|\.o)$')
for dirpath, dirnames, filenames in os.walk(top_path, topdown=True):
pruned = [ d for d in dirnames if d not in pruned_directories ]
dirnames[:] = pruned
for f in filenames:
if not prune_file_pat.search(f):
yield os.path.join(dirpath, f)
def general_source_directories_files(top_path):
"""Return a directory name relative to top_path and
files contained.
"""
pruned_directories = ['CVS', '.svn', 'build']
prune_file_pat = re.compile(r'(?:[~#]|\.py[co]|\.o)$')
for dirpath, dirnames, filenames in os.walk(top_path, topdown=True):
pruned = [ d for d in dirnames if d not in pruned_directories ]
dirnames[:] = pruned
for d in dirnames:
dpath = os.path.join(dirpath, d)
rpath = rel_path(dpath, top_path)
files = []
for f in os.listdir(dpath):
fn = os.path.join(dpath, f)
if os.path.isfile(fn) and not prune_file_pat.search(fn):
files.append(fn)
yield rpath, files
dpath = top_path
rpath = rel_path(dpath, top_path)
filenames = [os.path.join(dpath, f) for f in os.listdir(dpath) \
if not prune_file_pat.search(f)]
files = [f for f in filenames if os.path.isfile(f)]
yield rpath, files
def get_ext_source_files(ext):
# Get sources and any include files in the same directory.
filenames = []
sources = [_m for _m in ext.sources if is_string(_m)]
filenames.extend(sources)
filenames.extend(get_dependencies(sources))
for d in ext.depends:
if is_local_src_dir(d):
filenames.extend(list(general_source_files(d)))
elif os.path.isfile(d):
filenames.append(d)
return filenames
def get_script_files(scripts):
scripts = [_m for _m in scripts if is_string(_m)]
return scripts
def get_lib_source_files(lib):
filenames = []
sources = lib[1].get('sources', [])
sources = [_m for _m in sources if is_string(_m)]
filenames.extend(sources)
filenames.extend(get_dependencies(sources))
depends = lib[1].get('depends', [])
for d in depends:
if is_local_src_dir(d):
filenames.extend(list(general_source_files(d)))
elif os.path.isfile(d):
filenames.append(d)
return filenames
def get_shared_lib_extension(is_python_ext=False):
"""Return the correct file extension for shared libraries.
Parameters
----------
is_python_ext : bool, optional
Whether the shared library is a Python extension. Default is False.
Returns
-------
so_ext : str
The shared library extension.
Notes
-----
For Python shared libs, `so_ext` will typically be '.so' on Linux and OS X,
and '.pyd' on Windows. For Python >= 3.2 `so_ext` has a tag prepended on
POSIX systems according to PEP 3149. For Python 3.2 this is implemented on
Linux, but not on OS X.
"""
confvars = distutils.sysconfig.get_config_vars()
# SO is deprecated in 3.3.1, use EXT_SUFFIX instead
so_ext = confvars.get('EXT_SUFFIX', None)
if so_ext is None:
so_ext = confvars.get('SO', '')
if not is_python_ext:
# hardcode known values, config vars (including SHLIB_SUFFIX) are
# unreliable (see #3182)
# darwin, windows and debug linux are wrong in 3.3.1 and older
if (sys.platform.startswith('linux') or
sys.platform.startswith('gnukfreebsd')):
so_ext = '.so'
elif sys.platform.startswith('darwin'):
so_ext = '.dylib'
elif sys.platform.startswith('win'):
so_ext = '.dll'
else:
# fall back to config vars for unknown platforms
# fix long extension for Python >=3.2, see PEP 3149.
if 'SOABI' in confvars:
# Does nothing unless SOABI config var exists
so_ext = so_ext.replace('.' + confvars.get('SOABI'), '', 1)
return so_ext
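# Illustrative sketch; the exact values depend on the platform and interpreter
# build, so the suffixes below are typical examples, not guarantees:
#
#     get_shared_lib_extension(is_python_ext=True)   # e.g. '.cpython-37m-x86_64-linux-gnu.so'
#     get_shared_lib_extension(is_python_ext=False)  # e.g. '.so', '.dylib' or '.dll'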
def get_data_files(data):
if is_string(data):
return [data]
sources = data[1]
filenames = []
for s in sources:
if hasattr(s, '__call__'):
continue
if is_local_src_dir(s):
filenames.extend(list(general_source_files(s)))
elif is_string(s):
if os.path.isfile(s):
filenames.append(s)
else:
print('Not existing data file:', s)
else:
raise TypeError(repr(s))
return filenames
def dot_join(*args):
return '.'.join([a for a in args if a])
def get_frame(level=0):
"""Return frame object from call stack with given level.
"""
try:
return sys._getframe(level+1)
except AttributeError:
frame = sys.exc_info()[2].tb_frame
for _ in range(level+1):
frame = frame.f_back
return frame
######################
class Configuration(object):
_list_keys = ['packages', 'ext_modules', 'data_files', 'include_dirs',
'libraries', 'headers', 'scripts', 'py_modules',
'installed_libraries', 'define_macros']
_dict_keys = ['package_dir', 'installed_pkg_config']
_extra_keys = ['name', 'version']
numpy_include_dirs = []
def __init__(self,
package_name=None,
parent_name=None,
top_path=None,
package_path=None,
caller_level=1,
setup_name='setup.py',
**attrs):
"""Construct configuration instance of a package.
package_name -- name of the package
Ex.: 'distutils'
parent_name -- name of the parent package
Ex.: 'numpy'
top_path -- directory of the toplevel package
Ex.: the directory where the numpy package source sits
package_path -- directory of package. Will be computed by magic from the
directory of the caller module if not specified
Ex.: the directory where numpy.distutils is
caller_level -- frame level to caller namespace, internal parameter.
"""
self.name = dot_join(parent_name, package_name)
self.version = None
caller_frame = get_frame(caller_level)
self.local_path = get_path_from_frame(caller_frame, top_path)
# local_path -- directory of a file (usually setup.py) that
# defines a configuration() function.
if top_path is None:
top_path = self.local_path
self.local_path = ''
if package_path is None:
package_path = self.local_path
elif os.path.isdir(njoin(self.local_path, package_path)):
package_path = njoin(self.local_path, package_path)
if not os.path.isdir(package_path or '.'):
raise ValueError("%r is not a directory" % (package_path,))
self.top_path = top_path
self.package_path = package_path
# this is the relative path in the installed package
self.path_in_package = os.path.join(*self.name.split('.'))
self.list_keys = self._list_keys[:]
self.dict_keys = self._dict_keys[:]
for n in self.list_keys:
v = copy.copy(attrs.get(n, []))
setattr(self, n, as_list(v))
for n in self.dict_keys:
v = copy.copy(attrs.get(n, {}))
setattr(self, n, v)
known_keys = self.list_keys + self.dict_keys
self.extra_keys = self._extra_keys[:]
for n in attrs.keys():
if n in known_keys:
continue
a = attrs[n]
setattr(self, n, a)
if isinstance(a, list):
self.list_keys.append(n)
elif isinstance(a, dict):
self.dict_keys.append(n)
else:
self.extra_keys.append(n)
if os.path.exists(njoin(package_path, '__init__.py')):
self.packages.append(self.name)
self.package_dir[self.name] = package_path
self.options = dict(
ignore_setup_xxx_py = False,
assume_default_configuration = False,
delegate_options_to_subpackages = False,
quiet = False,
)
caller_instance = None
for i in range(1, 3):
try:
f = get_frame(i)
except ValueError:
break
try:
caller_instance = eval('self', f.f_globals, f.f_locals)
break
except NameError:
pass
if isinstance(caller_instance, self.__class__):
if caller_instance.options['delegate_options_to_subpackages']:
self.set_options(**caller_instance.options)
self.setup_name = setup_name
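    # A minimal usage sketch (package and path names are hypothetical): the
    # conventional numpy.distutils setup.py defines a configuration() function
    # that builds a Configuration and returns it to setup(**config.todict()).
    #
    #     def configuration(parent_package='', top_path=None):
    #         config = Configuration('mypkg', parent_package, top_path)
    #         config.add_data_dir('tests')
    #         return config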
def todict(self):
"""
Return a dictionary compatible with the keyword arguments of distutils
setup function.
Examples
--------
>>> setup(**config.todict()) #doctest: +SKIP
"""
self._optimize_data_files()
d = {}
known_keys = self.list_keys + self.dict_keys + self.extra_keys
for n in known_keys:
a = getattr(self, n)
if a:
d[n] = a
return d
def info(self, message):
if not self.options['quiet']:
print(message)
def warn(self, message):
sys.stderr.write('Warning: %s\n' % (message,))
def set_options(self, **options):
"""
Configure Configuration instance.
The following options are available:
- ignore_setup_xxx_py
- assume_default_configuration
- delegate_options_to_subpackages
- quiet
"""
for key, value in options.items():
if key in self.options:
self.options[key] = value
else:
raise ValueError('Unknown option: '+key)
def get_distribution(self):
"""Return the distutils distribution object for self."""
from numpy.distutils.core import get_distribution
return get_distribution()
def _wildcard_get_subpackage(self, subpackage_name,
parent_name,
caller_level = 1):
l = subpackage_name.split('.')
subpackage_path = njoin([self.local_path]+l)
dirs = [_m for _m in sorted_glob(subpackage_path) if os.path.isdir(_m)]
config_list = []
for d in dirs:
if not os.path.isfile(njoin(d, '__init__.py')):
continue
if 'build' in d.split(os.sep):
continue
n = '.'.join(d.split(os.sep)[-len(l):])
c = self.get_subpackage(n,
parent_name = parent_name,
caller_level = caller_level+1)
config_list.extend(c)
return config_list
def _get_configuration_from_setup_py(self, setup_py,
subpackage_name,
subpackage_path,
parent_name,
caller_level = 1):
# In case setup_py imports local modules:
sys.path.insert(0, os.path.dirname(setup_py))
try:
setup_name = os.path.splitext(os.path.basename(setup_py))[0]
n = dot_join(self.name, subpackage_name, setup_name)
setup_module = npy_load_module('_'.join(n.split('.')),
setup_py,
('.py', 'U', 1))
if not hasattr(setup_module, 'configuration'):
if not self.options['assume_default_configuration']:
self.warn('Assuming default configuration '\
'(%s does not define configuration())'\
% (setup_module))
config = Configuration(subpackage_name, parent_name,
self.top_path, subpackage_path,
caller_level = caller_level + 1)
else:
pn = dot_join(*([parent_name] + subpackage_name.split('.')[:-1]))
args = (pn,)
                def fix_args(args):
                    # configuration() may optionally accept top_path as a
                    # second positional argument
                    if setup_module.configuration.__code__.co_argcount > 1:
                        args = args + (self.top_path,)
                    return args
                args = fix_args(args)
config = setup_module.configuration(*args)
if config.name!=dot_join(parent_name, subpackage_name):
self.warn('Subpackage %r configuration returned as %r' % \
(dot_join(parent_name, subpackage_name), config.name))
finally:
del sys.path[0]
return config
def get_subpackage(self,subpackage_name,
subpackage_path=None,
parent_name=None,
caller_level = 1):
"""Return list of subpackage configurations.
Parameters
----------
subpackage_name : str or None
Name of the subpackage to get the configuration. '*' in
subpackage_name is handled as a wildcard.
subpackage_path : str
If None, then the path is assumed to be the local path plus the
subpackage_name. If a setup.py file is not found in the
subpackage_path, then a default configuration is used.
parent_name : str
Parent name.
"""
if subpackage_name is None:
if subpackage_path is None:
raise ValueError(
"either subpackage_name or subpackage_path must be specified")
subpackage_name = os.path.basename(subpackage_path)
# handle wildcards
l = subpackage_name.split('.')
if subpackage_path is None and '*' in subpackage_name:
return self._wildcard_get_subpackage(subpackage_name,
parent_name,
caller_level = caller_level+1)
assert '*' not in subpackage_name, repr((subpackage_name, subpackage_path, parent_name))
if subpackage_path is None:
subpackage_path = njoin([self.local_path] + l)
else:
subpackage_path = njoin([subpackage_path] + l[:-1])
subpackage_path = self.paths([subpackage_path])[0]
setup_py = njoin(subpackage_path, self.setup_name)
if not self.options['ignore_setup_xxx_py']:
if not os.path.isfile(setup_py):
setup_py = njoin(subpackage_path,
'setup_%s.py' % (subpackage_name))
if not os.path.isfile(setup_py):
if not self.options['assume_default_configuration']:
self.warn('Assuming default configuration '\
'(%s/{setup_%s,setup}.py was not found)' \
% (os.path.dirname(setup_py), subpackage_name))
config = Configuration(subpackage_name, parent_name,
self.top_path, subpackage_path,
caller_level = caller_level+1)
else:
config = self._get_configuration_from_setup_py(
setup_py,
subpackage_name,
subpackage_path,
parent_name,
caller_level = caller_level + 1)
if config:
return [config]
else:
return []
def add_subpackage(self,subpackage_name,
subpackage_path=None,
standalone = False):
"""Add a sub-package to the current Configuration instance.
This is useful in a setup.py script for adding sub-packages to a
package.
Parameters
----------
subpackage_name : str
name of the subpackage
subpackage_path : str
            if given, the subpackage path such that the subpackage is in
            subpackage_path / subpackage_name. If None, the subpackage is
            assumed to be located in the local path / subpackage_name.
standalone : bool
"""
if standalone:
parent_name = None
else:
parent_name = self.name
config_list = self.get_subpackage(subpackage_name, subpackage_path,
parent_name = parent_name,
caller_level = 2)
if not config_list:
self.warn('No configuration returned, assuming unavailable.')
for config in config_list:
d = config
if isinstance(config, Configuration):
d = config.todict()
assert isinstance(d, dict), repr(type(d))
self.info('Appending %s configuration to %s' \
% (d.get('name'), self.name))
self.dict_append(**d)
dist = self.get_distribution()
if dist is not None:
self.warn('distutils distribution has been initialized,'\
' it may be too late to add a subpackage '+ subpackage_name)
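    # Illustrative sketch (package names are hypothetical): a top-level
    # setup.py usually declares its subpackages right after creating the
    # Configuration instance.
    #
    #     config.add_subpackage('linalg')                      # local_path/linalg
    #     config.add_subpackage('extras', 'src/extras')        # explicit path
    #     config.add_subpackage('vendored', standalone=True)   # keeps its own name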
def add_data_dir(self, data_path):
"""Recursively add files under data_path to data_files list.
Recursively add files under data_path to the list of data_files to be
installed (and distributed). The data_path can be either a relative
path-name, or an absolute path-name, or a 2-tuple where the first
argument shows where in the install directory the data directory
should be installed to.
Parameters
----------
data_path : seq or str
Argument can be either
* 2-sequence (<datadir suffix>, <path to data directory>)
* path to data directory where python datadir suffix defaults
to package dir.
Notes
-----
Rules for installation paths::
foo/bar -> (foo/bar, foo/bar) -> parent/foo/bar
(gun, foo/bar) -> parent/gun
foo/* -> (foo/a, foo/a), (foo/b, foo/b) -> parent/foo/a, parent/foo/b
(gun, foo/*) -> (gun, foo/a), (gun, foo/b) -> gun
(gun/*, foo/*) -> parent/gun/a, parent/gun/b
/foo/bar -> (bar, /foo/bar) -> parent/bar
(gun, /foo/bar) -> parent/gun
(fun/*/gun/*, sun/foo/bar) -> parent/fun/foo/gun/bar
Examples
--------
For example suppose the source directory contains fun/foo.dat and
fun/bar/car.dat:
>>> self.add_data_dir('fun') #doctest: +SKIP
>>> self.add_data_dir(('sun', 'fun')) #doctest: +SKIP
>>> self.add_data_dir(('gun', '/full/path/to/fun'))#doctest: +SKIP
Will install data-files to the locations::
<package install directory>/
fun/
foo.dat
bar/
car.dat
sun/
foo.dat
bar/
car.dat
gun/
foo.dat
car.dat
"""
if is_sequence(data_path):
d, data_path = data_path
else:
d = None
if is_sequence(data_path):
[self.add_data_dir((d, p)) for p in data_path]
return
if not is_string(data_path):
raise TypeError("not a string: %r" % (data_path,))
if d is None:
if os.path.isabs(data_path):
return self.add_data_dir((os.path.basename(data_path), data_path))
return self.add_data_dir((data_path, data_path))
paths = self.paths(data_path, include_non_existing=False)
if is_glob_pattern(data_path):
if is_glob_pattern(d):
pattern_list = allpath(d).split(os.sep)
pattern_list.reverse()
# /a/*//b/ -> /a/*/b
rl = list(range(len(pattern_list)-1)); rl.reverse()
for i in rl:
if not pattern_list[i]:
del pattern_list[i]
#
for path in paths:
if not os.path.isdir(path):
print('Not a directory, skipping', path)
continue
rpath = rel_path(path, self.local_path)
path_list = rpath.split(os.sep)
path_list.reverse()
target_list = []
i = 0
for s in pattern_list:
if is_glob_pattern(s):
if i>=len(path_list):
raise ValueError('cannot fill pattern %r with %r' \
% (d, path))
target_list.append(path_list[i])
else:
assert s==path_list[i], repr((s, path_list[i], data_path, d, path, rpath))
target_list.append(s)
i += 1
if path_list[i:]:
self.warn('mismatch of pattern_list=%s and path_list=%s'\
% (pattern_list, path_list))
target_list.reverse()
self.add_data_dir((os.sep.join(target_list), path))
else:
for path in paths:
self.add_data_dir((d, path))
return
assert not is_glob_pattern(d), repr(d)
dist = self.get_distribution()
if dist is not None and dist.data_files is not None:
data_files = dist.data_files
else:
data_files = self.data_files
for path in paths:
for d1, f in list(general_source_directories_files(path)):
target_path = os.path.join(self.path_in_package, d, d1)
data_files.append((target_path, f))
def _optimize_data_files(self):
data_dict = {}
for p, files in self.data_files:
if p not in data_dict:
data_dict[p] = set()
for f in files:
data_dict[p].add(f)
self.data_files[:] = [(p, list(files)) for p, files in data_dict.items()]
def add_data_files(self,*files):
"""Add data files to configuration data_files.
Parameters
----------
files : sequence
Argument(s) can be either
* 2-sequence (<datadir prefix>,<path to data file(s)>)
* paths to data files where python datadir prefix defaults
to package dir.
Notes
-----
The form of each element of the files sequence is very flexible
allowing many combinations of where to get the files from the package
and where they should ultimately be installed on the system. The most
basic usage is for an element of the files argument sequence to be a
simple filename. This will cause that file from the local path to be
installed to the installation path of the self.name package (package
path). The file argument can also be a relative path in which case the
entire relative path will be installed into the package directory.
Finally, the file can be an absolute path name in which case the file
will be found at the absolute path name but installed to the package
path.
This basic behavior can be augmented by passing a 2-tuple in as the
file argument. The first element of the tuple should specify the
relative path (under the package install directory) where the
remaining sequence of files should be installed to (it has nothing to
do with the file-names in the source distribution). The second element
of the tuple is the sequence of files that should be installed. The
files in this sequence can be filenames, relative paths, or absolute
paths. For absolute paths the file will be installed in the top-level
package installation directory (regardless of the first argument).
Filenames and relative path names will be installed in the package
install directory under the path name given as the first element of
the tuple.
Rules for installation paths:
#. file.txt -> (., file.txt)-> parent/file.txt
#. foo/file.txt -> (foo, foo/file.txt) -> parent/foo/file.txt
#. /foo/bar/file.txt -> (., /foo/bar/file.txt) -> parent/file.txt
#. ``*``.txt -> parent/a.txt, parent/b.txt
#. foo/``*``.txt`` -> parent/foo/a.txt, parent/foo/b.txt
#. ``*/*.txt`` -> (``*``, ``*``/``*``.txt) -> parent/c/a.txt, parent/d/b.txt
#. (sun, file.txt) -> parent/sun/file.txt
#. (sun, bar/file.txt) -> parent/sun/file.txt
#. (sun, /foo/bar/file.txt) -> parent/sun/file.txt
#. (sun, ``*``.txt) -> parent/sun/a.txt, parent/sun/b.txt
#. (sun, bar/``*``.txt) -> parent/sun/a.txt, parent/sun/b.txt
#. (sun/``*``, ``*``/``*``.txt) -> parent/sun/c/a.txt, parent/d/b.txt
An additional feature is that the path to a data-file can actually be
a function that takes no arguments and returns the actual path(s) to
the data-files. This is useful when the data files are generated while
building the package.
Examples
--------
Add files to the list of data_files to be included with the package.
>>> self.add_data_files('foo.dat',
... ('fun', ['gun.dat', 'nun/pun.dat', '/tmp/sun.dat']),
... 'bar/cat.dat',
... '/full/path/to/can.dat') #doctest: +SKIP
will install these data files to::
<package install directory>/
foo.dat
fun/
gun.dat
nun/
pun.dat
sun.dat
bar/
               cat.dat
can.dat
where <package install directory> is the package (or sub-package)
directory such as '/usr/lib/python2.4/site-packages/mypackage' ('C:
\\Python2.4 \\Lib \\site-packages \\mypackage') or
'/usr/lib/python2.4/site- packages/mypackage/mysubpackage' ('C:
\\Python2.4 \\Lib \\site-packages \\mypackage \\mysubpackage').
"""
if len(files)>1:
for f in files:
self.add_data_files(f)
return
assert len(files)==1
if is_sequence(files[0]):
d, files = files[0]
else:
d = None
if is_string(files):
filepat = files
elif is_sequence(files):
if len(files)==1:
filepat = files[0]
else:
for f in files:
self.add_data_files((d, f))
return
else:
raise TypeError(repr(type(files)))
if d is None:
if hasattr(filepat, '__call__'):
d = ''
elif os.path.isabs(filepat):
d = ''
else:
d = os.path.dirname(filepat)
self.add_data_files((d, files))
return
paths = self.paths(filepat, include_non_existing=False)
if is_glob_pattern(filepat):
if is_glob_pattern(d):
pattern_list = d.split(os.sep)
pattern_list.reverse()
for path in paths:
path_list = path.split(os.sep)
path_list.reverse()
path_list.pop() # filename
target_list = []
i = 0
for s in pattern_list:
if is_glob_pattern(s):
target_list.append(path_list[i])
i += 1
else:
target_list.append(s)
target_list.reverse()
self.add_data_files((os.sep.join(target_list), path))
else:
self.add_data_files((d, paths))
return
assert not is_glob_pattern(d), repr((d, filepat))
dist = self.get_distribution()
if dist is not None and dist.data_files is not None:
data_files = dist.data_files
else:
data_files = self.data_files
data_files.append((os.path.join(self.path_in_package, d), paths))
### XXX Implement add_py_modules
def add_define_macros(self, macros):
"""Add define macros to configuration
        Add the given sequence of macro name and value duples to the
        define_macros list. This list will be visible to all extension
modules of the current package.
"""
dist = self.get_distribution()
if dist is not None:
if not hasattr(dist, 'define_macros'):
dist.define_macros = []
dist.define_macros.extend(macros)
else:
self.define_macros.extend(macros)
def add_include_dirs(self,*paths):
"""Add paths to configuration include directories.
        Add the given sequence of paths to the include_dirs
list. This list will be visible to all extension modules of the
current package.
"""
include_dirs = self.paths(paths)
dist = self.get_distribution()
if dist is not None:
if dist.include_dirs is None:
dist.include_dirs = []
dist.include_dirs.extend(include_dirs)
else:
self.include_dirs.extend(include_dirs)
def add_headers(self,*files):
"""Add installable headers to configuration.
        Add the given sequence of files to the headers list.
By default, headers will be installed under <python-
include>/<self.name.replace('.','/')>/ directory. If an item of files
is a tuple, then its first argument specifies the actual installation
location relative to the <python-include> path.
Parameters
----------
files : str or seq
Argument(s) can be either:
* 2-sequence (<includedir suffix>,<path to header file(s)>)
* path(s) to header file(s) where python includedir suffix will
default to package name.
"""
headers = []
for path in files:
if is_string(path):
[headers.append((self.name, p)) for p in self.paths(path)]
else:
if not isinstance(path, (tuple, list)) or len(path) != 2:
raise TypeError(repr(path))
[headers.append((path[0], p)) for p in self.paths(path[1])]
dist = self.get_distribution()
if dist is not None:
if dist.headers is None:
dist.headers = []
dist.headers.extend(headers)
else:
self.headers.extend(headers)
def paths(self,*paths,**kws):
"""Apply glob to paths and prepend local_path if needed.
Applies glob.glob(...) to each path in the sequence (if needed) and
pre-pends the local_path if needed. Because this is called on all
source lists, this allows wildcard characters to be specified in lists
        of sources for extension modules and libraries and scripts and allows
        path-names to be relative to the source directory.
"""
include_non_existing = kws.get('include_non_existing', True)
return gpaths(paths,
local_path = self.local_path,
include_non_existing=include_non_existing)
def _fix_paths_dict(self, kw):
for k in kw.keys():
v = kw[k]
if k in ['sources', 'depends', 'include_dirs', 'library_dirs',
'module_dirs', 'extra_objects']:
new_v = self.paths(v)
kw[k] = new_v
def add_extension(self,name,sources,**kw):
"""Add extension to configuration.
Create and add an Extension instance to the ext_modules list. This
method also takes the following optional keyword arguments that are
passed on to the Extension constructor.
Parameters
----------
name : str
name of the extension
sources : seq
list of the sources. The list of sources may contain functions
(called source generators) which must take an extension instance
and a build directory as inputs and return a source file or list of
source files or None. If None is returned then no sources are
generated. If the Extension instance has no sources after
processing all source generators, then no extension module is
built.
include_dirs :
define_macros :
undef_macros :
library_dirs :
libraries :
runtime_library_dirs :
extra_objects :
extra_compile_args :
extra_link_args :
extra_f77_compile_args :
extra_f90_compile_args :
export_symbols :
swig_opts :
depends :
The depends list contains paths to files or directories that the
sources of the extension module depend on. If any path in the
depends list is newer than the extension module, then the module
will be rebuilt.
language :
f2py_options :
module_dirs :
extra_info : dict or list
dict or list of dict of keywords to be appended to keywords.
Notes
-----
The self.paths(...) method is applied to all lists that may contain
paths.
"""
ext_args = copy.copy(kw)
ext_args['name'] = dot_join(self.name, name)
ext_args['sources'] = sources
if 'extra_info' in ext_args:
extra_info = ext_args['extra_info']
del ext_args['extra_info']
if isinstance(extra_info, dict):
extra_info = [extra_info]
for info in extra_info:
assert isinstance(info, dict), repr(info)
dict_append(ext_args,**info)
self._fix_paths_dict(ext_args)
# Resolve out-of-tree dependencies
libraries = ext_args.get('libraries', [])
libnames = []
ext_args['libraries'] = []
for libname in libraries:
if isinstance(libname, tuple):
self._fix_paths_dict(libname[1])
# Handle library names of the form libname@relative/path/to/library
if '@' in libname:
lname, lpath = libname.split('@', 1)
lpath = os.path.abspath(njoin(self.local_path, lpath))
if os.path.isdir(lpath):
c = self.get_subpackage(None, lpath,
caller_level = 2)
if isinstance(c, Configuration):
c = c.todict()
for l in [l[0] for l in c.get('libraries', [])]:
llname = l.split('__OF__', 1)[0]
if llname == lname:
c.pop('name', None)
dict_append(ext_args,**c)
break
continue
libnames.append(libname)
ext_args['libraries'] = libnames + ext_args['libraries']
ext_args['define_macros'] = \
self.define_macros + ext_args.get('define_macros', [])
from numpy.distutils.core import Extension
ext = Extension(**ext_args)
self.ext_modules.append(ext)
dist = self.get_distribution()
if dist is not None:
self.warn('distutils distribution has been initialized,'\
' it may be too late to add an extension '+name)
return ext
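    # A minimal sketch (module and file names are hypothetical) of a typical
    # call; relative paths and wildcards in 'sources' are resolved through
    # self.paths() as described above.
    #
    #     config.add_extension('_fastops',
    #                          sources=['src/fastopsmodule.c'],
    #                          include_dirs=get_numpy_include_dirs(),
    #                          define_macros=[('USE_FAST_PATH', '1')],
    #                          libraries=['m'])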
def add_library(self,name,sources,**build_info):
"""
Add library to configuration.
Parameters
----------
name : str
Name of the extension.
sources : sequence
List of the sources. The list of sources may contain functions
(called source generators) which must take an extension instance
and a build directory as inputs and return a source file or list of
source files or None. If None is returned then no sources are
generated. If the Extension instance has no sources after
processing all source generators, then no extension module is
built.
build_info : dict, optional
The following keys are allowed:
* depends
* macros
* include_dirs
* extra_compiler_args
* extra_f77_compile_args
* extra_f90_compile_args
* f2py_options
* language
"""
self._add_library(name, sources, None, build_info)
dist = self.get_distribution()
if dist is not None:
self.warn('distutils distribution has been initialized,'\
' it may be too late to add a library '+ name)
def _add_library(self, name, sources, install_dir, build_info):
"""Common implementation for add_library and add_installed_library. Do
not use directly"""
build_info = copy.copy(build_info)
build_info['sources'] = sources
# Sometimes, depends is not set up to an empty list by default, and if
# depends is not given to add_library, distutils barfs (#1134)
if not 'depends' in build_info:
build_info['depends'] = []
self._fix_paths_dict(build_info)
# Add to libraries list so that it is build with build_clib
self.libraries.append((name, build_info))
def add_installed_library(self, name, sources, install_dir, build_info=None):
"""
Similar to add_library, but the specified library is installed.
Most C libraries used with `distutils` are only used to build python
extensions, but libraries built through this method will be installed
so that they can be reused by third-party packages.
Parameters
----------
name : str
Name of the installed library.
sources : sequence
List of the library's source files. See `add_library` for details.
install_dir : str
Path to install the library, relative to the current sub-package.
build_info : dict, optional
The following keys are allowed:
* depends
* macros
* include_dirs
* extra_compiler_args
* extra_f77_compile_args
* extra_f90_compile_args
* f2py_options
* language
Returns
-------
None
See Also
--------
add_library, add_npy_pkg_config, get_info
Notes
-----
The best way to encode the options required to link against the specified
C libraries is to use a "libname.ini" file, and use `get_info` to
retrieve the required options (see `add_npy_pkg_config` for more
information).
"""
if not build_info:
build_info = {}
install_dir = os.path.join(self.package_path, install_dir)
self._add_library(name, sources, install_dir, build_info)
self.installed_libraries.append(InstallableLib(name, build_info, install_dir))
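    # Illustrative sketch (library, file and template names are hypothetical):
    # install a reusable C library together with the npy-pkg-config file that
    # advertises it, so third-party packages can link against it via get_info.
    #
    #     config.add_installed_library('mylib',
    #                                  sources=['src/mylib.c'],
    #                                  install_dir='lib')
    #     config.add_npy_pkg_config('mylib.ini.in', 'lib',
    #                               {'somevar': 'value'})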
def add_npy_pkg_config(self, template, install_dir, subst_dict=None):
"""
Generate and install a npy-pkg config file from a template.
The config file generated from `template` is installed in the
given install directory, using `subst_dict` for variable substitution.
Parameters
----------
template : str
            The path of the template, relative to the current package path.
install_dir : str
            Where to install the npy-pkg config file, relative to the current
            package path.
subst_dict : dict, optional
If given, any string of the form ``@key@`` will be replaced by
``subst_dict[key]`` in the template file when installed. The install
prefix is always available through the variable ``@prefix@``, since the
install prefix is not easy to get reliably from setup.py.
See also
--------
add_installed_library, get_info
Notes
-----
        This works for both standard installs and in-place builds, i.e. the
        ``@prefix@`` refers to the source directory for in-place builds.
Examples
--------
::
config.add_npy_pkg_config('foo.ini.in', 'lib', {'foo': bar})
Assuming the foo.ini.in file has the following content::
[meta]
Name=@foo@
Version=1.0
Description=dummy description
[default]
Cflags=-I@prefix@/include
Libs=
The generated file will have the following content::
[meta]
Name=bar
Version=1.0
Description=dummy description
[default]
Cflags=-Iprefix_dir/include
Libs=
and will be installed as foo.ini in the 'lib' subpath.
Cross-compilation
-----------------
When cross-compiling with numpy distutils, it might be necessary to
use modified npy-pkg-config files. Using the default/generated files
will link with the host libraries (i.e. libnpymath.a). For
        cross-compilation you of course need to link with target libraries,
while using the host Python installation.
You can copy out the numpy/core/lib/npy-pkg-config directory, add a
pkgdir value to the .ini files and set NPY_PKG_CONFIG_PATH environment
variable to point to the directory with the modified npy-pkg-config
files.
Example npymath.ini modified for cross-compilation::
[meta]
Name=npymath
Description=Portable, core math library implementing C99 standard
Version=0.1
[variables]
pkgname=numpy.core
pkgdir=/build/arm-linux-gnueabi/sysroot/usr/lib/python3.7/site-packages/numpy/core
prefix=${pkgdir}
libdir=${prefix}/lib
includedir=${prefix}/include
[default]
Libs=-L${libdir} -lnpymath
Cflags=-I${includedir}
Requires=mlib
[msvc]
Libs=/LIBPATH:${libdir} npymath.lib
Cflags=/INCLUDE:${includedir}
Requires=mlib
"""
if subst_dict is None:
subst_dict = {}
template = os.path.join(self.package_path, template)
if self.name in self.installed_pkg_config:
self.installed_pkg_config[self.name].append((template, install_dir,
subst_dict))
else:
self.installed_pkg_config[self.name] = [(template, install_dir,
subst_dict)]
def add_scripts(self,*files):
"""Add scripts to configuration.
        Add the sequence of files to the scripts list.
Scripts will be installed under the <prefix>/bin/ directory.
"""
scripts = self.paths(files)
dist = self.get_distribution()
if dist is not None:
if dist.scripts is None:
dist.scripts = []
dist.scripts.extend(scripts)
else:
self.scripts.extend(scripts)
def dict_append(self,**dict):
for key in self.list_keys:
a = getattr(self, key)
a.extend(dict.get(key, []))
for key in self.dict_keys:
a = getattr(self, key)
a.update(dict.get(key, {}))
known_keys = self.list_keys + self.dict_keys + self.extra_keys
for key in dict.keys():
if key not in known_keys:
a = getattr(self, key, None)
if a and a==dict[key]: continue
self.warn('Inheriting attribute %r=%r from %r' \
% (key, dict[key], dict.get('name', '?')))
setattr(self, key, dict[key])
self.extra_keys.append(key)
elif key in self.extra_keys:
self.info('Ignoring attempt to set %r (from %r to %r)' \
% (key, getattr(self, key), dict[key]))
elif key in known_keys:
# key is already processed above
pass
else:
raise ValueError("Don't know about key=%r" % (key))
def __str__(self):
from pprint import pformat
known_keys = self.list_keys + self.dict_keys + self.extra_keys
s = '<'+5*'-' + '\n'
s += 'Configuration of '+self.name+':\n'
known_keys.sort()
for k in known_keys:
a = getattr(self, k, None)
if a:
s += '%s = %s\n' % (k, pformat(a))
s += 5*'-' + '>'
return s
def get_config_cmd(self):
"""
Returns the numpy.distutils config command instance.
"""
cmd = get_cmd('config')
cmd.ensure_finalized()
cmd.dump_source = 0
cmd.noisy = 0
old_path = os.environ.get('PATH')
if old_path:
path = os.pathsep.join(['.', old_path])
os.environ['PATH'] = path
return cmd
def get_build_temp_dir(self):
"""
Return a path to a temporary directory where temporary files should be
placed.
"""
cmd = get_cmd('build')
cmd.ensure_finalized()
return cmd.build_temp
def have_f77c(self):
"""Check for availability of Fortran 77 compiler.
Use it inside source generating function to ensure that
setup distribution instance has been initialized.
Notes
-----
True if a Fortran 77 compiler is available (because a simple Fortran 77
code was able to be compiled successfully).
"""
simple_fortran_subroutine = '''
subroutine simple
end
'''
config_cmd = self.get_config_cmd()
flag = config_cmd.try_compile(simple_fortran_subroutine, lang='f77')
return flag
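    # Illustrative sketch (hypothetical source generator): the check is meant
    # to be used from inside a source generating function, once the distutils
    # distribution has been initialized.
    #
    #     def maybe_fortran_sources(ext, build_dir):
    #         if not config.have_f77c():
    #             return None          # no usable F77 compiler: skip these sources
    #         return ['src/legacy.f']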
def have_f90c(self):
"""Check for availability of Fortran 90 compiler.
Use it inside source generating function to ensure that
setup distribution instance has been initialized.
Notes
-----
True if a Fortran 90 compiler is available (because a simple Fortran
90 code was able to be compiled successfully)
"""
simple_fortran_subroutine = '''
subroutine simple
end
'''
config_cmd = self.get_config_cmd()
flag = config_cmd.try_compile(simple_fortran_subroutine, lang='f90')
return flag
def append_to(self, extlib):
"""Append libraries, include_dirs to extension or library item.
"""
if is_sequence(extlib):
lib_name, build_info = extlib
dict_append(build_info,
libraries=self.libraries,
include_dirs=self.include_dirs)
else:
from numpy.distutils.core import Extension
assert isinstance(extlib, Extension), repr(extlib)
extlib.libraries.extend(self.libraries)
extlib.include_dirs.extend(self.include_dirs)
def _get_svn_revision(self, path):
"""Return path's SVN revision number.
"""
try:
output = subprocess.check_output(
['svnversion'], shell=True, cwd=path)
except (subprocess.CalledProcessError, OSError):
pass
else:
m = re.match(rb'(?P<revision>\d+)', output)
if m:
return int(m.group('revision'))
if sys.platform=='win32' and os.environ.get('SVN_ASP_DOT_NET_HACK', None):
entries = njoin(path, '_svn', 'entries')
else:
entries = njoin(path, '.svn', 'entries')
if os.path.isfile(entries):
with open(entries) as f:
fstr = f.read()
if fstr[:5] == '<?xml': # pre 1.4
m = re.search(r'revision="(?P<revision>\d+)"', fstr)
if m:
return int(m.group('revision'))
else: # non-xml entries file --- check to be sure that
m = re.search(r'dir[\n\r]+(?P<revision>\d+)', fstr)
if m:
return int(m.group('revision'))
return None
def _get_hg_revision(self, path):
"""Return path's Mercurial revision number.
"""
try:
output = subprocess.check_output(
['hg identify --num'], shell=True, cwd=path)
except (subprocess.CalledProcessError, OSError):
pass
else:
m = re.match(rb'(?P<revision>\d+)', output)
if m:
return int(m.group('revision'))
branch_fn = njoin(path, '.hg', 'branch')
branch_cache_fn = njoin(path, '.hg', 'branch.cache')
if os.path.isfile(branch_fn):
branch0 = None
with open(branch_fn) as f:
revision0 = f.read().strip()
branch_map = {}
            with open(branch_cache_fn, 'r') as bf:
                for line in bf:
                    branch1, revision1 = line.split()[:2]
                    if revision1==revision0:
                        branch0 = branch1
                    try:
                        revision1 = int(revision1)
                    except ValueError:
                        continue
                    branch_map[branch1] = revision1
return branch_map.get(branch0)
return None
def get_version(self, version_file=None, version_variable=None):
"""Try to get version string of a package.
Return a version string of the current package or None if the version
information could not be detected.
Notes
-----
This method scans files named
        __version__.py, <packagename>_version.py, version.py,
        __svn_version__.py, and __hg_version__.py for string variables
        version, __version__, and
<packagename>_version, until a version number is found.
"""
version = getattr(self, 'version', None)
if version is not None:
return version
# Get version from version file.
if version_file is None:
files = ['__version__.py',
self.name.split('.')[-1]+'_version.py',
'version.py',
'__svn_version__.py',
'__hg_version__.py']
else:
files = [version_file]
if version_variable is None:
version_vars = ['version',
'__version__',
self.name.split('.')[-1]+'_version']
else:
version_vars = [version_variable]
for f in files:
fn = njoin(self.local_path, f)
if os.path.isfile(fn):
info = ('.py', 'U', 1)
name = os.path.splitext(os.path.basename(fn))[0]
n = dot_join(self.name, name)
try:
version_module = npy_load_module('_'.join(n.split('.')),
fn, info)
except ImportError:
msg = get_exception()
self.warn(str(msg))
version_module = None
if version_module is None:
continue
for a in version_vars:
version = getattr(version_module, a, None)
if version is not None:
break
if version is not None:
break
if version is not None:
self.version = version
return version
# Get version as SVN or Mercurial revision number
revision = self._get_svn_revision(self.local_path)
if revision is None:
revision = self._get_hg_revision(self.local_path)
if revision is not None:
version = str(revision)
self.version = version
return version
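    # Illustrative sketch (names and contents are hypothetical): for a package
    # whose setup.py directory contains version.py with ``version = '1.2.3'``,
    #
    #     config = Configuration('mypkg', parent_package, top_path)
    #     config.get_version()    # -> '1.2.3', also cached as config.version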
def make_svn_version_py(self, delete=True):
"""Appends a data function to the data_files list that will generate
__svn_version__.py file to the current package directory.
        Generate package __svn_version__.py file from the SVN revision number;
it will be removed after python exits but will be available
when sdist, etc commands are executed.
Notes
-----
If __svn_version__.py existed before, nothing is done.
This is
intended for working with source directories that are in an SVN
repository.
"""
target = njoin(self.local_path, '__svn_version__.py')
revision = self._get_svn_revision(self.local_path)
if os.path.isfile(target) or revision is None:
return
else:
def generate_svn_version_py():
if not os.path.isfile(target):
version = str(revision)
self.info('Creating %s (version=%r)' % (target, version))
with open(target, 'w') as f:
f.write('version = %r\n' % (version))
def rm_file(f=target,p=self.info):
if delete:
try: os.remove(f); p('removed '+f)
except OSError: pass
try: os.remove(f+'c'); p('removed '+f+'c')
except OSError: pass
atexit.register(rm_file)
return target
self.add_data_files(('', generate_svn_version_py()))
def make_hg_version_py(self, delete=True):
"""Appends a data function to the data_files list that will generate
__hg_version__.py file to the current package directory.
        Generate package __hg_version__.py file from the Mercurial revision;
it will be removed after python exits but will be available
when sdist, etc commands are executed.
Notes
-----
If __hg_version__.py existed before, nothing is done.
This is intended for working with source directories that are
in an Mercurial repository.
"""
target = njoin(self.local_path, '__hg_version__.py')
revision = self._get_hg_revision(self.local_path)
if os.path.isfile(target) or revision is None:
return
else:
def generate_hg_version_py():
if not os.path.isfile(target):
version = str(revision)
self.info('Creating %s (version=%r)' % (target, version))
with open(target, 'w') as f:
f.write('version = %r\n' % (version))
def rm_file(f=target,p=self.info):
if delete:
try: os.remove(f); p('removed '+f)
except OSError: pass
try: os.remove(f+'c'); p('removed '+f+'c')
except OSError: pass
atexit.register(rm_file)
return target
self.add_data_files(('', generate_hg_version_py()))
def make_config_py(self,name='__config__'):
"""Generate package __config__.py file containing system_info
information used during building the package.
This file is installed to the
package installation directory.
"""
self.py_modules.append((self.name, name, generate_config_py))
def get_info(self,*names):
"""Get resources information.
Return information (from system_info.get_info) for all of the names in
the argument list in a single dictionary.
"""
from .system_info import get_info, dict_append
info_dict = {}
for a in names:
dict_append(info_dict,**get_info(a))
return info_dict
def get_cmd(cmdname, _cache={}):
if cmdname not in _cache:
import distutils.core
dist = distutils.core._setup_distribution
if dist is None:
from distutils.errors import DistutilsInternalError
raise DistutilsInternalError(
'setup distribution instance not initialized')
cmd = dist.get_command_obj(cmdname)
_cache[cmdname] = cmd
return _cache[cmdname]
def get_numpy_include_dirs():
# numpy_include_dirs are set by numpy/core/setup.py, otherwise []
include_dirs = Configuration.numpy_include_dirs[:]
if not include_dirs:
import numpy
include_dirs = [ numpy.get_include() ]
# else running numpy/core/setup.py
return include_dirs
def get_npy_pkg_dir():
"""Return the path where to find the npy-pkg-config directory.
If the NPY_PKG_CONFIG_PATH environment variable is set, the value of that
is returned. Otherwise, a path inside the location of the numpy module is
returned.
The NPY_PKG_CONFIG_PATH can be useful when cross-compiling, maintaining
customized npy-pkg-config .ini files for the cross-compilation
environment, and using them when cross-compiling.
"""
# XXX: import here for bootstrapping reasons
import numpy
d = os.environ.get('NPY_PKG_CONFIG_PATH')
if d is not None:
return d
d = os.path.join(os.path.dirname(numpy.__file__),
'core', 'lib', 'npy-pkg-config')
return d
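# Illustrative sketch, not part of the original numpy.distutils module: the
# lookup above prefers the NPY_PKG_CONFIG_PATH environment variable over the
# directory shipped inside numpy, which is what makes cross-compilation
# setups possible. The path below is purely hypothetical.
def _example_npy_pkg_dir_override():
    os.environ['NPY_PKG_CONFIG_PATH'] = '/opt/cross/npy-pkg-config'
    return get_npy_pkg_dir()  # -> '/opt/cross/npy-pkg-config'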
def get_pkg_info(pkgname, dirs=None):
"""
Return library info for the given package.
Parameters
----------
pkgname : str
Name of the package (should match the name of the .ini file, without
the extension, e.g. foo for the file foo.ini).
dirs : sequence, optional
If given, should be a sequence of additional directories where to look
for npy-pkg-config files. Those directories are searched prior to the
NumPy directory.
Returns
-------
pkginfo : class instance
The `LibraryInfo` instance containing the build information.
Raises
------
PkgNotFound
If the package is not found.
See Also
--------
Configuration.add_npy_pkg_config, Configuration.add_installed_library,
get_info
"""
from numpy.distutils.npy_pkg_config import read_config
if dirs:
dirs.append(get_npy_pkg_dir())
else:
dirs = [get_npy_pkg_dir()]
return read_config(pkgname, dirs)
def get_info(pkgname, dirs=None):
"""
Return an info dict for a given C library.
The info dict contains the necessary options to use the C library.
Parameters
----------
pkgname : str
Name of the package (should match the name of the .ini file, without
the extension, e.g. foo for the file foo.ini).
dirs : sequence, optional
If given, should be a sequence of additional directories where to look
for npy-pkg-config files. Those directories are searched prior to the
NumPy directory.
Returns
-------
info : dict
The dictionary with build information.
Raises
------
PkgNotFound
If the package is not found.
See Also
--------
Configuration.add_npy_pkg_config, Configuration.add_installed_library,
get_pkg_info
Examples
--------
To get the necessary information for the npymath library from NumPy:
>>> npymath_info = np.distutils.misc_util.get_info('npymath')
>>> npymath_info #doctest: +SKIP
{'define_macros': [], 'libraries': ['npymath'], 'library_dirs':
['.../numpy/core/lib'], 'include_dirs': ['.../numpy/core/include']}
This info dict can then be used as input to a `Configuration` instance::
config.add_extension('foo', sources=['foo.c'], extra_info=npymath_info)
"""
from numpy.distutils.npy_pkg_config import parse_flags
pkg_info = get_pkg_info(pkgname, dirs)
# Translate LibraryInfo instance into a build_info dict
info = parse_flags(pkg_info.cflags())
for k, v in parse_flags(pkg_info.libs()).items():
info[k].extend(v)
# add_extension extra_info argument is ANAL
info['define_macros'] = info['macros']
del info['macros']
del info['ignored']
return info
def is_bootstrapping():
if sys.version_info[0] >= 3:
import builtins
else:
import __builtin__ as builtins
try:
builtins.__NUMPY_SETUP__
return True
except AttributeError:
return False
#########################
def default_config_dict(name = None, parent_name = None, local_path=None):
"""Return a configuration dictionary for usage in
configuration() function defined in file setup_<name>.py.
"""
import warnings
warnings.warn('Use Configuration(%r,%r,top_path=%r) instead of '\
'deprecated default_config_dict(%r,%r,%r)'
% (name, parent_name, local_path,
name, parent_name, local_path,
), stacklevel=2)
c = Configuration(name, parent_name, local_path)
return c.todict()
def dict_append(d, **kws):
for k, v in kws.items():
if k in d:
ov = d[k]
if isinstance(ov, str):
d[k] = v
else:
d[k].extend(v)
else:
d[k] = v
def appendpath(prefix, path):
if os.path.sep != '/':
prefix = prefix.replace('/', os.path.sep)
path = path.replace('/', os.path.sep)
drive = ''
if os.path.isabs(path):
drive = os.path.splitdrive(prefix)[0]
absprefix = os.path.splitdrive(os.path.abspath(prefix))[1]
pathdrive, path = os.path.splitdrive(path)
d = os.path.commonprefix([absprefix, path])
if os.path.join(absprefix[:len(d)], absprefix[len(d):]) != absprefix \
or os.path.join(path[:len(d)], path[len(d):]) != path:
# Handle invalid paths
d = os.path.dirname(d)
subpath = path[len(d):]
if os.path.isabs(subpath):
subpath = subpath[1:]
else:
subpath = path
return os.path.normpath(njoin(drive + prefix, subpath))
def generate_config_py(target):
"""Generate config.py file containing system_info information
used during building the package.
Usage:
config['py_modules'].append((packagename, '__config__',generate_config_py))
"""
from numpy.distutils.system_info import system_info
from distutils.dir_util import mkpath
mkpath(os.path.dirname(target))
with open(target, 'w') as f:
f.write('# This file is generated by numpy\'s %s\n' % (os.path.basename(sys.argv[0])))
f.write('# It contains system_info results at the time of building this package.\n')
f.write('__all__ = ["get_info","show"]\n\n')
# For gfortran+msvc combination, extra shared libraries may exist
f.write(textwrap.dedent("""
import os
import sys
extra_dll_dir = os.path.join(os.path.dirname(__file__), '.libs')
if sys.platform == 'win32' and os.path.isdir(extra_dll_dir):
os.environ.setdefault('PATH', '')
os.environ['PATH'] += os.pathsep + extra_dll_dir
"""))
for k, i in system_info.saved_results.items():
f.write('%s=%r\n' % (k, i))
f.write(textwrap.dedent(r'''
def get_info(name):
g = globals()
return g.get(name, g.get(name + "_info", {}))
def show():
for name,info_dict in globals().items():
if name[0] == "_" or type(info_dict) is not type({}): continue
print(name + ":")
if not info_dict:
print(" NOT AVAILABLE")
for k,v in info_dict.items():
v = str(v)
if k == "sources" and len(v) > 200:
v = v[:60] + " ...\n... " + v[-60:]
print(" %s = %s" % (k,v))
'''))
return target
def msvc_version(compiler):
"""Return version major and minor of compiler instance if it is
MSVC, raise an exception otherwise."""
if not compiler.compiler_type == "msvc":
raise ValueError("Compiler instance is not msvc (%s)"\
% compiler.compiler_type)
return compiler._MSVCCompiler__version
def get_build_architecture():
# Importing distutils.msvccompiler triggers a warning on non-Windows
# systems, so delay the import to here.
from distutils.msvccompiler import get_build_architecture
return get_build_architecture()
| bsd-3-clause | -7,108,239,796,244,576,000 | 34.67678 | 102 | 0.544808 | false |
yunohost-bot/apps | list_builder.py | 1 | 11421 | #!/usr/bin/env python2
import re
import os
import sys
import time
import json
import zlib
import argparse
import requests
from dateutil.parser import parse
# Regular expression patterns
re_commit_author = re.compile(
r'^author (?P<name>.+) <(?P<email>.+)> (?P<time>\d+) (?P<tz>[+-]\d+)$',
re.MULTILINE
)
# Helpers
def fail(msg, retcode=1):
"""Show failure message and exit."""
print("Error: {0:s}".format(msg))
sys.exit(retcode)
def include_translations_in_manifest(app_name, manifest):
for i in os.listdir("locales"):
if not i.endswith("json"):
continue
if i == "en.json":
continue
current_lang = i.split(".")[0]
translations = json.load(open(os.path.join("locales", i), "r"))
key = "%s_manifest_description" % app_name
if key in translations and translations[key]:
manifest["description"][current_lang] = translations[key]
for category, questions in manifest["arguments"].items():
for question in questions:
key = "%s_manifest_arguments_%s_%s" % (app_name, category, question["name"])
# don't overwrite already existing translation in manifests for now
                if key in translations and translations[key] and current_lang not in question["ask"]:
print "[ask]", current_lang, key
question["ask"][current_lang] = translations[key]
key = "%s_manifest_arguments_%s_help_%s" % (app_name, category, question["name"])
# don't overwrite already existing translation in manifests for now
                if key in translations and translations[key] and current_lang not in question.get("help", []):
                    print "[help]", current_lang, key
                    question.setdefault("help", {})[current_lang] = translations[key]
return manifest
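# Illustrative sketch, not part of the original script: the translation keys
# looked up above follow a naming convention built from the app name, the
# argument category and the question name. The helper below only rebuilds
# those keys for a made-up app so the convention is easy to see.
def _example_translation_keys(app_name="myapp", category="install", question_name="domain"):
    description_key = "%s_manifest_description" % app_name
    ask_key = "%s_manifest_arguments_%s_%s" % (app_name, category, question_name)
    help_key = "%s_manifest_arguments_%s_help_%s" % (app_name, category, question_name)
    # e.g. ("myapp_manifest_description",
    #       "myapp_manifest_arguments_install_domain",
    #       "myapp_manifest_arguments_install_help_domain")
    return description_key, ask_key, help_key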
def get_json(url, verify=True):
try:
# Retrieve and load manifest
if ".github" in url:
r = requests.get(url, verify=verify, auth=token)
else:
r = requests.get(url, verify=verify)
r.raise_for_status()
return r.json()
except requests.exceptions.RequestException as e:
print("-> Error: unable to request %s, %s" % (url, e))
return None
except ValueError as e:
print("-> Error: unable to decode json from %s : %s" % (url, e))
return None
def get_zlib(url, verify=True):
try:
# Retrieve last commit information
        r = requests.get(url, verify=verify)
r.raise_for_status()
return zlib.decompress(r.content).decode('utf-8').split('\x00')
except requests.exceptions.RequestException as e:
print("-> Error: unable to request %s, %s" % (obj_url, e))
return None
except zlib.error as e:
print("-> Error: unable to decompress object from %s : %s" % (url, e))
return None
# Main
# Create argument parser
parser = argparse.ArgumentParser(description='Process YunoHost application list.')
# Add arguments and options
parser.add_argument("input", help="Path to json input file")
parser.add_argument("-o", "--output", help="Path to result file. If not specified, '-build' suffix will be added to input filename.")
parser.add_argument("-g", "--github", help="Github token <username>:<password>")
# Parse args
args = parser.parse_args()
try:
# Retrieve apps list from json file
with open(args.input) as f:
apps_list = json.load(f)
except IOError as e:
fail("%s file not found" % args.input)
# Get list name from filename
list_name = os.path.splitext(os.path.basename(args.input))[0]
print(":: Building %s list..." % list_name)
# Args default
if not args.output:
args.output = '%s-build.json' % list_name
already_built_file = {}
if os.path.exists(args.output):
try:
already_built_file = json.load(open(args.output))
except Exception as e:
print("Error while trying to load already built file: %s" % e)
# GitHub credentials
if args.github:
token = (args.github.split(':')[0], args.github.split(':')[1])
else:
token = None
# Loop through every apps
result_dict = {}
for app, info in apps_list.items():
print("---")
print("Processing '%s'..." % app)
app = app.lower()
    # Store useful values
app_branch = info['branch']
app_url = info['url']
app_rev = info['revision']
app_state = info["state"]
app_level = info.get("level")
app_maintained = info.get("maintained", True)
forge_site = app_url.split('/')[2]
owner = app_url.split('/')[3]
repo = app_url.split('/')[4]
if forge_site == "github.com":
forge_type = "github"
elif forge_site == "framagit.org":
forge_type = "gitlab"
elif forge_site == "code.ffdn.org":
forge_type = "gogs"
else:
forge_type = "unknown"
previous_state = already_built_file.get(app, {}).get("state", {})
manifest = {}
timestamp = None
previous_rev = already_built_file.get(app, {}).get("git", {}).get("revision", None)
previous_url = already_built_file.get(app, {}).get("git", {}).get("url")
previous_level = already_built_file.get(app, {}).get("level")
previous_maintained = already_built_file.get(app, {}).get("maintained")
if forge_type == "github" and app_rev == "HEAD":
if previous_rev is None:
previous_rev = 'HEAD'
url = "https://api.github.com/repos/{}/{}/git/refs/heads/{}".format(owner, repo, app_branch)
head = get_json(url)
app_rev = head["object"]["sha"]
url = "https://api.github.com/repos/{}/{}/compare/{}...{}".format(owner, repo, previous_rev, app_branch)
diff = get_json(url)
if not diff["commits"]:
app_rev = previous_rev if previous_rev != 'HEAD' else app_rev
else:
# Only if those files got updated, do we want to update the
            # commit (otherwise that would trigger an unnecessary upgrade)
ignore_files = [ "README.md", "LICENSE", ".gitignore", "check_process", ".travis.yml" ]
diff_files = [ f for f in diff["files"] if f["filename"] not in ignore_files ]
if diff_files:
print("This app points to HEAD and significant changes where found between HEAD and previous commit")
app_rev = diff["commits"][-1]["sha"]
else:
print("This app points to HEAD but no significant changes where found compared to HEAD, so keeping the previous commit")
app_rev = previous_rev if previous_rev != 'HEAD' else app_rev
print("Previous commit : %s" % previous_rev)
print("Current commit : %s" % app_rev)
if previous_rev == app_rev and previous_url == app_url:
print("Already up to date, ignoring")
result_dict[app] = already_built_file[app]
if previous_state != app_state:
result_dict[app]["state"] = app_state
print("... but has changed of state, updating it from '%s' to '%s'" % (previous_state, app_state))
if previous_level != app_level or app_level is None:
result_dict[app]["level"] = app_level
print("... but has changed of level, updating it from '%s' to '%s'" % (previous_level, app_level))
if previous_maintained != app_maintained:
result_dict[app]["maintained"] = app_maintained
print("... but maintained status changed, updating it from '%s' to '%s'" % (previous_maintained, app_maintained))
print "update translations but don't download anything"
result_dict[app]['manifest'] = include_translations_in_manifest(app, result_dict[app]['manifest'])
continue
print("Revision changed ! Updating...")
# Hosted on GitHub
if forge_type == "github":
raw_url = 'https://raw.githubusercontent.com/%s/%s/%s/manifest.json' % (
owner, repo, app_rev
)
manifest = get_json(raw_url)
if manifest is None:
continue
api_url = 'https://api.github.com/repos/%s/%s/commits/%s' % (
owner, repo, app_rev
)
info2 = get_json(api_url)
if info2 is None:
continue
commit_date = parse(info2['commit']['author']['date'])
timestamp = int(time.mktime(commit_date.timetuple()))
# Gitlab-type forge
elif forge_type == "gitlab":
raw_url = '%s/raw/%s/manifest.json' % (app_url, app_rev)
manifest = get_json(raw_url, verify=True)
if manifest is None:
continue
api_url = 'https://%s/api/v4/projects/%s%%2F%s/repository/commits/%s' % (forge_site, owner, repo, app_rev)
commit = get_json(api_url)
if commit is None:
continue
commit_date = parse(commit["authored_date"])
timestamp = int(time.mktime(commit_date.timetuple()))
# Gogs-type forge
elif forge_type == "gogs":
if not app_url.endswith('.git'):
app_url += ".git"
raw_url = '%s/raw/%s/manifest.json' % (app_url[:-4], app_rev)
manifest = get_json(raw_url, verify=False)
if manifest is None:
continue
obj_url = '%s/objects/%s/%s' % (
app_url, app_rev[0:2], app_rev[2:]
)
commit = get_zlib(obj_url, verify=False)
if commit is None or len(commit) < 2:
continue
else:
commit = commit[1]
# Extract author line and commit date
commit_author = re_commit_author.search(commit)
if not commit_author:
print("-> Error: author line in commit not found")
continue
# Construct UTC timestamp
timestamp = int(commit_author.group('time'))
tz = commit_author.group('tz')
if len(tz) != 5:
print("-> Error: unexpected timezone length in commit")
continue
elif tz != '+0000':
tdelta = (int(tz[1:3]) * 3600) + (int(tz[3:5]) * 60)
if tz[0] == '+':
timestamp -= tdelta
elif tz[0] == '-':
timestamp += tdelta
else:
print("-> Error: unexpected timezone format in commit")
continue
else:
print("-> Error: unsupported VCS and/or protocol")
continue
if manifest["id"] != app or manifest["id"] != repo.replace("_ynh", ""):
print("Warning: IDs different between community.json, manifest and repo name")
print(" Manifest id : %s" % manifest["id"])
print(" Name in community json : %s" % app)
print(" Repo name : %s" % repo.replace("_ynh", ""))
try:
result_dict[manifest['id']] = {
'git': {
'branch': info['branch'],
'revision': app_rev,
'url': app_url
},
'lastUpdate': timestamp,
'manifest': include_translations_in_manifest(manifest['id'], manifest),
'state': info['state'],
'level': info.get('level', '?'),
'maintained': app_maintained
}
except KeyError as e:
print("-> Error: invalid app info or manifest, %s" % e)
continue
# Write resulting file
with open(args.output, 'w') as f:
f.write(json.dumps(result_dict, sort_keys=True))
print("\nDone! Written in %s" % args.output)
| gpl-3.0 | -4,528,834,335,519,708,700 | 33.504532 | 136 | 0.57762 | false |
osrf/osrf_hw | kicad_scripts/check_component_availability.py | 1 | 1943 | #!/usr/bin/env python
from __future__ import print_function
import csv
import os
import sys
import urllib2
def import_csv(filename):
if not os.path.isfile(filename):
print('pinfile not found')
return
with open(filename, mode='r') as infile:
reader = csv.reader(infile)
part_dict = {rows[5]:rows[8] for rows in reader}
return part_dict
def check_availability(url, website):
webpage = urllib2.urlopen(url)
content = webpage.read()
if website == 'digikey':
str_qty = 'Digi-Key Stock: ' # quantityAvailable":"'
idx = content.find(str_qty)
if idx == -1:
return 0
idx2 = content[idx + len(str_qty):].find(' ')
result = content[idx + len(str_qty):idx + len(str_qty) + idx2]
result = result.replace(',','')
elif website == 'mouser':
str_qty = ' Can Ship Immediately'
idx = content.find(str_qty)
print('idx: {}'.format(idx))
if idx == -1:
return -1
        idx2 = content[:idx].rfind('\n')
result = content[idx2:idx]
else:
return -1
return int(result)
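# Illustrative sketch, not part of the original script: check_availability()
# finds a marker string in the downloaded page and slices out the quantity
# that follows it. The helper below applies the same slicing to a made-up
# page snippet, without any network access.
def _example_parse_digikey_quantity():
    content = "... Digi-Key Stock: 1,234 Can ship immediately ..."  # hypothetical snippet
    str_qty = 'Digi-Key Stock: '
    idx = content.find(str_qty)
    idx2 = content[idx + len(str_qty):].find(' ')
    result = content[idx + len(str_qty):idx + len(str_qty) + idx2]
    return int(result.replace(',', ''))  # -> 1234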
if len(sys.argv) != 2:
print('invalid number of arguments')
sys.exit()
if not os.path.isfile(sys.argv[1]):
print('BOM file {} does not exist'.format(sys.argv[1]))
sys.exit()
filepath = sys.argv[1]
dictionary = import_csv(filepath)
unavailable_components = []
for mfp, url in dictionary.items():
qty = -1
    if mfp not in ['MFP', 'DNP']:
print('poking {}'.format(url))
if url.find('digikey.com') != -1:
qty = check_availability(url, 'digikey')
# FIXME not working for mouser for now
# elif url.find('mouser.com') != -1:
# qty = check_availability(url, 'mouser')
if qty <= 1:
unavailable_components.append(mfp)
print('checked availability of {}: {} remaining'.format(mfp, qty))
print(unavailable_components)
| apache-2.0 | 1,384,212,045,585,071,900 | 29.84127 | 74 | 0.587751 | false |
jgamblin/btrecon | btrecon.py | 1 | 1501 |
#!/usr/bin/env python
# Name: btrecon.py
# Purpose: Bluetooth Scanner
# By: Jerry Gamblin
# Date: 02.11.15
# Modified 02.11.15
# Rev Level 0.5
# -----------------------------------------------
import os
import re
import time
import sys
import subprocess
import readline
def color(text, color_code):
if sys.platform == "win32" and os.getenv("TERM") != "xterm":
return text
return '\x1b[%dm%s\x1b[0m' % (color_code, text)
def red(text):
return color(text, 31)
def blink(text):
return color(text, 5)
def green(text):
return color(text, 32)
def blue(text):
return color(text, 34)
#clean up old files
os.system("rm -rf devices.txt")
os.system("rm -rf btaddresses.txt")
print "\n"
print "Finding Bluetooth Devices..."
os.system("hcitool -i hci0 scan > devices.txt")
print "Found The Following Devices:"
os.system("cat devices.txt | grep -i '[0-9A-F]\{2\}\(:[0-9A-F]\{2\}\)\{5\}'")
os.system("cat devices.txt | grep -o '[0-9A-F]\{2\}\(:[0-9A-F]\{2\}\)\{5\}' > btaddresses.txt")
b = open('btaddresses.txt')
macs = b.readlines()
b.close()
print "\n"
print "Starting Information Gathering"
print "\n"
for mac in macs:
print (green("Information about %s" % mac))
subprocess.call("hcitool name %s" % mac, shell=True)
print "\n"
subprocess.call("hcitool info %s" % mac, shell=True)
print "\n"
subprocess.call("sdptool records %s" % mac, shell=True)
print "\n"
subprocess.call("sdptool browse %s" % mac, shell=True)
print "\n"
print "\n"
| mit | -6,145,480,689,312,573,000 | 20.753623 | 95 | 0.623584 | false |
CGATOxford/CGATPipelines | CGATPipelines/PipelineWindows.py | 1 | 39870 | '''PipelineWindows - Tasks for window based read distribution analysis
======================================================================
Requirements:
* bedtools >= 2.21.0
* picardtools >= 1.106
* samtools >= 1.1
* MEDIPS >= 1.15.0
Reference
---------
'''
import os
import re
import collections
import pandas
import math
import numpy
import numpy.ma as ma
import itertools
import CGAT.Experiment as E
import CGATPipelines.Pipeline as P
import CGAT.BamTools as BamTools
import CGAT.IOTools as IOTools
import CGAT.Expression as Expression
import CGAT.Bed as Bed
def convertReadsToIntervals(bamfile,
bedfile,
filtering_quality=None,
filtering_dedup=None,
filtering_dedup_method='picard',
filtering_nonunique=False):
'''convert reads in *bamfile* to *intervals*.
This method converts read data into intervals for
counting based methods.
This method is not appropriate for RNA-Seq.
    Optional steps include quality filtering, deduplication and
    removal of non-uniquely mapping reads. For paired end data,
    pairs are merged and optionally filtered by insert size.
Arguments
---------
bamfile : string
Filename of input file in :term:`bam` format.
bedfile : string
Filename of output file in :term:`bed` format.
filtering_quality : int
If set, remove reads with a quality score below given threshold.
filtering_dedup : bool
If True, deduplicate data.
filtering_dedup_method : string
Deduplication method. Possible options are ``picard`` and
``samtools``.
filtering_nonunique : bool
If True, remove non-uniquely matching reads.
'''
track = P.snip(bedfile, ".bed.gz")
is_paired = BamTools.isPaired(bamfile)
current_file = bamfile
tmpdir = P.getTempFilename()
os.unlink(tmpdir)
statement = ["mkdir %(tmpdir)s"]
nfiles = 0
if filtering_quality > 0:
next_file = "%(tmpdir)s/bam_%(nfiles)i.bam" % locals()
statement.append('''samtools view
-q %(filtering_quality)i -b
%(current_file)s
2>> %%(bedfile)s.quality.log
> %(next_file)s ''' % locals())
nfiles += 1
current_file = next_file
if filtering_nonunique:
next_file = "%(tmpdir)s/bam_%(nfiles)i.bam" % locals()
statement.append('''cat %(current_file)s
| cgat bam2bam
--method=filter
--filter-method=unique,mapped
--log=%%(bedfile)s.nonunique.log
> %(next_file)s ''' % locals())
nfiles += 1
current_file = next_file
if filtering_dedup is not None:
        # Picard's MarkDuplicates requires an explicit bam file.
next_file = "%(tmpdir)s/bam_%(nfiles)i.bam" % locals()
if filtering_dedup_method == 'samtools':
statement.append('''samtools rmdup - - ''')
elif filtering_dedup_method == 'picard':
statement.append('''picard MarkDuplicates
INPUT=%(current_file)s
OUTPUT=%(next_file)s
ASSUME_SORTED=TRUE
METRICS_FILE=%(bedfile)s.duplicate_metrics
REMOVE_DUPLICATES=TRUE
VALIDATION_STRINGENCY=SILENT
2>> %%(bedfile)s.markdup.log ''' % locals())
nfiles += 1
current_file = next_file
if is_paired:
statement.append('''cat %(current_file)s
| cgat bam2bed
--merge-pairs
--min-insert-size=%(filtering_min_insert_size)i
--max-insert-size=%(filtering_max_insert_size)i
--log=%(bedfile)s.bam2bed.log
-
| cgat bed2bed
--method=sanitize-genome
--genome-file=%(genome_dir)s/%(genome)s
--log=%(bedfile)s.sanitize.log
| cut -f 1,2,3,4
| sort -k1,1 -k2,2n
| bgzip > %(bedfile)s''')
else:
statement.append('''cat %(current_file)s
| cgat bam2bed
--log=%(bedfile)s.bam2bed.log
-
| cgat bed2bed
--method=sanitize-genome
--genome-file=%(genome_dir)s/%(genome)s
--log=%(bedfile)s.sanitize.log
| cut -f 1,2,3,4
| sort -k1,1 -k2,2n
| bgzip > %(bedfile)s''')
statement.append("tabix -p bed %(bedfile)s")
statement.append("rm -rf %(tmpdir)s")
statement = " ; checkpoint; ".join(statement)
P.run()
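# Illustrative sketch, not part of the original module: a typical call from a
# pipeline task, with made-up file names. The insert-size and genome
# placeholders inside the statement are filled in from the pipeline
# parameters by P.run(), so this only works inside a configured pipeline.
def _example_convert_reads_to_intervals():
    convertReadsToIntervals(
        "sample1.bam",      # hypothetical input
        "sample1.bed.gz",   # hypothetical output
        filtering_quality=10,
        filtering_dedup=True,
        filtering_dedup_method="picard",
        filtering_nonunique=True)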
def countTags(infile, outfile):
'''count number of tags in bed-file.
`outfile` will contain the number of tags in `infile`
counted per chromosome.
Arguments
=========
infile : string
Input filename in :term:`bed` format
outfile : string
Output filename in :term:`tsv` format.
'''
statement = '''zcat %(infile)s
| cgat bed2stats
--per-contig
--log=%(outfile)s.log
>& %(outfile)s'''
P.run()
def countTagsWithinWindows(tagfile,
windowfile,
outfile,
counting_method="midpoint",
job_memory="4G"):
'''count tags within windows.
Counting is done using bedtools.
Arguments
---------
tagfile : string
Filename with tags to be counted in :term:`bed` format.
windowfile : string
Filename with windows in :term:`bed` format.
outfile : outfile
Output filename in :term:`bed` format.
counting_method : string
Counting method to use. Possible values are ``nucleotide``
and ``midpoint``.
midpoint counts the number of reads overlapping the midpoint of the
window by at least one base
nucleotide counts the number of reads overlapping the window by at
least one base.
job_memory : string
Amount of memory to allocate.
'''
if counting_method == "midpoint":
f = '''| awk '{a = $2+($3-$2)/2;
printf("%s\\t%i\\t%i\\n", $1, a, a+1)}' '''
elif counting_method == "nucleotide":
f = ""
else:
raise ValueError("unknown counting method: %s" % counting_method)
# Note that in version 2.26, coverage changed from reporting
# A on B to B on A.
statement = '''
zcat %(tagfile)s
%(f)s
| bedtools coverage -a %(windowfile)s -b stdin -split
| sort -k1,1 -k2,2n -k3,3n -k4,4
| gzip
> %(outfile)s
'''
P.run()
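# Illustrative sketch, not part of the original module: for
# counting_method == "midpoint" the awk one-liner above collapses every tag
# to a 1 bp interval at its midpoint before the bedtools overlap. The same
# transform expressed in Python:
def _example_tag_midpoint(start=100, end=150):
    midpoint = start + (end - start) // 2
    # a tag spanning [100, 150) becomes the interval [125, 126)
    return midpoint, midpoint + 1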
def aggregateWindowsTagCounts(infiles,
outfile,
regex="(.*)\..*"):
'''aggregate output from several ``bedtools coverage`` results.
``bedtools coverage`` outputs the following columns for a bed4
file::
1 Contig
2 Start
3 Stop
4 Name
5 The number of features in A that overlapped (by at least one
base pair) the B interval.
6 The number of bases in B that had non-zero coverage from features in A.
7 The length of the entry in B.
8 The fraction of bases in B that had non-zero coverage from
features in A.
This method autodetects the number of columns in the :term:`infiles`
and selects:
* bed4: use column 5
* bed6: use column 7
* bed12: use column 13
Arguments
---------
infiles : list
Input filenames with the output from ``bedtools coverage``
outfile : string
Output filename in :term:`tsv` format.
regex : string
Regular expression used to extract the track name from the
filename. The default removes any suffix.
'''
# get bed format
bed_columns = Bed.getNumColumns(infiles[0])
# +1 as awk is 1-based
column = bed_columns - 4 + 1
src = " ".join(["""<( zcat %s |
awk '{printf("%%s:%%i-%%i\\t%%i\\n", $1,$2,$3,$%s );}')""" %
(x, column) for x in infiles])
tmpfile = P.getTempFilename(".")
statement = '''paste %(src)s > %(tmpfile)s'''
P.run()
# build track names
tracks = [re.search(regex, os.path.basename(x)).groups()[0]
for x in infiles]
outf = IOTools.openFile(outfile, "w")
outf.write("interval_id\t%s\n" % "\t".join(tracks))
# filter for uniqueness - keys with the same value as the
# previous line will be ignored.
last_gene = None
c = E.Counter()
for line in open(tmpfile, "r"):
c.input += 1
data = line[:-1].split("\t")
genes = list(set([data[x] for x in range(0, len(data), 2)]))
values = [int(data[x]) for x in range(1, len(data), 2)]
assert len(genes) == 1, \
"paste command failed, wrong number of genes per line: '%s'" % line
if genes[0] == last_gene:
c.duplicates += 1
continue
c.output += 1
outf.write("%s\t%s\n" % (genes[0], "\t".join(map(str, values))))
last_gene = genes[0]
outf.close()
os.unlink(tmpfile)
E.info("aggregateWindowsTagCounts: %s" % c)
def normalizeTagCounts(infile, outfile, method):
'''normalize Tag counts
Parameters
----------
infile : string
Input filename of file with counts.
outfile : string
Output filename with normalized counts.
method : string
Method to use for normalization.
can be deseq-size factors, total-column, total-row, total-count
deseq-size-factors - use normalisation implemented in DEseq
total-column - divide counts by column total
total-row - divide counts by the value in a row called 'total'
total-count - normalised all values in column by the ratio of the
per column sum of counts and the average column count
across all rows.
'''
statement = '''
zcat %(infile)s
| cgat counts2counts
--method=normalize
--normalization-method=%(method)s
--log=%(outfile)s.log
| gzip
> %(outfile)s
'''
P.run()
def buildDMRStats(infiles, outfile, method, fdr_threshold=None):
'''build dmr summary statistics.
This method works from output files created by Expression.py
(method="deseq" or method="edger") or runMEDIPS (method="medips")
This method counts the number of up/down, 2fold up/down, etc.
genes in output from (:mod:`scripts/runExpression`).
This method also creates diagnostic plots in the
<exportdir>/<method> directory.
Arguments
---------
infiles ; list
List of tabs with DMR output
outfile : string
Output filename. Tab separated file summarizing
method : string
Method name
fdr_threshold : float
FDR threshold to apply. Currently unused.
'''
results = collections.defaultdict(lambda: collections.defaultdict(int))
status = collections.defaultdict(lambda: collections.defaultdict(int))
# deseq/edger
def f_significant(x):
return x.significant == "1"
def f_up(x):
return float(x.l2fold) > 0
def f_down(x):
return float(x.l2fold) < 0
def f_fold2up(x):
return float(x.l2fold) > 1
def f_fold2down(x):
return float(x.l2fold) < -1
def f_key(x):
return (x.treatment_name, x.control_name)
def f_status(x):
return x.status
outf = IOTools.openFile(outfile, "w")
is_first = True
for infile in infiles:
xx = 0
for line in IOTools.iterate(IOTools.openFile(infile)):
key = f_key(line)
r, s = results[key], status[key]
r["tested"] += 1
ss = f_status(line)
s[ss] += 1
if ss != "OK":
continue
is_significant = f_significant(line)
up = f_up(line)
down = f_down(line)
fold2up = f_fold2up(line)
fold2down = f_fold2down(line)
fold2 = fold2up or fold2down
if up:
r["up"] += 1
if down:
r["down"] += 1
if fold2up:
r["l2fold_up"] += 1
if fold2down:
r["l2fold_down"] += 1
if is_significant:
r["significant"] += 1
if up:
r["significant_up"] += 1
if down:
r["significant_down"] += 1
if fold2:
r["fold2"] += 1
if fold2up:
r["significant_l2fold_up"] += 1
if fold2down:
r["significant_l2fold_down"] += 1
if xx > 10000:
break
if is_first:
is_first = False
header1, header2 = set(), set()
for r in list(results.values()):
header1.update(list(r.keys()))
for s in list(status.values()):
header2.update(list(s.keys()))
header = ["method", "treatment", "control"]
header1 = list(sorted(header1))
header2 = list(sorted(header2))
outf.write("\t".join(header + header1 + header2) + "\n")
for treatment, control in list(results.keys()):
key = (treatment, control)
r = results[key]
s = status[key]
outf.write("%s\t%s\t%s\t" % (method, treatment, control))
outf.write("\t".join([str(r[x]) for x in header1]) + "\t")
outf.write("\t".join([str(s[x]) for x in header2]) + "\n")
def buildFDRStats(infile, outfile, method):
'''compute number of windows called at different FDR.
.. note::
This method is incomplete
Arguments
---------
infile : string
Input filename in :term:`tsv` format. Typically the output
from :mod:`scripts/runExpression`.
outfile : string
Output filename in :term:`tsv` format.
method : string
Method name.
'''
raise NotImplementedError("function is incomplete")
data = pandas.read_csv(IOTools.openFile(infile), sep="\t", index_col=0)
assert data['treatment_name'][0] == data['treatment_name'][-1]
assert data['control_name'][0] == data['control_name'][-1]
treatment_name, control_name = data[
'treatment_name'][0], data['control_name'][0]
key = (treatment_name, control_name)
fdrs = (0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0)
for fdr in fdrs:
print("fdr")
take = data['qvalue'] <= fdr
significant = sum(take)
print(significant)
def outputAllWindows(infile, outfile):
'''output all windows as a bed file with the l2fold change
as a score.
Arguments
---------
infile : string
Input filename in :term:`tsv` format. Typically the output
from :mod:`scripts/runExpression`.
outfile : string
Output filename in :term:`bed` format.
'''
outf = IOTools.openFile(outfile, "w")
for line in IOTools.iterate(IOTools.openFile(infile)):
outf.write("\t".join(
(line.contig, line.start, line.end,
"%6.4f" % float(line.l2fold))) + "\n")
outf.close()
def outputRegionsOfInterest(design_file, counts_file, outfile,
max_per_sample=10, sum_per_group=40):
'''output windows according to various filters.
The output is a mock analysis similar to a differential expression
result.
Arguments
---------
design_file : string
Filename with experimental design
counts_file : string
:term:`tsv` formatted file with counts per windows
outfile : string
Output filename in :term:`tsv` format
max_per_sample : int
Remove samples with more than threshold counts
sum_per_group : int
Minimum counts per group.
'''
job_memory = "64G"
design = Expression.readDesignFile(design_file)
# remove tracks not included in the design
design = dict([(x, y) for x, y in list(design.items()) if y.include])
# define the two groups
groups = sorted(set([x.group for x in list(design.values())]))
# build a filtering statement
groupA, groupB = groups
def _buildMax(g, threshold):
selected = [x for x, y in list(design.items()) if y.group == g]
if len(selected) > 1:
return "max((%s)) < %f" % (
",".join(
["int(r['%s'])" % x for x in selected]),
threshold)
elif len(selected) == 1:
return "int(r['%s']) < %f" % (selected[0], threshold)
else:
raise ValueError("no groups found for 'g'" % g)
def _buildSum(g, threshold):
selected = [x for x, y in list(design.items()) if y.group == g]
if len(selected) > 1:
return "sum((%s)) > %f" % (
",".join(
["int(r['%s'])" % x for x in selected]),
threshold)
elif len(selected) == 1:
return "int(r['%s']) > %f" % (selected[0], threshold)
else:
raise ValueError("no groups found for 'g'" % g)
upper_levelA = _buildMax(groupA, max_per_sample)
upper_levelB = _buildMax(groupB, max_per_sample)
sum_levelA = _buildSum(groupA, sum_per_group)
sum_levelB = _buildSum(groupB, sum_per_group)
statement = '''
zcat %(counts_file)s
| cgat csv_select
--log=%(outfile)s.log
"(%(upper_levelA)s and %(sum_levelB)s) or
(%(upper_levelB)s and %(sum_levelA)s)"
| cgat runExpression
--log=%(outfile)s.log
--design-tsv-file=%(design_file)s
--tags-tsv-file=-
--method=mock
--filter-min-counts-per-sample=0
| gzip
> %(outfile)s
'''
P.run()
def runDE(design_file,
counts_file,
outfile,
outdir,
method="deseq",
spike_file=None):
'''run DESeq, DESeq2 or EdgeR through :mod:`scripts/runExpression.py`
The job is split into smaller sections. The order of the input
data is randomized in order to avoid any biases due to chromosomes
and break up local correlations.
At the end, a q-value is computed from all results.
Arguments
---------
design_file : string
Filename with experimental design
counts_file : string
:term:`tsv` formatted file with counts per windows
outfile : string
Output filename in :term:`tsv` format.
outdir : string
Directory for additional output files.
method : string
Method to use. See :mod:`scripts/runExpression.py`.
spike_file : string
Filename with spike-in data to add before processing.
'''
if spike_file is None:
statement = "zcat %(counts_file)s"
else:
statement = '''cgat combine_tables
--missing-value=0
--cat=filename
--log=%(outfile)s.log
%(counts_file)s %(spike_file)s
| cgat csv_cut
--remove filename
--log=%(outfile)s.log
'''
prefix = IOTools.snip(os.path.basename(outfile))
E.info(prefix)
# the post-processing strips away the warning,
# renames the qvalue column to old_qvalue
# and adds a new qvalue column after recomputing
# over all windows.
statement += '''
| cgat randomize_lines --keep-header=1
| %(cmd-farm)s
--input-header
--output-header
--split-at-lines=200000
--cluster-options="-l mem_free=16G"
--log=%(outfile)s.log
--output-filename-pattern=%(outdir)s/%%s
--subdirs
--output-regex-header="^test_id"
"cgat runExpression
--method=%(method)s
--tags-tsv-file=-
--design-tsv-file=%(design_file)s
--output-filename-pattern=%%DIR%%%(prefix)s_
--deseq-fit-type=%(deseq_fit_type)s
--deseq-dispersion-method=%(deseq_dispersion_method)s
--deseq-sharing-mode=%(deseq_sharing_mode)s
--edger-dispersion=%(edger_dispersion)f
--deseq2-design-formula=%(deseq2_model)s
--deseq2-contrasts=%(deseq2_contrasts)s
--filter-min-counts-per-row=%(tags_filter_min_counts_per_row)i
--filter-min-counts-per-sample=%(tags_filter_min_counts_per_sample)i
--filter-percentile-rowsums=%(tags_filter_percentile_rowsums)i
--log=%(outfile)s.log
--fdr=%(edger_fdr)f
--deseq2-plot=0"
| perl -p -e "s/qvalue/old_qvalue/"
| cgat table2table
--log=%(outfile)s.log
--method=fdr
--column=pvalue
--fdr-method=BH
--fdr-add-column=qvalue
| gzip
> %(outfile)s '''
E.info(statement)
P.run()
def normalizeBed(countsfile, outfile):
'''normalize counts in a bed file to total library size.
Use :func:`Pipeline.submit` to send to cluster.
Arguments
---------
countsfile : string
Filename with count data in :term:`tsv` format
outfile : string
Output filename in :term:`bedGraph` format.
'''
bed_frame = pandas.read_table(countsfile,
sep="\t",
compression="gzip",
header=0,
index_col=0)
# normalize count column by total library size
# have to explicitly convert data_frame to numpy
# array with int64/float64 data type. Otherwise
# numpy.log will through an Attribute error (wrong
# error to report) as it cannot handle python longs
bed_frame = bed_frame.fillna(0.0)
val_array = numpy.array(bed_frame.values, dtype=numpy.int64)
geom_mean = geoMean(val_array)
ratio_frame = bed_frame.apply(lambda x: x / geom_mean,
axis=0)
size_factors = ratio_frame.apply(numpy.median,
axis=0)
normalize_frame = bed_frame / size_factors
# replace infs and -infs with Nas, then 0s
normalize_frame.replace([numpy.inf, -numpy.inf], numpy.nan, inplace=True)
normalize_frame = normalize_frame.fillna(0.0)
normalize_frame.to_csv(outfile, sep="\t", index_label="interval")
def geoMean(array):
'''
Generate the geometric mean of a list or array,
removing all zero-values but retaining total length
'''
if isinstance(array, pandas.core.frame.DataFrame):
array = array.as_matrix()
else:
pass
non_zero = ma.masked_values(array,
0)
log_a = ma.log(non_zero)
geom_mean = ma.exp(log_a.mean())
return geom_mean
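# Illustrative sketch, not part of the original module: geoMean() masks the
# zero entries before averaging the logs, so a single zero count does not
# collapse the geometric mean to zero.
def _example_geo_mean():
    values = numpy.array([1, 10, 100, 0])
    # zeros are masked, so this is exp(mean(log([1, 10, 100]))) = 10.0
    return geoMean(values)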
def enrichmentVsInput(infile, outfile):
'''
Calculate the fold enrichment of the test data
vs. the input data
Parameters
----------
infile: list
list of filenames
infile[0]: str
filename of normalised :term:`bedGraph` file showing counts in
the input
infile[1]: str
filename of normalised :term:`bedGraph` files showing
counts in each experiment
outfile: str
filename of output :term:`bedGraph` file
'''
test_frame = pandas.read_table(infile[1],
sep="\t",
compression="gzip",
header=None,
index_col=None)
input_frame = pandas.read_table(infile[0],
sep="\t",
compression="gzip",
header=None,
index_col=None)
merge_frame = pandas.merge(test_frame,
input_frame,
how='left',
left_on=[0, 1, 2],
right_on=[0, 1, 2])
def foldchange(x):
return math.log((x['3_y'] + 1.0) / (x['3_x'] + 1.0), 2)
merge_frame[4] = merge_frame.apply(foldchange, axis=1)
out_frame = merge_frame[[0, 1, 2, 4]]
out_frame.to_csv(outfile,
sep="\t",
header=None,
index=None)
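# Illustrative sketch, not part of the original module: the score written to
# the output bedGraph above is a pseudo-counted log2 ratio of the two count
# columns, log2((b + 1) / (a + 1)). A worked example:
def _example_log2_ratio(a=7, b=31):
    # log2((31 + 1) / (7 + 1)) = log2(4.0) = 2.0
    return math.log((b + 1.0) / (a + 1.0), 2)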
def runMEDIPSQC(infile, outfile):
'''run QC using the MEDIPS package.
The QC data will be stored in the directory
:file:`./medips.dir`
Arguments
---------
infile : string
Filename of :term:`bam` formatted file
outfile : string
        Output filename. Contains logging information.
'''
# note that the wrapper adds the filename
# to the output filenames.
job_memory = "10G"
statement = """cgat runMEDIPS
--ucsc-genome=%(medips_genome)s
--treatment=%(infile)s
--toolset=saturation
--toolset=coverage
--toolset=enrichment
--shift=%(medips_shift)s
--extend=%(medips_extension)s
--output-filename-pattern="medips.dir/%%s"
--log=%(outfile)s.log
| gzip
> %(outfile)s
"""
P.run()
def runMEDIPSDMR(design_file, outfile):
'''run differential methylation analysis using MEDIPS package.
Arguments
---------
    design_file : string
        Filename with the experimental design
outfile : string
Output filename in :term:`tsv` format.
'''
job_memory = "30G"
design = Expression.readDesignFile(design_file)
# remove data tracks not needed
design = [(x, y) for x, y in list(design.items()) if y.include]
# build groups
groups = set([y.group for x, y in design])
statements = []
for pair1, pair2 in itertools.combinations(groups, 2):
treatment = ["%s.bam" % x for x, y in design if y.group == pair1]
control = ["%s.bam" % x for x, y in design if y.group == pair2]
treatment = ",".join(treatment)
control = ",".join(control)
# outfile contains directory prefix
statements.append(
"""cgat runMEDIPS
--ucsc-genome=%(medips_genome)s
--treatment=%(treatment)s
--control=%(control)s
--toolset=dmr
--shift=%(medips_shift)s
--extend=%(medips_extension)s
--window-size=%(medips_window_size)i
--output-filename-pattern="%(outfile)s_%(pair1)s_vs_%(pair2)s_%%s"
--fdr-threshold=%(medips_fdr)f
--log=%(outfile)s.log
> %(outfile)s.log2;
checkpoint;
zcat %(outfile)s_%(pair1)s_vs_%(pair2)s_data.tsv.gz
| cgat runMEDIPS
--treatment=%(pair1)s
--control=%(pair2)s
--toolset=convert
--fdr-threshold=%(medips_fdr)f
--log=%(outfile)s.log
| gzip
> %(outfile)s
""")
P.run()
@P.cluster_runnable
def outputSpikeCounts(outfile, infile_name,
expression_nbins=None,
fold_nbins=None,
expression_bins=None,
fold_bins=None):
"""count significant results in bins of expression and fold change.
This method groups the results of a DE analysis in a 2-dimensonal
histogramy by tag counts/expression level and fold change.
Either supply one of `nbins` or `bins` for the histograms.
Arguments
---------
outfile : string
Output filename
infile_name : string
Input filename in :term:`tsv` format. Usually the output of
:mod:`scripts/runExpression`.
expression_nbins : int
Number of bins to use for tag count histogram.
fold_nbins : int
Number of bins to use for fold-change histogram.
expression_bins : list
List of bins to use for tag count histogram.
fold_bins : list
List of bins to use for fold-change histogram.
"""
df = pandas.read_csv(infile_name,
sep="\t",
index_col=0)
E.debug("read %i rows and %i columns of data" % df.shape)
if "edger" in outfile.lower():
# edger: treatment_mean and control_mean do not exist
# use supplied values directly.
l10average = numpy.log(df['treatment_mean'])
l2fold = numpy.log2(df['fold'])
else:
# use pseudocounts to compute fold changes
treatment_mean = df['treatment_mean'] + 1
control_mean = df['control_mean'] + 1
# build log2 average values
l10average = numpy.log((treatment_mean + control_mean) / 2)
l2fold = numpy.log2(treatment_mean / control_mean)
if expression_nbins is not None:
mm = math.ceil(max(l10average))
expression_bins = numpy.arange(0, mm, mm / expression_nbins)
if fold_nbins is not None:
mm = math.ceil(max(abs(min(l2fold)), abs(max(l2fold))))
# ensure that range is centered on exact 0
n = math.ceil(fold_nbins / 2.0)
fold_bins = numpy.concatenate(
(-numpy.arange(0, mm, mm / n)[:0:-1],
numpy.arange(0, mm, mm / n)))
# compute expression bins
d2hist_counts, xedges, yedges = numpy.histogram2d(
l10average, l2fold,
bins=(expression_bins,
fold_bins))
dd = pandas.DataFrame(d2hist_counts)
dd.index = list(xedges[:-1])
dd.columns = list(yedges[:-1])
dd.to_csv(IOTools.openFile(outfile, "w"),
sep="\t")
return df, d2hist_counts, xedges, yedges, l10average, l2fold
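# Illustrative sketch, not part of the original module: the heart of
# outputSpikeCounts() is a 2D histogram over expression level and fold
# change. A stripped-down version of that binning with made-up numbers:
def _example_expression_fold_histogram():
    l10average = numpy.array([0.5, 1.5, 2.5, 2.5])
    l2fold = numpy.array([-1.0, 0.5, 0.5, 2.0])
    counts, xedges, yedges = numpy.histogram2d(
        l10average, l2fold,
        bins=(numpy.arange(0, 4, 1),     # expression bins
              numpy.arange(-3, 4, 1)))   # fold-change bins, centred on 0
    return counts, xedges, yedges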
@P.cluster_runnable
def plotDETagStats(infile, composition_file, outfile):
'''plot differential expression statistics
Arguments
---------
infile : string
Filename with :term:`tsv` formatted list of differential
methylation results output from :doc:`scripts/runExpression`.
composition_file : string
Filename with :term:`tsv` formatted data about nucleotide
compositions of windows tested.
outfile : string
Output filename, used as sentinel only.
'''
Expression.plotDETagStats(
infile, outfile,
additional_file=composition_file,
join_columns=("contig", "start", "end"),
additional_columns=("CpG_density",
"length"))
P.touch(outfile)
@P.cluster_runnable
def buildSpikeResults(infile, outfile):
'''build matrices with results from spike-in and upload
into database.
The method will output several files:
.spiked.gz: Number of intervals that have been spiked-in
for each bin of expression and fold-change
.power.gz: Global power analysis - aggregates over all
ranges of fold-change and expression and outputs the
power, the proportion of intervals overall that
could be detected as differentially methylated.
This is a table with the following columns:
fdr - fdr threshold
power - power level, number of intervals detectable
intervals - number of intervals in observed data at given
level of fdr and power.
intervals_percent - percentage of intervals in observed data
at given level of fdr and power
The method will also upload the results into the database.
Arguments
---------
infile : string
Input filename in :term:`tsv` format. Usually the output of
:mod:`scripts/runExpression`.
outfile : string
Output filename in :term:`tsv` format.
'''
expression_nbins = 10
fold_nbins = 10
spikefile = P.snip(infile, '.tsv.gz') + '.spike.gz'
if not os.path.exists(spikefile):
E.warn('no spike data: %s' % spikefile)
P.touch(outfile)
return
########################################
# output and load spiked results
tmpfile_name = P.getTempFilename(shared=True)
statement = '''zcat %(spikefile)s
| grep -e "^spike" -e "^test_id"
> %(tmpfile_name)s
'''
P.run()
E.debug("outputting spiked counts")
(spiked, spiked_d2hist_counts, xedges, yedges,
spiked_l10average, spiked_l2fold) = \
outputSpikeCounts(
outfile=P.snip(outfile, ".power.gz") + ".spiked.gz",
infile_name=tmpfile_name,
expression_nbins=expression_nbins,
fold_nbins=fold_nbins)
########################################
# output and load unspiked results
statement = '''zcat %(infile)s
| grep -v -e "^spike"
> %(tmpfile_name)s
'''
P.run()
E.debug("outputting unspiked counts")
(unspiked, unspiked_d2hist_counts, unspiked_xedges,
unspiked_yedges, unspiked_l10average, unspiked_l2fold) = \
outputSpikeCounts(
outfile=P.snip(outfile, ".power.gz") + ".unspiked.gz",
infile_name=tmpfile_name,
expression_bins=xedges,
fold_bins=yedges)
E.debug("computing power")
assert xedges.all() == unspiked_xedges.all()
tmpfile = IOTools.openFile(tmpfile_name, "w")
tmpfile.write("\t".join(
("expression",
"fold",
"fdr",
"counts",
"percent")) + "\n")
fdr_thresholds = [0.01, 0.05] + list(numpy.arange(0.1, 1.0, 0.1))
power_thresholds = numpy.arange(0.1, 1.1, 0.1)
spiked_total = float(spiked_d2hist_counts.sum().sum())
unspiked_total = float(unspiked_d2hist_counts.sum().sum())
outf = IOTools.openFile(outfile, "w")
outf.write("fdr\tpower\tintervals\tintervals_percent\n")
# significant results
for fdr in fdr_thresholds:
take = spiked['qvalue'] < fdr
# compute 2D histogram in spiked data below fdr threshold
spiked_d2hist_fdr, xedges, yedges = \
numpy.histogram2d(spiked_l10average[take],
spiked_l2fold[take],
bins=(xedges, yedges))
# convert to percentage of spike-ins per bin
spiked_d2hist_fdr_normed = spiked_d2hist_fdr / spiked_d2hist_counts
spiked_d2hist_fdr_normed = numpy.nan_to_num(spiked_d2hist_fdr_normed)
# set values without data to -1
spiked_d2hist_fdr_normed[spiked_d2hist_counts == 0] = -1.0
# output to table for database upload
for x, y in itertools.product(list(range(len(xedges) - 1)),
list(range(len(yedges) - 1))):
tmpfile.write("\t".join(map(
str, (xedges[x], yedges[y],
fdr,
spiked_d2hist_fdr[x, y],
100.0 * spiked_d2hist_fdr_normed[x, y]))) + "\n")
# take elements in spiked_hist_fdr above a certain threshold
for power in power_thresholds:
# select 2D bins at a given power level
power_take = spiked_d2hist_fdr_normed >= power
# select the counts in the unspiked data according
# to this level
power_counts = unspiked_d2hist_counts[power_take]
outf.write("\t".join(map(
str, (fdr, power,
power_counts.sum().sum(),
100.0 * power_counts.sum().sum() /
unspiked_total))) + "\n")
tmpfile.close()
outf.close()
# upload into table
method = P.snip(os.path.dirname(outfile), ".dir")
tablename = P.toTable(
P.snip(outfile, "power.gz") + method + ".spike.load")
P.load(tmpfile_name,
outfile + ".log",
tablename=tablename,
options="--add-index=fdr")
os.unlink(tmpfile_name)
def summarizeTagsWithinContext(tagfile,
contextfile,
outfile,
min_overlap=0.5,
job_memory="4G"):
    '''count occurrences of tags in genomic context.
    Examines the genomic context to which tags align.
    A tag is assigned to the genomic context that it
    overlaps by at least 50%. Thus some reads mapping to
    several contexts might be dropped.
Arguments
---------
tagfile : string
Filename with tags. The file can be :term:`bam` or :term:`bed` format.
contextfile : string
Filename of :term:`bed` formatted files with named intervals (BED4).
outfile : string
Output in :term:`tsv` format.
min_overlap : float
Minimum overlap (fraction) to count features as overlapping.
job_memory : string
Memory to reserve.
'''
statement = '''
cgat bam_vs_bed
--min-overlap=%(min_overlap)f
--log=%(outfile)s.log
%(tagfile)s %(contextfile)s
| gzip
> %(outfile)s
'''
P.run()
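# Illustrative sketch, not part of the original module: a tag only counts
# towards a context feature if the overlap covers at least min_overlap of the
# tag. The fraction being tested is essentially:
def _example_overlap_fraction(tag_start=0, tag_end=100, feature_start=40, feature_end=200):
    overlap = max(0, min(tag_end, feature_end) - max(tag_start, feature_start))
    # 60 bp of a 100 bp tag -> 0.6, which passes the default 0.5 cut-off
    return overlap / float(tag_end - tag_start)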
def mergeSummarizedContextStats(infiles, outfile, samples_in_columns=False):
"""combine output from :func:`summarizeTagsWithinContext`.
Arguments
---------
infiles : list
List of filenames in :term:`tsv` format
outfile : string
Output filename in :term:`tsv` format.
samples_in_columns :
If True, put samples in columns. The default is to put them
in rows.
"""
header = ",".join([P.snip(os.path.basename(x), ".contextstats.tsv.gz")
for x in infiles])
filenames = " ".join(infiles)
if not samples_in_columns:
transpose_cmd = \
"""| cgat table2table
--transpose""" % P.getParams()
else:
transpose_cmd = ""
statement = """cgat combine_tables
--header-names=%(header)s
--missing-value=0
--skip-titles
%(filenames)s
| perl -p -e "s/bin/track/; s/\?/Q/g"
%(transpose_cmd)s
| gzip
> %(outfile)s
"""
P.run()
def loadSummarizedContextStats(infiles,
outfile,
suffix=".contextstats.tsv.gz"):
"""merge output from :func:`summarizeTagsWithinContex` and load into database.
Arguments
---------
infiles : list
List of filenames in :term:`tsv` format. The files should end
in suffix.
outfile : string
Output filename, the table name is derived from `outfile`.
suffix : string
Suffix to remove from filename for track name.
"""
header = ",".join([P.snip(os.path.basename(x), suffix)
for x in infiles])
filenames = " ".join(infiles)
load_statement = P.build_load_statement(
P.toTable(outfile),
options="--add-index=track")
statement = """cgat combine_tables
--header-names=%(header)s
--missing-value=0
--skip-titles
%(filenames)s
| perl -p -e "s/bin/track/; s/\?/Q/g"
| cgat table2table --transpose
| %(load_statement)s
> %(outfile)s
"""
P.run()
def testTagContextOverlap(tagfile,
contextfile,
workspace,
outfile,
job_threads=1,
samples=10000,
options=""):
"""use gat to test for overlap between tags and genomic context.
Arguments
---------
tagfile : string
Filename with read tags :term:`bed` format. Tags can be
overlapping.
contextfile : string
Filename with genomic context information in :term:`bed`
format.
workspace : string
Genomic workspace for gat simulations in :term:`bed` format.
outfile : string
Output filename in :term:`tsv` format.
threads : int
Number of threads to use.
samples : int
Number of samples to compute.
options : string
Options to pass to the gat program.
"""
statement = """
gat-run.py
--annotations-label=reads
--annotations=%(tagfile)s
--segments=%(contextfile)s
--workspace=%(workspace)s
--overlapping-annotations
--annotations-to-points=midpoint
--counter=annotation-overlap
--with-segment-tracks
--num-samples=%(samples)i
--num-threads=%(job_threads)i
--log=%(outfile)s.log
%(options)s
| gzip
> %(outfile)s
"""
P.run()
| mit | -5,619,507,310,090,360,000 | 29.319392 | 82 | 0.561073 | false |
greatmazinger/programming-interview-code | Python/no_repeating_digits.py | 1 | 1460 | import unittest
from collections import Counter
"""
No repeating digits problem:
Examples:
"""
limit = 9876543210  # largest number whose digits are all distinct
def no_repeating_digits(num):
num_str = str(num)
c = Counter(list(num_str))
most_common = c.most_common(1)
return most_common[0][1] == 1
def count_non_repeating_numbers(low, high):
total = 0
cur = low
while cur <= high and cur <= limit:
if no_repeating_digits(cur):
total += 1
cur += 1
return total
class TestNoRepeatingDigits(unittest.TestCase):
    def test_no_repeats(self):
        self.assertTrue(no_repeating_digits(9876543210))
    def test_with_repeats(self):
        self.assertFalse(no_repeating_digits(121))
    def test_count_range(self):
        # every number from 1 to 10 has distinct digits
        self.assertEqual(count_non_repeating_numbers(1, 10), 10)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="Count number of numbers that don't have repeating digits.")
parser.add_argument( 'n1',
metavar = 'N1',
type = int,
help = 'Left number (inclusive)' )
parser.add_argument( 'n2',
metavar = 'N2',
type = int,
help = 'Right number (inclusive)' )
args = parser.parse_args()
test = False
if test:
unittest.main()
else:
        # Count over the range given on the command line.
result = count_non_repeating_numbers( args.n1, args.n2 )
print(result)
| gpl-2.0 | -1,023,518,888,930,635,300 | 26.037037 | 109 | 0.559589 | false |
gregreen/bayestar | scripts/compareColors.py | 1 | 10545 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# compareColors.py
#
# Copyright 2013 Greg Green <greg@greg-UX31A>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
import numpy as np
import scipy, scipy.stats, scipy.special
import h5py
import time
import argparse, sys, os
import matplotlib.pyplot as plt
import matplotlib as mplib
from mpl_toolkits.axes_grid1 import Grid
from matplotlib.ticker import MaxNLocator, AutoMinorLocator, FormatStrFormatter
import hdf5io
class TStellarModel:
def __init__(self, fname):
self.load(fname)
def load(self, fname):
f = open(fname, 'r')
row = []
for l in f:
line = l.rstrip().lstrip()
if len(line) == 0: # Empty line
continue
if line[0] == '#': # Comment
continue
data = line.split()
if len(data) < 6:
txt = 'Error reading in stellar templates.\n'
txt += 'The following line does not have the correct number of entries (6 expected):\n'
txt += line
raise Exception(txt)
row.append([float(s) for s in data])
f.close()
template = np.array(row, dtype='f8')
fields = ['Mr', 'FeH', 'gr', 'ri', 'iz', 'zy']
dtype = [(field, 'f8') for field in fields]
self.data = np.empty(len(template), dtype=dtype)
for i,field in enumerate(fields):
self.data[field][:] = template[:,i]
self.FeH = np.unique(self.data['FeH'])
def get_isochrone(self, FeH):
if FeH >= np.max(self.FeH):
FeH_eval = np.max(self.FeH)
idx = (self.data['FeH'] == FeH_eval)
return self.data[idx]
elif FeH <= np.min(self.FeH):
FeH_eval = np.min(self.FeH)
idx = (self.data['FeH'] == FeH_eval)
return self.data[idx]
else:
k = np.arange(self.FeH.size)
#print np.where(FeH > self.FeH, k, -1)
#print self.FeH
idx = np.max(np.where(FeH > self.FeH, k, -1))
FeH_eval = [self.FeH[idx], self.FeH[idx+1]]
a = float(FeH_eval[1] - FeH) / float(FeH_eval[1] - FeH_eval[0])
idx = (self.data['FeH'] == FeH_eval[0])
d1 = self.data[idx]
idx = (self.data['FeH'] == FeH_eval[1])
d2 = self.data[idx]
if np.any(d1['Mr'] != d2['Mr']):
raise Exception('Expected Mr samples to be the same for each metallicity.')
fields = ['Mr', 'FeH', 'gr', 'ri', 'iz', 'zy']
dtype = [(field, 'f8') for field in fields]
ret = np.empty(len(d1), dtype=dtype)
#print FeH_eval
#print a
for field in fields:
ret[field][:] = a * d1[field][:] + (1. - a) * d2[field][:]
return ret
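# Illustrative sketch, not part of the original class: for a metallicity that
# falls between two tabulated values, get_isochrone() mixes the bracketing
# isochrones linearly. The weight applied to the lower-metallicity table is:
def _example_isochrone_weight(FeH=-0.25, FeH_low=-0.5, FeH_high=0.0):
    # (0.0 - (-0.25)) / (0.0 - (-0.5)) = 0.5, i.e. an even mix of the two
    return float(FeH_high - FeH) / float(FeH_high - FeH_low)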
def read_photometry(fname, target_name):
f = h5py.File(fname, 'r')
# Hack to get the file to read properly
#try:
# f.items()
#except:
# pass
phot = f['photometry']
# Load in photometry from selected target
for name,item in phot.iteritems():
if 'pixel' in name:
t_name = item.attrs['target_name']
if t_name == target_name:
mags = item['mag'][:]
errs = item['err'][:]
EBV_SFD = item['EBV'][:]
pix_idx = int(name.split()[1])
return mags, errs, EBV_SFD, t_name, pix_idx
return None
def read_evidences(fname, pix_idx):
f = h5py.File(fname, 'r')
lnZ = None
dset = '/pixel %d/stellar chains' % pix_idx
try:
lnZ = f[dset].attrs['ln(Z)'][:]
except:
print 'Dataset "%s" does not exist.' % dset
return lnZ
def get_reddening_vector():
return np.array([3.172, 2.271, 1.682, 1.322, 1.087])
def dereddened_mags(mags, EBV):
R = get_reddening_vector()
if type(EBV) == float:
R.shape = (1, R.size)
R = np.repeat(R, len(mags), axis=0)
return mags - EBV * R
elif type(EBV) == np.ndarray:
return mags - np.einsum('i,j->ij', EBV, R)
else:
raise TypeError('EBV has unexpected type: %s' % type(EBV))
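# Illustrative sketch, not part of the original script: dereddening subtracts
# E(B-V) times the per-band extinction coefficient from each magnitude. For
# the g band (R_g = 3.172) and a made-up star:
def _example_deredden_g(g_mag=20.0, EBV=0.5):
    R_g = get_reddening_vector()[0]
    # 20.0 - 0.5 * 3.172 = 18.414
    return g_mag - EBV * R_g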
def plot_cluster(ax, template, mags):
pass
def main():
parser = argparse.ArgumentParser(prog='compareColors.py',
description='Compare photometric colors to model colors.',
add_help=True)
parser.add_argument('--templates', '-t', type=str, required=True,
help='Stellar templates (in ASCII format).')
parser.add_argument('--photometry', '-ph', type=str, nargs='+', required=True,
help='Bayestar input file(s) with photometry.')
parser.add_argument('--evidences', '-ev', type=str, nargs='+', default=None,
help='Bayestar output file(s) with evidences.')
parser.add_argument('--names', '-n', type=str, nargs='+', required=True,
help='Region names.')
parser.add_argument('--output', '-o', type=str, default=None, help='Plot filename.')
parser.add_argument('--show', '-sh', action='store_true', help='Show plot.')
if 'python' in sys.argv[0]:
offset = 2
else:
offset = 1
args = parser.parse_args(sys.argv[offset:])
n_targets = len(args.names)
templates = TStellarModel(args.templates)
color_names = ['gr', 'ri', 'iz', 'zy']
# Set matplotlib style attributes
mplib.rc('text', usetex=True)
mplib.rc('xtick.major', size=6)
mplib.rc('xtick.minor', size=4)
mplib.rc('ytick.major', size=6)
mplib.rc('ytick.minor', size=4)
mplib.rc('xtick', direction='in')
mplib.rc('ytick', direction='in')
mplib.rc('axes', grid=False)
fig = plt.figure(figsize=(6., 2.*n_targets), dpi=150)
axgrid = Grid(fig, 111,
nrows_ncols=(n_targets, 3),
axes_pad=0.05,
add_all=True,
label_mode='L')
color_min = np.empty(len(color_names), dtype='f8')
color_max = np.empty(len(color_names), dtype='f8')
color_min[:] = np.inf
color_max[:] = -np.inf
for target_num, target_name in enumerate(args.names):
# Load photometry
ret = None
for fname in args.photometry:
ret = read_photometry(fname, target_name)
if ret != None:
break
if ret == None:
print 'Target "%s" not found.' % target_name
return 0
mags, errs, EBV, t_name, pix_idx = ret
mags = dereddened_mags(mags, EBV)
# Load evidences
lnZ = np.zeros(len(mags), dtype='f8')
if args.evidences != None:
for fname in args.evidences:
ret = read_evidences(fname, pix_idx)
if ret != None:
lnZ = ret[:]
break
print '== Target #%d ==' % target_num
print ' name: %s' % (target_name)
print ' E(B-V): %.2f' % (np.percentile(EBV, 95.))
print ' # of stars: %d' % (len(mags))
txt = ''
idx = ( (mags[:,0] > 10.) & (mags[:,0] < 25.)
& (mags[:,1] > 10.) & (mags[:,1] < 25.) )
mags = mags[idx]
errs = errs[idx]
lnZ = lnZ[idx]
colors = -np.diff(mags, axis=1)
idx = ( (colors[:,0] > np.percentile(colors[:,0], 0.5))
& (colors[:,0] < np.percentile(colors[:,0], 99.5)) )
colors = colors[idx]
mags = mags[idx]
errs = errs[idx]
lnZ = lnZ[idx]
color_min[0], color_max[0] = np.percentile(colors[:,0], [1., 99.])
# Plot color-Magnitude diagrams
for i,c in enumerate(color_names[1:]):
ax = axgrid[3*target_num + i]
idx = ( (colors[:,i+1] > np.percentile(colors[:,i+1], 0.5))
& (colors[:,i+1] < np.percentile(colors[:,i+1], 99.5))
& (mags[:,i+1] > 10.) & (mags[:,i+1] < 25.)
& (mags[:,i+2] > 10.) & (mags[:,i+2] < 25.) )
# Empirical
lnZ_max = 0. #np.percentile(lnZ_tmp[idx], 97.)
Delta_lnZ = 25.
colors_tmp = colors[idx, i+1]
ax.scatter(colors_tmp, colors[idx,0],
c=lnZ[idx], cmap='Spectral',
vmin=lnZ_max-Delta_lnZ, vmax=lnZ_max,
s=1.5, alpha=0.1, edgecolor='none')
xlim = ax.get_xlim()
ylim = ax.get_ylim()
# Model
for FeH, style in zip([0., -1., -2.], ['c-', 'y-', 'r-']):
isochrone = templates.get_isochrone(FeH)
ax.plot(isochrone[c], isochrone['gr'], style, lw=1, alpha=0.25)
print isochrone[c]
print isochrone['gr']
print ''
color_min_tmp, color_max_tmp = np.percentile(colors_tmp, [1., 99.])
if color_min_tmp < color_min[i+1]:
color_min[i+1] = color_min_tmp
if color_max_tmp > color_max[i+1]:
color_max[i+1] = color_max_tmp
txt += ' %s: %d stars\n' % (c, np.sum(idx))
txt += ' ln(Z_max) = %.2f\n' % lnZ_max
print txt
print ''
# Reddening vectors
'''R = get_reddening_vector()
EBV_0 = np.median(EBV)
for i in range(len(colors)):
A_gr = EBV_0 * (R[0] - R[1])
A_xy = EBV_0 * (R[i] - R[i+1])
r_0 = Mr_min
w = color_max[i] - color_min[i]
h = Mr_max - Mr_min
xy_0 = color_min[i] + 0.1 * w
axgrid[4*cluster_num + i].arrow(xy_0, r_0, A_xy, A_r,
head_width=0.03*w, head_length=0.01*h,
color='r', alpha=0.5)
'''
# Format y-axes
        axgrid[3*target_num].set_ylim(color_min[0], color_max[0])
axgrid[3*target_num].set_ylabel(r'$g - r$', fontsize=16)
axgrid[3*target_num].yaxis.set_major_locator(MaxNLocator(nbins=5))
axgrid[3*target_num].yaxis.set_minor_locator(AutoMinorLocator())
# Format x-axes
for i,c in enumerate(color_names[1:]):
axgrid[i].set_xlim(color_min[i+1], color_max[i+1])
color_label = r'$%s - %s$' % (c[0], c[1])
axgrid[3*(n_targets-1) + i].set_xlabel(color_label, fontsize=16)
axgrid[3*(n_targets-1) + i].xaxis.set_major_locator(MaxNLocator(nbins=4))
axgrid[3*(n_targets-1) + i].xaxis.set_minor_locator(AutoMinorLocator())
fig.subplots_adjust(left=0.12, right=0.85, top=0.98, bottom=0.10)
cax = fig.add_axes([0.87, 0.10, 0.03, 0.88])
norm = mplib.colors.Normalize(vmin=-25., vmax=0.)
mappable = mplib.cm.ScalarMappable(cmap='Spectral', norm=norm)
mappable.set_array(np.array([-25., 0.]))
fig.colorbar(mappable, cax=cax, ticks=[0., -5., -10., -15., -20., -25.])
cax.yaxis.set_label_position('right')
cax.yaxis.tick_right()
#cax.yaxis.set_major_formatter(FormatStrFormatter('%d'))
#cax.set_yticks([0., -5., -10., -15., -20., -25.])
cax.set_ylabel(r'$\mathrm{ln} \left( Z \right)$', rotation='vertical', fontsize=16)
if args.output != None:
fig.savefig(args.output, dpi=300)
if args.show:
plt.show()
return 0
if __name__ == '__main__':
main()
| gpl-2.0 | 327,697,341,319,020,540 | 28.704225 | 92 | 0.597345 | false |
datawire/telepresence | ci/clean-cluster.py | 1 | 3179 | #!/usr/bin/env python3
"""
Delete old deployments and services with test-prefixed names. This is used to
clean up the Telepresence test cluster, as Telepresence tests currently leak.
"""
import argparse
import json
from datetime import datetime, timedelta, timezone
from subprocess import check_output, run
from typing import Dict, List
def get_now() -> datetime:
"""Get current date/time in UTC"""
return datetime.now(tz=timezone.utc)
def parse_k8s_timestamp(timestamp: str) -> datetime:
"""Get date/time in UTC from k8s timestamp"""
fmt = "%Y-%m-%dT%H:%M:%SZ"
naive = datetime.strptime(timestamp, fmt)
return naive.replace(tzinfo=timezone.utc)
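# Editor's sketch (not part of the original script): the helper above parses
# the fixed Kubernetes creationTimestamp format, e.g.
#
#   parse_k8s_timestamp("2018-01-02T03:04:05Z")
#   -> datetime.datetime(2018, 1, 2, 3, 4, 5, tzinfo=datetime.timezone.utc)
#
# so resource ages can be computed directly against get_now().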
def get_kubectl_json(cmd: List[str]) -> Dict:
"""Call kubectl and parse resulting JSON"""
output = str(check_output(["kubectl"] + cmd + ["-o", "json"]), "utf-8")
return json.loads(output)
KINDS = "ns", "svc", "deploy", "po"
KIND_MAP = {
"Namespace": "ns/",
"Service": "svc/",
"Deployment": "deploy/",
"Pod": "po/"
}
def get_resource_names(kinds: List[str], prefix: str,
min_age: timedelta) -> List[str]:
"""
Return kind/name of resources with the given name prefix and minimum age
"""
now = get_now()
resources = get_kubectl_json(["get", ",".join(kinds)])["items"]
names = []
for resource in resources:
kind = resource["kind"]
metadata = resource["metadata"]
name = metadata["name"]
if kind == "Service" and name == "kubernetes":
continue
if not name.startswith(prefix):
continue
timestamp_str = metadata["creationTimestamp"]
timestamp = parse_k8s_timestamp(timestamp_str)
age = now - timestamp
if age < min_age:
continue
names.append(KIND_MAP[kind] + name)
return names
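# Editor's note (illustrative only): for a cluster containing a day-old
# deployment "testing-web" and its service, the call
#
#   get_resource_names(KINDS, "testing-", timedelta(hours=1))
#
# would return something like ["svc/testing-web", "deploy/testing-web"],
# ready to be passed to `kubectl delete`.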
def seconds(value: str) -> timedelta:
"""Return a timedelta with the given number of seconds"""
try:
return timedelta(seconds=int(value))
except ValueError:
message = "Invalid age in seconds: {}".format(value)
raise argparse.ArgumentTypeError(message)
def main():
"""Clean up the current Kubernetes cluster"""
parser = argparse.ArgumentParser(
        allow_abbrev=False,  # abbreviated options can break compatibility when new options are added
description=__doc__
)
parser.add_argument(
"--prefix",
default="testing-",
help="prefix for resource name [testing-]"
)
parser.add_argument(
"--min-age",
type=seconds,
default="86400",
help="minimum age in seconds"
)
parser.add_argument(
"--dry-run", action="store_true", help="don't really delete anything"
)
args = parser.parse_args()
names = get_resource_names(KINDS, args.prefix, args.min_age)
if not names:
print("Nothing to clean up.")
return
if args.dry_run:
print("Would clean up:")
else:
print("Cleaning up:")
for name in names:
print(" {}".format(name))
if not args.dry_run:
run(["kubectl", "delete", "--wait=false"] + names)
if __name__ == "__main__":
main()
| apache-2.0 | 619,881,604,613,905,700 | 26.643478 | 79 | 0.60648 | false |
OpenTechFund/WebApp | opentech/public/projects/migrations/0002_projectfunding.py | 1 | 1302 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2018-01-15 17:12
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import modelcluster.fields
class Migration(migrations.Migration):
dependencies = [
('wagtailcore', '0040_page_draft_title'),
('projects', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='ProjectFunding',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
('value', models.PositiveIntegerField()),
('year', models.PositiveIntegerField()),
('duration', models.PositiveIntegerField(help_text='In months')),
('page', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='funding', to='projects.ProjectPage')),
('source', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='wagtailcore.Page')),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
),
]
| gpl-2.0 | -5,559,156,850,558,860,000 | 37.294118 | 154 | 0.593702 | false |
nikcub/Sketch | sketch/util/gae.py | 1 | 2267 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim:ts=2:sw=2:expandtab
#
# Copyright (c) 2010-2011, Nik Cubrilovic. All rights reserved.
#
# <[email protected]> <http://nikcub.appspot.com>
#
# Licensed under a BSD license. You may obtain a copy of the License at
#
# http://nikcub.appspot.com/bsd-license
#
"""
Sketch - gae.py
Google App Engine utils
This source file is subject to the new BSD license that is bundled with this
package in the file LICENSE.txt. The license is also available online at the
URL: <http://nikcub.appspot.com/bsd-license.txt>
:copyright: Copyright (C) 2011 Nik Cubrilovic and others, see AUTHORS
:license: new BSD, see LICENSE for more details.
"""
import os
import sys
def setup_gae_paths(gae_base_dir=None):
if not gae_base_dir:
gae_base_dir = '/Applications/GoogleAppEngineLauncher.app/Contents/Resources/GoogleAppEngine-default.bundle/Contents/Resources/google_appengine'
if not os.path.isdir(gae_base_dir):
return False
ae_path = os.path.abspath(os.path.realpath(gae_base_dir))
AE_PATHS = [
ae_path,
os.path.join(ae_path, 'lib', 'antlr3'),
os.path.join(ae_path, 'lib', 'django'),
os.path.join(ae_path, 'lib', 'fancy_urllib'),
os.path.join(ae_path, 'lib', 'ipaddr'),
os.path.join(ae_path, 'lib', 'webob'),
os.path.join(ae_path, 'lib', 'yaml', 'lib'),
]
sys.path = sys.path + AE_PATHS
return True
def enable_ctypes():
"""Enable ctypes in Google App Engine development server
"""
return enable_dev_module(['_ctypes', 'gestalt'])
def enable_dev_module(modules=[]):
"""In Google App Engine development server whitelist a C module or other module
that has been disabled to assist with local development.
for eg. Enable _ctypes so that Jinja rendering error messages are sane
Should only run if on development server.
:param modules: Modules to enable in dev server
:return List: List of modules that have been whitelisted
"""
try:
from google.appengine.tools.dev_appserver import HardenedModulesHook
except ImportError:
return False
if type(modules) == type([]) and len(modules) > 0:
HardenedModulesHook._WHITE_LIST_C_MODULES += modules
return HardenedModulesHook._WHITE_LIST_C_MODULES
return False
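# Editor's sketch (not part of the original module): typical usage from a
# dev_appserver-only code path, e.g. during local app setup:
#
#   if os.environ.get('SERVER_SOFTWARE', '').startswith('Development'):
#     enable_ctypes()                   # same as enable_dev_module(['_ctypes', 'gestalt'])
#     enable_dev_module(['socket'])     # hypothetical extra module to whitelist
#
# Both helpers return False when the dev-server hook cannot be imported (i.e.
# outside the SDK) or when the argument is not a non-empty list.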
| bsd-2-clause | -1,236,930,795,368,294,700 | 29.24 | 148 | 0.698721 | false |
ToonTownInfiniteRepo/ToontownInfinite | otp/ai/AIMsgTypes.py | 1 | 3083 | from otp.distributed.OtpDoGlobals import *
from direct.showbase.PythonUtil import invertDictLossless
OTP_SERVER_ROOT_DO_ID = 4007
CHANNEL_CLIENT_BROADCAST = 4014
BAD_CHANNEL_ID = 0
BAD_ZONE_ID = 0
BAD_DO_ID = 0
CONTROL_MESSAGE = 4001
CONTROL_SET_CHANNEL = 2001
CONTROL_REMOVE_CHANNEL = 2002
CONTROL_SET_CON_NAME = 2004
CONTROL_SET_CON_URL = 2005
CONTROL_ADD_RANGE = 2008
CONTROL_REMOVE_RANGE = 2009
CONTROL_ADD_POST_REMOVE = 2010
CONTROL_CLEAR_POST_REMOVE = 2011
AIMsgName2Id = {'STATESERVER_OBJECT_GENERATE_WITH_REQUIRED': 2001,
'STATESERVER_OBJECT_GENERATE_WITH_REQUIRED_OTHER': 2003,
'STATESERVER_OBJECT_UPDATE_FIELD': 2004,
'STATESERVER_OBJECT_UPDATE_FIELD_MULTIPLE': 2005,
'STATESERVER_OBJECT_DELETE_RAM': 2007,
'STATESERVER_OBJECT_SET_ZONE': 2008,
'STATESERVER_OBJECT_CHANGE_ZONE': 2009,
'STATESERVER_OBJECT_NOTFOUND': 2015,
'STATESERVER_QUERY_OBJECT_ALL': 2020,
'STATESERVER_QUERY_ZONE_OBJECT_ALL': 2021,
'STATESERVER_OBJECT_LOCATE': 2022,
'STATESERVER_OBJECT_LOCATE_RESP': 2023,
'STATESERVER_OBJECT_QUERY_FIELD': 2024,
'STATESERVER_QUERY_OBJECT_ALL_RESP': 2030,
'STATESERVER_SHARD_REST': 2061,
'STATESERVER_ADD_AI_RECV': 2045,
'STATESERVER_QUERY_ZONE_OBJECT_ALL_DONE': 2046,
'STATESERVER_OBJECT_CREATE_WITH_REQUIRED_CONTEXT': 2050,
'STATESERVER_OBJECT_CREATE_WITH_REQUIR_OTHER_CONTEXT': 2051,
'STATESERVER_OBJECT_CREATE_WITH_REQUIRED_CONTEXT_RESP': 2052,
'STATESERVER_OBJECT_CREATE_WITH_REQUIR_OTHER_CONTEXT_RESP': 2053,
'STATESERVER_OBJECT_DELETE_DISK': 2060,
'STATESERVER_OBJECT_QUERY_FIELD_RESP': 2062,
'STATESERVER_OBJECT_ENTERZONE_WITH_REQUIRED_OTHER': 2066,
'STATESERVER_OBJECT_ENTER_AI_RECV': 2067,
'STATESERVER_OBJECT_LEAVING_AI_INTEREST': 2033,
'STATESERVER_OBJECT_ENTER_OWNER_RECV': 2068,
'STATESERVER_OBJECT_CHANGE_OWNER_RECV': 2069,
'STATESERVER_OBJECT_SET_OWNER_RECV': 2070,
'STATESERVER_OBJECT_QUERY_FIELDS': 2080,
'STATESERVER_OBJECT_QUERY_FIELDS_RESP': 2081,
'STATESERVER_OBJECT_QUERY_FIELDS_STRING': 2082,
'STATESERVER_OBJECT_QUERY_MANAGING_AI': 2083,
'STATESERVER_BOUNCE_MESSAGE': 2086,
'STATESERVER_QUERY_OBJECT_CHILDREN_LOCAL': 2087,
'STATESERVER_QUERY_OBJECT_CHILDREN_LOCAL_DONE': 2089,
'STATESERVER_QUERY_OBJECT_CHILDREN_RESP': 2087,
'ACCOUNT_AVATAR_USAGE': 3005,
'ACCOUNT_ACCOUNT_USAGE': 3006,
'CLIENT_AGENT_OPEN_CHANNEL': 3104,
'CLIENT_AGENT_CLOSE_CHANNEL': 3105,
'CLIENT_AGENT_SET_INTEREST': 3106,
'CLIENT_AGENT_REMOVE_INTEREST': 3107,
'CHANNEL_PUPPET_ACTION': 4004,
'DBSERVER_MAKE_FRIENDS': 1017,
'DBSERVER_MAKE_FRIENDS_RESP': 1031,
'DBSERVER_REQUEST_SECRET': 1025,
'DBSERVER_REQUEST_SECRET_RESP': 1026,
'DBSERVER_SUBMIT_SECRET': 1027,
'DBSERVER_SUBMIT_SECRET_RESP': 1028,
'DBSERVER_CREATE_STORED_OBJECT': 1003,
'DBSERVER_CREATE_STORED_OBJECT_RESP': 1004,
'DBSERVER_DELETE_STORED_OBJECT': 1008,
'DBSERVER_GET_STORED_VALUES': 1012,
'DBSERVER_GET_STORED_VALUES_RESP': 1013,
'DBSERVER_SET_STORED_VALUES': 1014,
'SERVER_PING': 5002}
AIMsgId2Names = invertDictLossless(AIMsgName2Id)
for name, value in AIMsgName2Id.items():
exec '%s = %s' % (name, value)
del name
del value
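# Editor's note: the exec loop above promotes every entry of AIMsgName2Id to a
# module-level constant, so for example
#
#   STATESERVER_OBJECT_UPDATE_FIELD == 2004
#   CLIENT_AGENT_SET_INTEREST == 3106
#
# while AIMsgId2Names maps each numeric id back to the name(s) that share it.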
DBSERVER_ID = 4003
| mit | 8,465,867,581,276,243,000 | 37.5375 | 66 | 0.758028 | false |
OpenBMP/openbmp-python-api-message | src/openbmp/api/parsed/message/UnicastPrefix.py | 1 | 5938 |
"""
Copyright (c) 2015-2016 Cisco Systems, Inc. and others. All rights reserved.
This program and the accompanying materials are made available under the
terms of the Eclipse Public License v1.0 which accompanies this distribution,
    and is available at http://www.eclipse.org/legal/epl-v10.html
"""
from .Base import Base
from .FieldProcessors import ParseNullAsEmpty, ParseLongEmptyAsZero, ParseInt, NotNull, ParseTimestamp, ParseLong
from .Message import Message
from .MsgBusFields import MsgBusFields
class UnicastPrefix(Base):
"""
Format class for unicast_prefix parsed messages (openbmp.parsed.unicast_prefix)
Schema Version: 1.7
"""
minimum_header_names = [
MsgBusFields.ACTION.get_name(),
MsgBusFields.SEQUENCE.get_name(),
MsgBusFields.HASH.get_name(),
MsgBusFields.ROUTER_HASH.get_name(),
MsgBusFields.ROUTER_IP.get_name(),
MsgBusFields.BASE_ATTR_HASH.get_name(),
MsgBusFields.PEER_HASH.get_name(),
MsgBusFields.PEER_IP.get_name(),
MsgBusFields.PEER_ASN.get_name(),
MsgBusFields.TIMESTAMP.get_name(),
MsgBusFields.PREFIX.get_name(),
MsgBusFields.PREFIX_LEN.get_name(),
MsgBusFields.IS_IPV4.get_name(),
MsgBusFields.ORIGIN.get_name(),
MsgBusFields.AS_PATH.get_name(),
MsgBusFields.AS_PATH_COUNT.get_name(),
MsgBusFields.ORIGIN_AS.get_name(),
MsgBusFields.NEXTHOP.get_name(),
MsgBusFields.MED.get_name(),
MsgBusFields.LOCAL_PREF.get_name(),
MsgBusFields.AGGREGATOR.get_name(),
MsgBusFields.COMMUNITY_LIST.get_name(),
MsgBusFields.EXT_COMMUNITY_LIST.get_name(),
MsgBusFields.CLUSTER_LIST.get_name(),
MsgBusFields.ISATOMICAGG.get_name(),
MsgBusFields.IS_NEXTHOP_IPV4.get_name(),
MsgBusFields.ORIGINATOR_ID.get_name()
]
def __init__(self, message):
"""
Handle the message by parsing it and storing the data in memory.
:param message: 'Message' object.
"""
if not isinstance(message, Message):
raise TypeError("Expected Message object instead of type " + type(message))
version = message.get_version()
data = message.get_content()
super(UnicastPrefix, self).__init__()
if version >= float(1.7):
version_specific_headers = [
MsgBusFields.PATH_ID.get_name(),
MsgBusFields.LABELS.get_name(),
MsgBusFields.ISPREPOLICY.get_name(),
MsgBusFields.IS_ADJ_RIB_IN.get_name(),
MsgBusFields.LARGE_COMMUNITY_LIST.get_name()
]
elif version >= float(1.3):
version_specific_headers = [
MsgBusFields.PATH_ID.get_name(),
MsgBusFields.LABELS.get_name(),
MsgBusFields.ISPREPOLICY.get_name(),
MsgBusFields.IS_ADJ_RIB_IN.get_name()
]
elif version >= float(1.1):
version_specific_headers = [
MsgBusFields.PATH_ID.get_name(),
MsgBusFields.LABELS.get_name()
]
else:
version_specific_headers = []
# Concatenate minimum header names and version specific header names.
self.header_names = UnicastPrefix.minimum_header_names + version_specific_headers
self.spec_version = version
self.processors = self.get_processors()
if data:
self.parse(version, data)
def get_processors(self):
"""
Processors used for each field.
        Order matters and must match the order defined in header_names.
:return: array of cell processors.
"""
default_cell_processors = [
NotNull(), # action
ParseLong(), # seq
NotNull(), # hash
NotNull(), # router hash
NotNull(), # router_ip
ParseNullAsEmpty(), # base_attr_hash
NotNull(), # peer_hash
NotNull(), # peer_ip
ParseLong(), # peer_asn
ParseTimestamp(), # timestamp
NotNull(), # prefix
ParseInt(), # prefix_len
ParseInt(), # isIPv4
ParseNullAsEmpty(), # origin
ParseNullAsEmpty(), # as_path
ParseLongEmptyAsZero(), # as_path_count
ParseLongEmptyAsZero(), # origin_as
ParseNullAsEmpty(), # nexthop
ParseLongEmptyAsZero(), # med
ParseLongEmptyAsZero(), # local_pref
ParseNullAsEmpty(), # aggregator
ParseNullAsEmpty(), # community_list
ParseNullAsEmpty(), # ext_community_list
ParseNullAsEmpty(), # cluster_list
ParseLongEmptyAsZero(), # isAtomicAgg
ParseLongEmptyAsZero(), # isNexthopIPv4
ParseNullAsEmpty(), # originator_id
]
if self.spec_version >= float(1.7):
version_specific_processors = [
ParseLongEmptyAsZero(), # Path ID
ParseNullAsEmpty(), # Labels
ParseLongEmptyAsZero(), # isPrePolicy
ParseLongEmptyAsZero(), # isAdjRibIn
ParseNullAsEmpty() # large communities
]
elif self.spec_version >= float(1.3):
version_specific_processors = [
ParseLongEmptyAsZero(), # Path ID
ParseNullAsEmpty(), # Labels
ParseLongEmptyAsZero(), # isPrePolicy
ParseLongEmptyAsZero() # isAdjRibIn
]
elif self.spec_version >= float(1.1):
version_specific_processors = [
ParseLongEmptyAsZero(), # Path ID
ParseNullAsEmpty(), # Labels
]
else:
version_specific_processors = []
return default_cell_processors + version_specific_processors
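    # Editor's note (sketch, not in the original source): header_names and the
    # processor list built above are parallel arrays -- 27 base columns plus up
    # to five version-specific ones (32 for spec >= 1.7) -- so column i of a
    # parsed row is always handled by processors[i].  A hypothetical consumer
    # could pair them up as
    #
    #   row_dict = dict(zip(obj.header_names, parsed_row))
    #
    # where parsed_row is assumed to be one record of the parsed message.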
| epl-1.0 | -5,826,643,891,727,278,000 | 36.1125 | 113 | 0.582688 | false |
pyrocko/pyrocko | src/guts.py | 1 | 64588 | # -*- coding: utf-8 -*-
# http://pyrocko.org - GPLv3
#
# The Pyrocko Developers, 21st Century
# ---|P------/S----------~Lg----------
'''Lightweight declarative YAML and XML data binding for Python.'''
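# Editor's sketch (illustrative, not part of the module): the declarative
# style provided below, with made-up class and field names:
#
#   from pyrocko.guts import Object, String, Float, List, load
#
#   class Station(Object):
#       code = String.T()
#       lat = Float.T(default=0.0)
#       channels = List.T(String.T(), optional=True)
#
#   s = Station(code='BFO', lat=48.33)
#   yaml_text = s.dump()          # YAML string with a !...Station tag
#   s2 = load(string=yaml_text)   # round-trips back to an equivalent Station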
from __future__ import absolute_import, print_function
import datetime
import calendar
import re
import sys
import types
import copy
import os.path as op
from collections import defaultdict
from io import BytesIO
try:
import numpy as num
except ImportError:
num = None
import yaml
try:
from yaml import CSafeLoader as SafeLoader, CSafeDumper as SafeDumper
except ImportError:
from yaml import SafeLoader, SafeDumper
from .util import time_to_str, str_to_time, TimeStrError, hpfloat, \
get_time_float
try:
newstr = unicode
range = xrange
except NameError:
newstr = str
try:
# needed for py2/py3 compatibility to allow
# from pyrocko.guts import FileNotFoundError
FileNotFoundError = FileNotFoundError
except NameError:
class FileNotFoundError(EnvironmentError):
pass
ALLOW_INCLUDE = False
class GutsSafeDumper(SafeDumper):
pass
class GutsSafeLoader(SafeLoader):
pass
try:
unicode
except NameError:
unicode = str
g_iprop = 0
g_deferred = {}
g_deferred_content = {}
g_tagname_to_class = {}
g_xmltagname_to_class = {}
g_guessable_xmlns = {}
guts_types = [
'Object', 'SObject', 'String', 'Unicode', 'Int', 'Float',
'Complex', 'Bool', 'Timestamp', 'DateTimestamp', 'StringPattern',
'UnicodePattern', 'StringChoice', 'List', 'Dict', 'Tuple', 'Union',
'Choice', 'Any']
us_to_cc_regex = re.compile(r'([a-z])_([a-z])')
class literal(str):
pass
class folded(str):
pass
class singlequoted(str):
pass
class doublequoted(str):
pass
def make_str_presenter(style):
def presenter(dumper, data):
return dumper.represent_scalar(
'tag:yaml.org,2002:str', str(data), style=style)
return presenter
str_style_map = {
None: lambda x: x,
'|': literal,
'>': folded,
"'": singlequoted,
'"': doublequoted}
for (style, cls) in str_style_map.items():
if style:
GutsSafeDumper.add_representer(cls, make_str_presenter(style))
class uliteral(unicode):
pass
class ufolded(unicode):
pass
class usinglequoted(unicode):
pass
class udoublequoted(unicode):
pass
def make_unicode_presenter(style):
def presenter(dumper, data):
return dumper.represent_scalar(
'tag:yaml.org,2002:str', unicode(data), style=style)
return presenter
unicode_style_map = {
None: lambda x: x,
'|': literal,
'>': folded,
"'": singlequoted,
'"': doublequoted}
for (style, cls) in unicode_style_map.items():
if style:
GutsSafeDumper.add_representer(cls, make_unicode_presenter(style))
class blist(list):
pass
class flist(list):
pass
list_style_map = {
None: list,
'block': blist,
'flow': flist}
def make_list_presenter(flow_style):
def presenter(dumper, data):
return dumper.represent_sequence(
'tag:yaml.org,2002:seq', data, flow_style=flow_style)
return presenter
GutsSafeDumper.add_representer(blist, make_list_presenter(False))
GutsSafeDumper.add_representer(flist, make_list_presenter(True))
if num:
def numpy_float_presenter(dumper, data):
return dumper.represent_float(float(data))
def numpy_int_presenter(dumper, data):
return dumper.represent_int(int(data))
for dtype in (num.float64, num.float32):
GutsSafeDumper.add_representer(dtype, numpy_float_presenter)
for dtype in (num.int32, num.int64):
GutsSafeDumper.add_representer(dtype, numpy_int_presenter)
def us_to_cc(s):
return us_to_cc_regex.sub(lambda pat: pat.group(1)+pat.group(2).upper(), s)
cc_to_us_regex1 = re.compile(r'([a-z])([A-Z]+)([a-z]|$)')
cc_to_us_regex2 = re.compile(r'([A-Z])([A-Z][a-z])')
def cc_to_us(s):
return cc_to_us_regex2.sub('\\1_\\2', cc_to_us_regex1.sub(
'\\1_\\2\\3', s)).lower()
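# Editor's note: the two helpers above convert between the snake_case used for
# Python property names and the camelCase used for XML tag names, e.g.
#
#   us_to_cc('station_code')   -> 'stationCode'
#   cc_to_us('StationCode')    -> 'station_code'
#   cc_to_us('stationCode')    -> 'station_code'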
re_frac = re.compile(r'\.[1-9]FRAC')
frac_formats = dict([('.%sFRAC' % x, '%.'+x+'f') for x in '123456789'])
def encode_utf8(s):
return s.encode('utf-8')
def no_encode(s):
return s
def make_xmltagname_from_name(name):
return us_to_cc(name)
def make_name_from_xmltagname(xmltagname):
return cc_to_us(xmltagname)
def make_content_name(name):
if name.endswith('_list'):
return name[:-5]
elif name.endswith('s'):
return name[:-1]
else:
return name
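# Editor's note: make_content_name() derives the XML tag for the elements of a
# multivalued (list-like) property from the property name, e.g.
#
#   make_content_name('station_list') -> 'station'
#   make_content_name('stations')     -> 'station'
#   make_content_name('misfit')       -> 'misfit'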
def classnames(cls):
if isinstance(cls, tuple):
return '(%s)' % ', '.join(x.__name__ for x in cls)
else:
return cls.__name__
def expand_stream_args(mode):
def wrap(f):
'''Decorator to enhance functions taking stream objects.
Wraps a function f(..., stream, ...) so that it can also be called as
f(..., filename='myfilename', ...) or as f(..., string='mydata', ...).
'''
def g(*args, **kwargs):
stream = kwargs.pop('stream', None)
filename = kwargs.get('filename', None)
if mode != 'r':
filename = kwargs.pop('filename', None)
string = kwargs.pop('string', None)
assert sum(x is not None for x in (stream, filename, string)) <= 1
if stream is not None:
kwargs['stream'] = stream
return f(*args, **kwargs)
elif filename is not None:
stream = open(filename, mode+'b')
kwargs['stream'] = stream
retval = f(*args, **kwargs)
if isinstance(retval, types.GeneratorType):
def wrap_generator(gen):
try:
for x in gen:
yield x
except GeneratorExit:
pass
stream.close()
return wrap_generator(retval)
else:
stream.close()
return retval
elif string is not None:
assert mode == 'r', \
'Keyword argument string=... cannot be used in dumper ' \
'function.'
kwargs['stream'] = BytesIO(string.encode('utf-8'))
return f(*args, **kwargs)
else:
assert mode == 'w', \
'Use keyword argument stream=... or filename=... in ' \
'loader function.'
sout = BytesIO()
f(stream=sout, *args, **kwargs)
return sout.getvalue().decode('utf-8')
return g
return wrap
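# Editor's sketch: a function decorated with expand_stream_args('w'), such as
# the module-level dump() used by Object.dump() below, can therefore be called
# in three equivalent ways (names here are illustrative):
#
#   dump(obj, stream=sys.stdout)      # write to an already open stream
#   dump(obj, filename='out.yaml')    # file is opened and closed internally
#   s = dump(obj)                     # no target -> YAML returned as a string
#
# while the 'r' variant additionally accepts string='...yaml text...'.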
class Defer(object):
def __init__(self, classname, *args, **kwargs):
global g_iprop
if kwargs.get('position', None) is None:
kwargs['position'] = g_iprop
g_iprop += 1
self.classname = classname
self.args = args
self.kwargs = kwargs
class TBase(object):
strict = False
multivalued = None
force_regularize = False
propnames = []
@classmethod
def init_propertystuff(cls):
cls.properties = []
cls.xmltagname_to_name = {}
cls.xmltagname_to_name_multivalued = {}
cls.xmltagname_to_class = {}
cls.content_property = None
def __init__(
self,
default=None,
optional=False,
xmlstyle='element',
xmltagname=None,
xmlns=None,
help=None,
position=None):
global g_iprop
if position is not None:
self.position = position
else:
self.position = g_iprop
g_iprop += 1
self._default = default
self.optional = optional
self.name = None
self._xmltagname = xmltagname
self._xmlns = xmlns
self.parent = None
self.xmlstyle = xmlstyle
self.help = help
def default(self):
return make_default(self._default)
def is_default(self, val):
if self._default is None:
return val is None
else:
return self._default == val
def has_default(self):
return self._default is not None
def xname(self):
if self.name is not None:
return self.name
elif self.parent is not None:
return 'element of %s' % self.parent.xname()
else:
return '?'
def set_xmlns(self, xmlns):
if self._xmlns is None and not self.xmlns:
self._xmlns = xmlns
if self.multivalued:
self.content_t.set_xmlns(xmlns)
def get_xmlns(self):
return self._xmlns or self.xmlns
def get_xmltagname(self):
if self._xmltagname is not None:
return self.get_xmlns() + ' ' + self._xmltagname
elif self.name:
return self.get_xmlns() + ' ' \
+ make_xmltagname_from_name(self.name)
elif self.xmltagname:
return self.get_xmlns() + ' ' + self.xmltagname
else:
assert False
@classmethod
def get_property(cls, name):
for prop in cls.properties:
if prop.name == name:
return prop
raise ValueError()
@classmethod
def remove_property(cls, name):
prop = cls.get_property(name)
if not prop.multivalued:
del cls.xmltagname_to_class[prop.effective_xmltagname]
del cls.xmltagname_to_name[prop.effective_xmltagname]
else:
del cls.xmltagname_to_class[prop.content_t.effective_xmltagname]
del cls.xmltagname_to_name_multivalued[
prop.content_t.effective_xmltagname]
if cls.content_property is prop:
cls.content_property = None
cls.properties.remove(prop)
cls.propnames.remove(name)
return prop
@classmethod
def add_property(cls, name, prop):
prop.instance = prop
prop.name = name
prop.set_xmlns(cls.xmlns)
if isinstance(prop, Choice.T):
for tc in prop.choices:
tc.effective_xmltagname = tc.get_xmltagname()
cls.xmltagname_to_class[tc.effective_xmltagname] = tc.cls
cls.xmltagname_to_name[tc.effective_xmltagname] = prop.name
elif not prop.multivalued:
prop.effective_xmltagname = prop.get_xmltagname()
cls.xmltagname_to_class[prop.effective_xmltagname] = prop.cls
cls.xmltagname_to_name[prop.effective_xmltagname] = prop.name
else:
prop.content_t.name = make_content_name(prop.name)
prop.content_t.effective_xmltagname = \
prop.content_t.get_xmltagname()
cls.xmltagname_to_class[
prop.content_t.effective_xmltagname] = prop.content_t.cls
cls.xmltagname_to_name_multivalued[
prop.content_t.effective_xmltagname] = prop.name
cls.properties.append(prop)
cls.properties.sort(key=lambda x: x.position)
cls.propnames = [p.name for p in cls.properties]
if prop.xmlstyle == 'content':
cls.content_property = prop
@classmethod
def ivals(cls, val):
for prop in cls.properties:
yield getattr(val, prop.name)
@classmethod
def ipropvals(cls, val):
for prop in cls.properties:
yield prop, getattr(val, prop.name)
@classmethod
def inamevals(cls, val):
for prop in cls.properties:
yield prop.name, getattr(val, prop.name)
@classmethod
def ipropvals_to_save(cls, val, xmlmode=False):
for prop in cls.properties:
v = getattr(val, prop.name)
if v is not None and (
not (prop.optional or (prop.multivalued and not v))
or (not prop.is_default(v))):
if xmlmode:
yield prop, prop.to_save_xml(v)
else:
yield prop, prop.to_save(v)
@classmethod
def inamevals_to_save(cls, val, xmlmode=False):
for prop, v in cls.ipropvals_to_save(val, xmlmode):
yield prop.name, v
@classmethod
def translate_from_xml(cls, list_of_pairs, strict):
d = {}
for k, v in list_of_pairs:
if k in cls.xmltagname_to_name_multivalued:
k2 = cls.xmltagname_to_name_multivalued[k]
if k2 not in d:
d[k2] = []
d[k2].append(v)
elif k in cls.xmltagname_to_name:
k2 = cls.xmltagname_to_name[k]
if k2 in d:
raise ArgumentError(
'Unexpectedly found more than one child element "%s" '
'within "%s".' % (k, cls.tagname))
d[k2] = v
elif k is None:
if cls.content_property:
k2 = cls.content_property.name
d[k2] = v
else:
if strict:
raise ArgumentError(
'Unexpected child element "%s" found within "%s".' % (
k, cls.tagname))
return d
def validate(self, val, regularize=False, depth=-1):
if self.optional and val is None:
return val
is_derived = isinstance(val, self.cls)
is_exact = type(val) == self.cls
not_ok = not self.strict and not is_derived or \
self.strict and not is_exact
if not_ok or self.force_regularize:
if regularize:
try:
val = self.regularize_extra(val)
except ValueError:
raise ValidationError(
'%s: could not convert "%s" to type %s' % (
self.xname(), val, classnames(self.cls)))
else:
raise ValidationError(
'%s: "%s" (type: %s) is not of type %s' % (
self.xname(), val, type(val), classnames(self.cls)))
validator = self
if isinstance(self.cls, tuple):
clss = self.cls
else:
clss = (self.cls,)
for cls in clss:
try:
if type(val) != cls and isinstance(val, cls):
validator = val.T.instance
except AttributeError:
pass
validator.validate_extra(val)
if depth != 0:
val = validator.validate_children(val, regularize, depth)
return val
def regularize_extra(self, val):
return self.cls(val)
def validate_extra(self, val):
pass
def validate_children(self, val, regularize, depth):
for prop, propval in self.ipropvals(val):
newpropval = prop.validate(propval, regularize, depth-1)
if regularize and (newpropval is not propval):
setattr(val, prop.name, newpropval)
return val
def to_save(self, val):
return val
def to_save_xml(self, val):
return self.to_save(val)
def extend_xmlelements(self, elems, v):
if self.multivalued:
for x in v:
elems.append((self.content_t.effective_xmltagname, x))
else:
elems.append((self.effective_xmltagname, v))
def deferred(self):
return []
def classname_for_help(self, strip_module=''):
if self.dummy_cls in guts_plain_dummy_types:
return '``%s``' % self.cls.__name__
elif self.dummy_cls.dummy_for_description:
return self.dummy_cls.dummy_for_description
else:
if self.dummy_cls is not self.cls:
if self.dummy_cls.__module__ == strip_module:
sadd = ' (:py:class:`%s`)' % (
self.dummy_cls.__name__)
else:
sadd = ' (:py:class:`%s.%s`)' % (
self.dummy_cls.__module__, self.dummy_cls.__name__)
else:
sadd = ''
def sclass(cls):
mod = cls.__module__
clsn = cls.__name__
if mod == '__builtin__' or mod == 'builtins':
return '``%s``' % clsn
elif mod == strip_module:
return ':py:class:`%s`' % clsn
else:
return ':py:class:`%s.%s`' % (mod, clsn)
if isinstance(self.cls, tuple):
return '(%s)%s' % (
' | '.join(sclass(cls) for cls in self.cls), sadd)
else:
return '%s%s' % (sclass(cls), sadd)
@classmethod
def props_help_string(cls):
baseprops = []
for base in cls.dummy_cls.__bases__:
if hasattr(base, 'T'):
baseprops.extend(base.T.properties)
hlp = []
hlp.append('')
for prop in cls.properties:
if prop in baseprops:
continue
descr = [
prop.classname_for_help(strip_module=cls.dummy_cls.__module__)]
if prop.optional:
descr.append('*optional*')
if isinstance(prop._default, DefaultMaker):
descr.append('*default:* ``%s``' % repr(prop._default))
else:
d = prop.default()
if d is not None:
descr.append('*default:* ``%s``' % repr(d))
hlp.append(' .. py:gattribute:: %s' % prop.name)
hlp.append('')
hlp.append(' %s' % ', '.join(descr))
hlp.append(' ')
if prop.help is not None:
hlp.append(' %s' % prop.help)
hlp.append('')
return '\n'.join(hlp)
@classmethod
def class_help_string(cls):
return cls.dummy_cls.__doc_template__
@classmethod
def class_signature(cls):
r = []
for prop in cls.properties:
d = prop.default()
if d is not None:
arg = repr(d)
elif prop.optional:
arg = 'None'
else:
arg = '...'
r.append('%s=%s' % (prop.name, arg))
return '(%s)' % ', '.join(r)
@classmethod
def help(cls):
return cls.props_help_string()
class ObjectMetaClass(type):
def __new__(meta, classname, bases, class_dict):
cls = type.__new__(meta, classname, bases, class_dict)
if classname != 'Object':
t_class_attr_name = '_%s__T' % classname
if not hasattr(cls, t_class_attr_name):
if hasattr(cls, 'T'):
class T(cls.T):
pass
else:
class T(TBase):
pass
setattr(cls, t_class_attr_name, T)
T = getattr(cls, t_class_attr_name)
if cls.dummy_for is not None:
T.cls = cls.dummy_for
else:
T.cls = cls
T.dummy_cls = cls
if hasattr(cls, 'xmltagname'):
T.xmltagname = cls.xmltagname
else:
T.xmltagname = classname
mod = sys.modules[cls.__module__]
if hasattr(cls, 'xmlns'):
T.xmlns = cls.xmlns
elif hasattr(mod, 'guts_xmlns'):
T.xmlns = mod.guts_xmlns
else:
T.xmlns = ''
if T.xmlns and hasattr(cls, 'guessable_xmlns'):
g_guessable_xmlns[T.xmltagname] = cls.guessable_xmlns
if hasattr(mod, 'guts_prefix'):
if mod.guts_prefix:
T.tagname = mod.guts_prefix + '.' + classname
else:
T.tagname = classname
else:
if cls.__module__ != '__main__':
T.tagname = cls.__module__ + '.' + classname
else:
T.tagname = classname
T.classname = classname
T.init_propertystuff()
for k in dir(cls):
prop = getattr(cls, k)
if k.endswith('__'):
k = k[:-2]
if isinstance(prop, TBase):
if prop.deferred():
for defer in prop.deferred():
g_deferred_content.setdefault(
defer.classname[:-2], []).append((prop, defer))
g_deferred.setdefault(
defer.classname[:-2], []).append((T, k, prop))
else:
T.add_property(k, prop)
elif isinstance(prop, Defer):
g_deferred.setdefault(prop.classname[:-2], []).append(
(T, k, prop))
if classname in g_deferred_content:
for prop, defer in g_deferred_content[classname]:
prop.process_deferred(
defer, T(*defer.args, **defer.kwargs))
del g_deferred_content[classname]
if classname in g_deferred:
for (T_, k_, prop_) in g_deferred.get(classname, []):
if isinstance(prop_, Defer):
prop_ = T(*prop_.args, **prop_.kwargs)
if not prop_.deferred():
T_.add_property(k_, prop_)
del g_deferred[classname]
g_tagname_to_class[T.tagname] = cls
if hasattr(cls, 'xmltagname'):
g_xmltagname_to_class[T.xmlns + ' ' + T.xmltagname] = cls
cls.T = T
T.instance = T()
cls.__doc_template__ = cls.__doc__
cls.__doc__ = T.class_help_string()
if cls.__doc__ is None:
cls.__doc__ = 'Undocumented.'
cls.__doc__ += '\n' + T.props_help_string()
return cls
class ValidationError(Exception):
pass
class ArgumentError(Exception):
pass
def make_default(x):
if isinstance(x, DefaultMaker):
return x.make()
elif isinstance(x, Object):
return clone(x)
else:
return x
class DefaultMaker(object):
def make(self):
        raise NotImplementedError('Should be implemented in subclass.')
class ObjectDefaultMaker(DefaultMaker):
def __init__(self, cls, args, kwargs):
DefaultMaker.__init__(self)
self.cls = cls
self.args = args
self.kwargs = kwargs
self.instance = None
def make(self):
return self.cls(
*[make_default(x) for x in self.args],
**dict((k, make_default(v)) for (k, v) in self.kwargs.items()))
def __eq__(self, other):
if self.instance is None:
self.instance = self.make()
return self.instance == other
def __repr__(self):
sargs = []
for arg in self.args:
sargs.append(repr(arg))
for k, v in self.kwargs.items():
sargs.append('%s=%s' % (k, repr(v)))
return '%s(%s)' % (self.cls.__name__, ', '.join(sargs))
class TimestampDefaultMaker(DefaultMaker):
def __init__(self, s, format='%Y-%m-%d %H:%M:%S.OPTFRAC'):
DefaultMaker.__init__(self)
self._stime = s
self._format = format
def make(self):
return str_to_time(self._stime, self._format)
def __repr__(self):
return "str_to_time(%s)" % repr(self._stime)
def with_metaclass(meta, *bases):
# inlined py2/py3 compat solution from python-future
class metaclass(meta):
__call__ = type.__call__
__init__ = type.__init__
def __new__(cls, name, this_bases, d):
if this_bases is None:
return type.__new__(cls, name, (), d)
return meta(name, bases, d)
return metaclass('temp', None, {})
class Object(with_metaclass(ObjectMetaClass, object)):
dummy_for = None
dummy_for_description = None
def __init__(self, **kwargs):
if not kwargs.get('init_props', True):
return
for prop in self.T.properties:
k = prop.name
if k in kwargs:
setattr(self, k, kwargs.pop(k))
else:
if not prop.optional and not prop.has_default():
raise ArgumentError('Missing argument to %s: %s' % (
self.T.tagname, prop.name))
else:
setattr(self, k, prop.default())
if kwargs:
raise ArgumentError('Invalid argument to %s: %s' % (
self.T.tagname, ', '.join(list(kwargs.keys()))))
@classmethod
def D(cls, *args, **kwargs):
return ObjectDefaultMaker(cls, args, kwargs)
def validate(self, regularize=False, depth=-1):
self.T.instance.validate(self, regularize, depth)
def regularize(self, depth=-1):
self.validate(regularize=True, depth=depth)
def dump(self, stream=None, filename=None, header=False):
return dump(self, stream=stream, filename=filename, header=header)
def dump_xml(
self, stream=None, filename=None, header=False, ns_ignore=False):
return dump_xml(
self, stream=stream, filename=filename, header=header,
ns_ignore=ns_ignore)
@classmethod
def load(cls, stream=None, filename=None, string=None):
return load(stream=stream, filename=filename, string=string)
@classmethod
def load_xml(cls, stream=None, filename=None, string=None, ns_hints=None,
ns_ignore=False):
if ns_hints is None:
ns_hints = [cls.T.instance.get_xmlns()]
return load_xml(
stream=stream,
filename=filename,
string=string,
ns_hints=ns_hints,
ns_ignore=ns_ignore)
def __str__(self):
return self.dump()
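# Editor's note (sketch, class name made up): validate()/regularize() defined
# above walk the property tree.  With strict types such as Float, validation
# rejects mismatched values, while regularize() coerces them where possible:
#
#   class P(Object):
#       x = Float.T()
#
#   p = P(x='1.5')
#   p.validate()       # would raise ValidationError ('1.5' is a str)
#   p.regularize()     # converts x to the float 1.5 in place
#   p.validate()       # now passes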
def to_dict(obj):
'''
Get dict of guts object attributes.
:param obj: :py:class`Object` object
'''
return dict(obj.T.inamevals(obj))
class SObject(Object):
class __T(TBase):
def regularize_extra(self, val):
if isinstance(val, (str, newstr)):
return self.cls(val)
return val
def to_save(self, val):
return str(val)
def to_save_xml(self, val):
return str(val)
class Any(Object):
class __T(TBase):
def validate(self, val, regularize=False, depth=-1):
if isinstance(val, Object):
val.validate(regularize, depth)
return val
class Int(Object):
dummy_for = int
class __T(TBase):
strict = True
def to_save_xml(self, value):
return repr(value)
class Float(Object):
dummy_for = float
class __T(TBase):
strict = True
def to_save_xml(self, value):
return repr(value)
class Complex(Object):
dummy_for = complex
class __T(TBase):
strict = True
def regularize_extra(self, val):
if isinstance(val, list) or isinstance(val, tuple):
assert len(val) == 2
val = complex(*val)
elif not isinstance(val, complex):
val = complex(val)
return val
def to_save(self, value):
return repr(value)
def to_save_xml(self, value):
return repr(value)
class Bool(Object):
dummy_for = bool
class __T(TBase):
strict = True
def regularize_extra(self, val):
if isinstance(val, (str, newstr)):
if val.lower().strip() in ('0', 'false'):
return False
return bool(val)
def to_save_xml(self, value):
return repr(bool(value)).lower()
class String(Object):
dummy_for = str
class __T(TBase):
def __init__(self, *args, **kwargs):
yamlstyle = kwargs.pop('yamlstyle', None)
TBase.__init__(self, *args, **kwargs)
self.style_cls = str_style_map[yamlstyle]
def to_save(self, val):
return self.style_cls(val)
class Unicode(Object):
dummy_for = newstr
class __T(TBase):
def __init__(self, *args, **kwargs):
yamlstyle = kwargs.pop('yamlstyle', None)
TBase.__init__(self, *args, **kwargs)
self.style_cls = unicode_style_map[yamlstyle]
def to_save(self, val):
return self.style_cls(val)
guts_plain_dummy_types = (String, Unicode, Int, Float, Complex, Bool)
class Dict(Object):
dummy_for = dict
class __T(TBase):
multivalued = dict
def __init__(self, key_t=Any.T(), content_t=Any.T(), *args, **kwargs):
TBase.__init__(self, *args, **kwargs)
assert isinstance(key_t, TBase)
assert isinstance(content_t, TBase)
self.key_t = key_t
self.content_t = content_t
self.content_t.parent = self
def default(self):
if self._default is not None:
return dict(
(make_default(k), make_default(v))
for (k, v) in self._default.items())
if self.optional:
return None
else:
return {}
def has_default(self):
return True
def validate(self, val, regularize, depth):
return TBase.validate(self, val, regularize, depth+1)
def validate_children(self, val, regularize, depth):
for key, ele in list(val.items()):
newkey = self.key_t.validate(key, regularize, depth-1)
newele = self.content_t.validate(ele, regularize, depth-1)
if regularize:
if newkey is not key or newele is not ele:
del val[key]
val[newkey] = newele
return val
def to_save(self, val):
return dict((self.key_t.to_save(k), self.content_t.to_save(v))
for (k, v) in val.items())
def to_save_xml(self, val):
raise NotImplementedError()
def classname_for_help(self, strip_module=''):
return '``dict`` of %s objects' % \
self.content_t.classname_for_help(strip_module=strip_module)
class List(Object):
dummy_for = list
class __T(TBase):
multivalued = list
def __init__(self, content_t=Any.T(), *args, **kwargs):
yamlstyle = kwargs.pop('yamlstyle', None)
TBase.__init__(self, *args, **kwargs)
assert isinstance(content_t, TBase) or isinstance(content_t, Defer)
self.content_t = content_t
self.content_t.parent = self
self.style_cls = list_style_map[yamlstyle]
def default(self):
if self._default is not None:
return [make_default(x) for x in self._default]
if self.optional:
return None
else:
return []
def has_default(self):
return True
def validate(self, val, regularize, depth):
return TBase.validate(self, val, regularize, depth+1)
def validate_children(self, val, regularize, depth):
for i, ele in enumerate(val):
newele = self.content_t.validate(ele, regularize, depth-1)
if regularize and newele is not ele:
val[i] = newele
return val
def to_save(self, val):
return self.style_cls(self.content_t.to_save(v) for v in val)
def to_save_xml(self, val):
return [self.content_t.to_save_xml(v) for v in val]
def deferred(self):
if isinstance(self.content_t, Defer):
return [self.content_t]
return []
def process_deferred(self, defer, t_inst):
if defer is self.content_t:
self.content_t = t_inst
def classname_for_help(self, strip_module=''):
return '``list`` of %s objects' % \
self.content_t.classname_for_help(strip_module=strip_module)
def make_typed_list_class(t):
class TL(List):
class __T(List.T):
def __init__(self, *args, **kwargs):
List.T.__init__(self, content_t=t.T(), *args, **kwargs)
return TL
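# Editor's note (illustrative names): make_typed_list_class(t) builds a List
# subclass whose T is pre-bound to t.T(), so the declaration below is
# effectively the same as using List.T(Float.T(), ...) directly:
#
#   FloatList = make_typed_list_class(Float)
#
#   class A(Object):
#       values = FloatList.T(default=[])   # ~ List.T(Float.T(), default=[])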
class Tuple(Object):
dummy_for = tuple
class __T(TBase):
multivalued = tuple
def __init__(self, n=None, content_t=Any.T(), *args, **kwargs):
TBase.__init__(self, *args, **kwargs)
assert isinstance(content_t, TBase)
self.content_t = content_t
self.content_t.parent = self
self.n = n
def default(self):
if self._default is not None:
return tuple(
make_default(x) for x in self._default)
elif self.optional:
return None
else:
if self.n is not None:
return tuple(
self.content_t.default() for x in range(self.n))
else:
return tuple()
def has_default(self):
return True
def validate(self, val, regularize, depth):
return TBase.validate(self, val, regularize, depth+1)
def validate_extra(self, val):
if self.n is not None and len(val) != self.n:
raise ValidationError(
'%s should have length %i' % (self.xname(), self.n))
def validate_children(self, val, regularize, depth):
if not regularize:
for ele in val:
self.content_t.validate(ele, regularize, depth-1)
return val
else:
newval = []
isnew = False
for ele in val:
newele = self.content_t.validate(ele, regularize, depth-1)
newval.append(newele)
if newele is not ele:
isnew = True
if isnew:
return tuple(newval)
else:
return val
def to_save(self, val):
return tuple(self.content_t.to_save(v) for v in val)
def to_save_xml(self, val):
return [self.content_t.to_save_xml(v) for v in val]
def classname_for_help(self, strip_module=''):
if self.n is not None:
return '``tuple`` of %i %s objects' % (
self.n, self.content_t.classname_for_help(
strip_module=strip_module))
else:
return '``tuple`` of %s objects' % (
self.content_t.classname_for_help(
strip_module=strip_module))
re_tz = re.compile(r'(Z|([+-][0-2][0-9])(:?([0-5][0-9]))?)$')
class Timestamp(Object):
dummy_for = (hpfloat, float)
dummy_for_description = 'time_float'
class __T(TBase):
def regularize_extra(self, val):
time_float = get_time_float()
if isinstance(val, datetime.datetime):
tt = val.utctimetuple()
val = time_float(calendar.timegm(tt)) + val.microsecond * 1e-6
elif isinstance(val, datetime.date):
tt = val.timetuple()
val = time_float(calendar.timegm(tt))
elif isinstance(val, (str, newstr)):
val = val.strip()
tz_offset = 0
m = re_tz.search(val)
if m:
sh = m.group(2)
sm = m.group(4)
tz_offset = (int(sh)*3600 if sh else 0) \
+ (int(sm)*60 if sm else 0)
val = re_tz.sub('', val)
if len(val) > 10 and val[10] == 'T':
val = val.replace('T', ' ', 1)
try:
val = str_to_time(val) - tz_offset
except TimeStrError:
raise ValidationError(
'%s: cannot parse time/date: %s' % (self.xname(), val))
elif isinstance(val, (int, float)):
val = time_float(val)
else:
raise ValidationError(
'%s: cannot convert "%s" to type %s' % (
self.xname(), val, time_float))
return val
def to_save(self, val):
return time_to_str(val, format='%Y-%m-%d %H:%M:%S.9FRAC')\
.rstrip('0').rstrip('.')
def to_save_xml(self, val):
return time_to_str(val, format='%Y-%m-%dT%H:%M:%S.9FRAC')\
.rstrip('0').rstrip('.') + 'Z'
@classmethod
def D(self, s):
return TimestampDefaultMaker(s)
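# Editor's note (sketch): Timestamp.T() regularizes several input forms to a
# float (or hpfloat) of seconds since the epoch, as implemented above:
#
#   '2010-01-01 10:20:30'        # date/time string, optional trailing Z/+HH:MM
#   '2010-01-01T10:20:30.5Z'     # ISO-like 'T' separator is accepted too
#   datetime.datetime(2010, 1, 1, 10, 20, 30)
#   1262341230.0                 # plain int/float values pass through
#
# and serializes back as 'YYYY-MM-DD HH:MM:SS[.frac]' ('T'/'Z' form in XML).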
class DateTimestamp(Object):
dummy_for = (hpfloat, float)
dummy_for_description = 'time_float'
class __T(TBase):
def regularize_extra(self, val):
time_float = get_time_float()
if isinstance(val, datetime.datetime):
tt = val.utctimetuple()
val = time_float(calendar.timegm(tt)) + val.microsecond * 1e-6
elif isinstance(val, datetime.date):
tt = val.timetuple()
val = time_float(calendar.timegm(tt))
elif isinstance(val, (str, newstr)):
val = str_to_time(val, format='%Y-%m-%d')
elif isinstance(val, int):
val = time_float(val)
return val
def to_save(self, val):
return time_to_str(val, format='%Y-%m-%d')
def to_save_xml(self, val):
return time_to_str(val, format='%Y-%m-%d')
@classmethod
def D(self, s):
return TimestampDefaultMaker(s, format='%Y-%m-%d')
class StringPattern(String):
'''Any ``str`` matching pattern ``%(pattern)s``.'''
dummy_for = str
pattern = '.*'
class __T(String.T):
def __init__(self, pattern=None, *args, **kwargs):
String.T.__init__(self, *args, **kwargs)
if pattern is not None:
self.pattern = pattern
else:
self.pattern = self.dummy_cls.pattern
def validate_extra(self, val):
pat = self.pattern
if not re.search(pat, val):
raise ValidationError('%s: "%s" does not match pattern %s' % (
self.xname(), val, repr(pat)))
@classmethod
def class_help_string(cls):
dcls = cls.dummy_cls
doc = dcls.__doc_template__ or StringPattern.__doc_template__
return doc % {'pattern': repr(dcls.pattern)}
class UnicodePattern(Unicode):
'''Any ``unicode`` matching pattern ``%(pattern)s``.'''
dummy_for = newstr
pattern = '.*'
class __T(TBase):
def __init__(self, pattern=None, *args, **kwargs):
TBase.__init__(self, *args, **kwargs)
if pattern is not None:
self.pattern = pattern
else:
self.pattern = self.dummy_cls.pattern
def validate_extra(self, val):
pat = self.pattern
if not re.search(pat, val, flags=re.UNICODE):
raise ValidationError('%s: "%s" does not match pattern %s' % (
self.xname(), val, repr(pat)))
@classmethod
def class_help_string(cls):
dcls = cls.dummy_cls
doc = dcls.__doc_template__ or UnicodePattern.__doc_template__
return doc % {'pattern': repr(dcls.pattern)}
class StringChoice(String):
'''Any ``str`` out of ``%(choices)s``.'''
dummy_for = str
choices = []
ignore_case = False
class __T(String.T):
def __init__(self, choices=None, ignore_case=None, *args, **kwargs):
String.T.__init__(self, *args, **kwargs)
if choices is not None:
self.choices = choices
else:
self.choices = self.dummy_cls.choices
if ignore_case is not None:
self.ignore_case = ignore_case
else:
self.ignore_case = self.dummy_cls.ignore_case
if self.ignore_case:
self.choices = [x.upper() for x in self.choices]
def validate_extra(self, val):
if self.ignore_case:
val = val.upper()
if val not in self.choices:
raise ValidationError(
'%s: "%s" is not a valid choice out of %s' % (
self.xname(), val, repr(self.choices)))
@classmethod
def class_help_string(cls):
dcls = cls.dummy_cls
doc = dcls.__doc_template__ or StringChoice.__doc_template__
return doc % {'choices': repr(dcls.choices)}
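# Editor's note (illustrative): a constrained string type is declared by
# subclassing StringChoice, e.g.
#
#   class Phase(StringChoice):
#       choices = ['P', 'S']
#
#   class Pick(Object):
#       phase = Phase.T(default='P')
#
# Values outside choices (or with the wrong case, unless ignore_case = True)
# make validate() raise a ValidationError.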
# this will not always work...
class Union(Object):
members = []
dummy_for = str
class __T(TBase):
def __init__(self, members=None, *args, **kwargs):
TBase.__init__(self, *args, **kwargs)
if members is not None:
self.members = members
else:
self.members = self.dummy_cls.members
def validate(self, val, regularize=False, depth=-1):
assert self.members
e2 = None
for member in self.members:
try:
return member.validate(val, regularize, depth=depth)
except ValidationError as e:
e2 = e
raise e2
class Choice(Object):
choices = []
class __T(TBase):
def __init__(self, choices=None, *args, **kwargs):
TBase.__init__(self, *args, **kwargs)
if choices is not None:
self.choices = choices
else:
self.choices = self.dummy_cls.choices
self.cls_to_xmltagname = dict(
(t.cls, t.get_xmltagname()) for t in self.choices)
def validate(self, val, regularize=False, depth=-1):
if self.optional and val is None:
return val
t = None
for tc in self.choices:
is_derived = isinstance(val, tc.cls)
is_exact = type(val) == tc.cls
if not (not tc.strict and not is_derived or
tc.strict and not is_exact):
t = tc
break
if t is None:
if regularize:
ok = False
for tc in self.choices:
try:
val = tc.regularize_extra(val)
ok = True
t = tc
break
except (ValidationError, ValueError):
pass
if not ok:
raise ValidationError(
'%s: could not convert "%s" to any type out of '
'(%s)' % (self.xname(), val, ','.join(
classnames(x.cls) for x in self.choices)))
else:
raise ValidationError(
'%s: "%s" (type: %s) is not of any type out of '
'(%s)' % (self.xname(), val, type(val), ','.join(
classnames(x.cls) for x in self.choices)))
validator = t
if isinstance(t.cls, tuple):
clss = t.cls
else:
clss = (t.cls,)
for cls in clss:
try:
if type(val) != cls and isinstance(val, cls):
validator = val.T.instance
except AttributeError:
pass
validator.validate_extra(val)
if depth != 0:
val = validator.validate_children(val, regularize, depth)
return val
def extend_xmlelements(self, elems, v):
elems.append((
self.cls_to_xmltagname[type(v)].split(' ', 1)[-1], v))
def _dump(
object, stream,
header=False,
Dumper=GutsSafeDumper,
_dump_function=yaml.dump):
if not getattr(stream, 'encoding', None):
enc = encode_utf8
else:
enc = no_encode
if header:
stream.write(enc(u'%YAML 1.1\n'))
if isinstance(header, (str, newstr)):
banner = u'\n'.join('# ' + x for x in header.splitlines()) + '\n'
stream.write(enc(banner))
_dump_function(
object,
stream=stream,
encoding='utf-8',
explicit_start=True,
Dumper=Dumper)
def _dump_all(object, stream, header=True, Dumper=GutsSafeDumper):
_dump(object, stream=stream, header=header, _dump_function=yaml.dump_all)
def _load(stream,
Loader=GutsSafeLoader, allow_include=None, filename=None,
included_files=None):
class _Loader(Loader):
_filename = filename
_allow_include = allow_include
_included_files = included_files or []
return yaml.load(stream=stream, Loader=_Loader)
def _load_all(stream,
Loader=GutsSafeLoader, allow_include=None, filename=None):
class _Loader(Loader):
_filename = filename
_allow_include = allow_include
return list(yaml.load_all(stream=stream, Loader=_Loader))
def _iload_all(stream,
Loader=GutsSafeLoader, allow_include=None, filename=None):
class _Loader(Loader):
_filename = filename
_allow_include = allow_include
return yaml.load_all(stream=stream, Loader=_Loader)
def multi_representer(dumper, data):
node = dumper.represent_mapping(
'!'+data.T.tagname, data.T.inamevals_to_save(data), flow_style=False)
return node
# hack for compatibility with early GF Store versions
re_compatibility = re.compile(
r'^pyrocko\.(trace|gf\.(meta|seismosizer)|fomosto\.'
r'(dummy|poel|qseis|qssp))\.'
)
def multi_constructor(loader, tag_suffix, node):
tagname = str(tag_suffix)
tagname = re_compatibility.sub('pf.', tagname)
cls = g_tagname_to_class[tagname]
kwargs = dict(iter(loader.construct_pairs(node, deep=True)))
o = cls(**kwargs)
o.validate(regularize=True, depth=1)
return o
def include_constructor(loader, node):
allow_include = loader._allow_include \
if loader._allow_include is not None \
else ALLOW_INCLUDE
if not allow_include:
raise EnvironmentError(
'Not allowed to include YAML. Load with allow_include=True')
if isinstance(node, yaml.nodes.ScalarNode):
inc_file = loader.construct_scalar(node)
else:
raise TypeError('Unsupported YAML node %s' % repr(node))
if loader._filename is not None and not op.isabs(inc_file):
inc_file = op.join(op.dirname(loader._filename), inc_file)
if not op.isfile(inc_file):
raise FileNotFoundError(inc_file)
included_files = list(loader._included_files)
if loader._filename is not None:
included_files.append(op.abspath(loader._filename))
for included_file in loader._included_files:
if op.samefile(inc_file, included_file):
raise ImportError(
'Circular import of file "%s". Include path: %s' % (
op.abspath(inc_file),
' -> '.join('"%s"' % s for s in included_files)))
with open(inc_file) as f:
return _load(
f,
Loader=loader.__class__, filename=inc_file,
allow_include=True,
included_files=included_files)
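# Editor's note (sketch): the '!include' constructor registered below lets a
# YAML document pull in another file, e.g. an attribute written as
#
#   stations: !include 'stations.yaml'
#
# but only when loading with allow_include=True (or ALLOW_INCLUDE enabled).
# Relative paths are resolved against the including file and circular
# includes raise ImportError.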
def dict_noflow_representer(dumper, data):
return dumper.represent_mapping(
'tag:yaml.org,2002:map', data, flow_style=False)
yaml.add_multi_representer(Object, multi_representer, Dumper=GutsSafeDumper)
yaml.add_constructor('!include', include_constructor, Loader=GutsSafeLoader)
yaml.add_multi_constructor('!', multi_constructor, Loader=GutsSafeLoader)
yaml.add_representer(dict, dict_noflow_representer, Dumper=GutsSafeDumper)
def newstr_representer(dumper, data):
return dumper.represent_scalar(
'tag:yaml.org,2002:str', unicode(data))
yaml.add_representer(newstr, newstr_representer, Dumper=GutsSafeDumper)
class Constructor(object):
def __init__(self, add_namespace_maps=False, strict=False, ns_hints=None,
ns_ignore=False):
self.stack = []
self.queue = []
self.namespaces = defaultdict(list)
self.add_namespace_maps = add_namespace_maps
self.strict = strict
self.ns_hints = ns_hints
self.ns_ignore = ns_ignore
def start_element(self, ns_name, attrs):
if self.ns_ignore:
ns_name = ns_name.split(' ')[-1]
if -1 == ns_name.find(' '):
if self.ns_hints is None and ns_name in g_guessable_xmlns:
self.ns_hints = g_guessable_xmlns[ns_name]
if self.ns_hints:
ns_names = [
ns_hint + ' ' + ns_name for ns_hint in self.ns_hints]
elif self.ns_hints is None:
ns_names = [' ' + ns_name]
else:
ns_names = [ns_name]
for ns_name in ns_names:
if self.stack and self.stack[-1][1] is not None:
cls = self.stack[-1][1].T.xmltagname_to_class.get(
ns_name, None)
if isinstance(cls, tuple):
cls = None
else:
if cls is not None and (
not issubclass(cls, Object)
or issubclass(cls, SObject)):
cls = None
else:
cls = g_xmltagname_to_class.get(ns_name, None)
if cls:
break
self.stack.append((ns_name, cls, attrs, [], []))
def end_element(self, _):
ns_name, cls, attrs, content2, content1 = self.stack.pop()
ns = ns_name.split(' ', 1)[0]
if cls is not None:
content2.extend(
(ns + ' ' + k if -1 == k.find(' ') else k, v)
for (k, v) in attrs.items())
content2.append((None, ''.join(content1)))
o = cls(**cls.T.translate_from_xml(content2, self.strict))
o.validate(regularize=True, depth=1)
if self.add_namespace_maps:
o.namespace_map = self.get_current_namespace_map()
if self.stack and not all(x[1] is None for x in self.stack):
self.stack[-1][-2].append((ns_name, o))
else:
self.queue.append(o)
else:
content = [''.join(content1)]
if self.stack:
for c in content:
self.stack[-1][-2].append((ns_name, c))
def characters(self, char_content):
if self.stack:
self.stack[-1][-1].append(char_content)
def start_namespace(self, ns, uri):
self.namespaces[ns].append(uri)
def end_namespace(self, ns):
self.namespaces[ns].pop()
def get_current_namespace_map(self):
return dict((k, v[-1]) for (k, v) in self.namespaces.items() if v)
def get_queued_elements(self):
queue = self.queue
self.queue = []
return queue
def _iload_all_xml(
stream,
bufsize=100000,
add_namespace_maps=False,
strict=False,
ns_hints=None,
ns_ignore=False):
from xml.parsers.expat import ParserCreate
parser = ParserCreate('UTF-8', namespace_separator=' ')
handler = Constructor(
add_namespace_maps=add_namespace_maps,
strict=strict,
ns_hints=ns_hints,
ns_ignore=ns_ignore)
parser.StartElementHandler = handler.start_element
parser.EndElementHandler = handler.end_element
parser.CharacterDataHandler = handler.characters
parser.StartNamespaceDeclHandler = handler.start_namespace
parser.EndNamespaceDeclHandler = handler.end_namespace
while True:
data = stream.read(bufsize)
parser.Parse(data, bool(not data))
for element in handler.get_queued_elements():
yield element
if not data:
break
def _load_all_xml(*args, **kwargs):
return list(_iload_all_xml(*args, **kwargs))
def _load_xml(*args, **kwargs):
g = _iload_all_xml(*args, **kwargs)
return next(g)
def _dump_all_xml(objects, stream, root_element_name='root', header=True):
if not getattr(stream, 'encoding', None):
enc = encode_utf8
else:
enc = no_encode
_dump_xml_header(stream, header)
beg = u'<%s>\n' % root_element_name
end = u'</%s>\n' % root_element_name
stream.write(enc(beg))
for ob in objects:
_dump_xml(ob, stream=stream)
stream.write(enc(end))
def _dump_xml_header(stream, banner=None):
if not getattr(stream, 'encoding', None):
enc = encode_utf8
else:
enc = no_encode
stream.write(enc(u'<?xml version="1.0" encoding="UTF-8" ?>\n'))
if isinstance(banner, (str, newstr)):
stream.write(enc(u'<!-- %s -->\n' % banner))
def _dump_xml(
obj, stream, depth=0, ns_name=None, header=False, ns_map=[],
ns_ignore=False):
from xml.sax.saxutils import escape, quoteattr
if not getattr(stream, 'encoding', None):
enc = encode_utf8
else:
enc = no_encode
if depth == 0 and header:
_dump_xml_header(stream, header)
indent = ' '*depth*2
if ns_name is None:
ns_name = obj.T.instance.get_xmltagname()
if -1 != ns_name.find(' '):
ns, name = ns_name.split(' ')
else:
ns, name = '', ns_name
if isinstance(obj, Object):
obj.validate(depth=1)
attrs = []
elems = []
added_ns = False
if not ns_ignore and ns and (not ns_map or ns_map[-1] != ns):
attrs.append(('xmlns', ns))
ns_map.append(ns)
added_ns = True
for prop, v in obj.T.ipropvals_to_save(obj, xmlmode=True):
if prop.xmlstyle == 'attribute':
assert not prop.multivalued
assert not isinstance(v, Object)
attrs.append((prop.effective_xmltagname, v))
elif prop.xmlstyle == 'content':
assert not prop.multivalued
assert not isinstance(v, Object)
elems.append((None, v))
else:
prop.extend_xmlelements(elems, v)
attr_str = ''
if attrs:
attr_str = ' ' + ' '.join(
'%s=%s' % (k.split(' ')[-1], quoteattr(str(v)))
for (k, v) in attrs)
if not elems:
stream.write(enc(u'%s<%s%s />\n' % (indent, name, attr_str)))
else:
oneline = len(elems) == 1 and elems[0][0] is None
stream.write(enc(u'%s<%s%s>%s' % (
indent,
name,
attr_str,
'' if oneline else '\n')))
for (k, v) in elems:
if k is None:
stream.write(enc(escape(newstr(v), {'\0': '�'})))
else:
_dump_xml(v, stream, depth+1, k, False, ns_map, ns_ignore)
stream.write(enc(u'%s</%s>\n' % (
'' if oneline else indent, name)))
if added_ns:
ns_map.pop()
else:
stream.write(enc(u'%s<%s>%s</%s>\n' % (
indent,
name,
escape(newstr(obj), {'\0': '�'}),
name)))
def walk(x, typ=None, path=()):
if typ is None or isinstance(x, typ):
yield path, x
if isinstance(x, Object):
for (prop, val) in x.T.ipropvals(x):
if prop.multivalued:
if val is not None:
for iele, ele in enumerate(val):
for y in walk(ele, typ,
path=path + ((prop.name, iele),)):
yield y
else:
for y in walk(val, typ, path=path+(prop.name,)):
yield y
def clone(x, pool=None):
'''
Clone guts object tree.
Traverses guts object tree and recursively clones all guts attributes,
falling back to :py:func:`copy.deepcopy` for non-guts objects. Objects
deriving from :py:class:`Object` are instantiated using their respective
init function. Multiply referenced objects in the source tree are multiply
referenced also in the destination tree.
This function can be used to clone guts objects ignoring any contained
run-time state, i.e. any of their attributes not defined as a guts
property.
'''
if pool is None:
pool = {}
if id(x) in pool:
x_copy = pool[id(x)]
else:
if isinstance(x, Object):
d = {}
for (prop, y) in x.T.ipropvals(x):
if y is not None:
if not prop.multivalued:
y_copy = clone(y, pool)
elif prop.multivalued is dict:
y_copy = dict(
(clone(zk, pool), clone(zv, pool))
for (zk, zv) in y.items())
else:
y_copy = type(y)(clone(z, pool) for z in y)
else:
y_copy = y
d[prop.name] = y_copy
x_copy = x.__class__(**d)
else:
x_copy = copy.deepcopy(x)
pool[id(x)] = x_copy
return x_copy
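# Illustrative sketch (not part of the original module): cloning a guts tree.
# 'Station' and 'Channel' are hypothetical Object subclasses.
#
#   original = Station(code='XYZ', channels=[Channel(name='BHZ')])
#   duplicate = clone(original)
#   assert duplicate is not original
#   assert duplicate.channels[0] is not original.channels[0]
#
# Attributes that are not guts properties are ignored, as described in the
# docstring above; multiply referenced objects stay shared in the copy.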
class YPathError(Exception):
'''This exception is raised for invalid ypath specifications.'''
pass
def _parse_yname(yname):
ident = r'[a-zA-Z][a-zA-Z0-9_]*'
rint = r'-?[0-9]+'
m = re.match(
r'^(%s)(\[((%s)?(:)(%s)?|(%s))\])?$'
% (ident, rint, rint, rint), yname)
if not m:
raise YPathError('Syntax error in component: "%s"' % yname)
d = dict(
name=m.group(1))
if m.group(2):
if m.group(5):
istart = iend = None
if m.group(4):
istart = int(m.group(4))
if m.group(6):
iend = int(m.group(6))
d['slice'] = (istart, iend)
else:
d['index'] = int(m.group(7))
return d
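# Illustrative examples (derived from the regex above) of what _parse_yname
# returns for single ypath components:
#
#   _parse_yname('child')       -> {'name': 'child'}
#   _parse_yname('child[2]')    -> {'name': 'child', 'index': 2}
#   _parse_yname('child[1:3]')  -> {'name': 'child', 'slice': (1, 3)}
#   _parse_yname('child[:]')    -> {'name': 'child', 'slice': (None, None)}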
def _decend(obj, ynames):
if ynames:
for sobj in iter_elements(obj, ynames):
yield sobj
else:
yield obj
def iter_elements(obj, ypath):
'''
Generator yielding elements matching a given ypath specification.
:param obj: guts :py:class:`Object` instance
:param ypath: Dot-separated object path (e.g. 'root.child.child').
        To access list objects use slice notation (e.g.
'root.child[:].child[1:3].child[1]').
Raises :py:exc:`YPathError` on failure.
'''
try:
if isinstance(ypath, str):
ynames = ypath.split('.')
else:
ynames = ypath
yname = ynames[0]
ynames = ynames[1:]
d = _parse_yname(yname)
if d['name'] not in obj.T.propnames:
raise AttributeError(d['name'])
obj = getattr(obj, d['name'])
if 'index' in d:
sobj = obj[d['index']]
for ssobj in _decend(sobj, ynames):
yield ssobj
elif 'slice' in d:
for i in range(*slice(*d['slice']).indices(len(obj))):
sobj = obj[i]
for ssobj in _decend(sobj, ynames):
yield ssobj
else:
for sobj in _decend(obj, ynames):
yield sobj
except (AttributeError, IndexError) as e:
raise YPathError('Invalid ypath: "%s" (%s)' % (ypath, str(e)))
def get_elements(obj, ypath):
'''
Get all elements matching a given ypath specification.
:param obj: guts :py:class:`Object` instance
:param ypath: Dot-separated object path (e.g. 'root.child.child').
        To access list objects use slice notation (e.g.
'root.child[:].child[1:3].child[1]').
Raises :py:exc:`YPathError` on failure.
'''
return list(iter_elements(obj, ypath))
def set_elements(obj, ypath, value, validate=False, regularize=False):
'''
Set elements matching a given ypath specification.
:param obj: guts :py:class:`Object` instance
:param ypath: Dot-separated object path (e.g. 'root.child.child').
        To access list objects use slice notation (e.g.
'root.child[:].child[1:3].child[1]').
:param value: All matching elements will be set to `value`.
:param validate: Whether to validate affected subtrees.
:param regularize: Whether to regularize affected subtrees.
Raises :py:exc:`YPathError` on failure.
'''
ynames = ypath.split('.')
try:
d = _parse_yname(ynames[-1])
for sobj in iter_elements(obj, ynames[:-1]):
if d['name'] not in sobj.T.propnames:
raise AttributeError(d['name'])
if 'index' in d:
ssobj = getattr(sobj, d['name'])
ssobj[d['index']] = value
elif 'slice' in d:
ssobj = getattr(sobj, d['name'])
for i in range(*slice(*d['slice']).indices(len(ssobj))):
ssobj[i] = value
else:
setattr(sobj, d['name'], value)
if regularize:
sobj.regularize()
if validate:
sobj.validate()
except (AttributeError, IndexError, YPathError) as e:
raise YPathError('Invalid ypath: "%s" (%s)' % (ypath, str(e)))
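# Illustrative sketch (not part of the original module): ypath access on a
# hypothetical guts tree 'root' having a multivalued 'stations' property,
# whose elements in turn have 'code', 'channels' and 'enabled' properties:
#
#   get_elements(root, 'stations[:].code')            # all station codes
#   get_elements(root, 'stations[0].channels[1:3]')   # a slice of channels
#   set_elements(root, 'stations[:].enabled', True, validate=True)
#
# Both helpers raise YPathError for malformed paths or unknown properties.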
def zip_walk(x, typ=None, path=(), stack=()):
if typ is None or isinstance(x, typ):
yield path, stack + (x,)
if isinstance(x, Object):
for (prop, val) in x.T.ipropvals(x):
if prop.multivalued:
if val is not None:
for iele, ele in enumerate(val):
for y in zip_walk(
ele, typ,
path=path + ((prop.name, iele),),
stack=stack + (x,)):
yield y
else:
for y in zip_walk(val, typ,
path=path+(prop.name,),
stack=stack + (x,)):
yield y
def path_element(x):
if isinstance(x, tuple):
return '%s[%i]' % x
else:
return x
def path_to_str(path):
return '.'.join(path_element(x) for x in path)
@expand_stream_args('w')
def dump(*args, **kwargs):
return _dump(*args, **kwargs)
@expand_stream_args('r')
def load(*args, **kwargs):
return _load(*args, **kwargs)
def load_string(s, *args, **kwargs):
return load(string=s, *args, **kwargs)
@expand_stream_args('w')
def dump_all(*args, **kwargs):
return _dump_all(*args, **kwargs)
@expand_stream_args('r')
def load_all(*args, **kwargs):
return _load_all(*args, **kwargs)
@expand_stream_args('r')
def iload_all(*args, **kwargs):
return _iload_all(*args, **kwargs)
@expand_stream_args('w')
def dump_xml(*args, **kwargs):
return _dump_xml(*args, **kwargs)
@expand_stream_args('r')
def load_xml(*args, **kwargs):
kwargs.pop('filename', None)
return _load_xml(*args, **kwargs)
def load_xml_string(s, *args, **kwargs):
return load_xml(string=s, *args, **kwargs)
@expand_stream_args('w')
def dump_all_xml(*args, **kwargs):
return _dump_all_xml(*args, **kwargs)
@expand_stream_args('r')
def load_all_xml(*args, **kwargs):
kwargs.pop('filename', None)
return _load_all_xml(*args, **kwargs)
@expand_stream_args('r')
def iload_all_xml(*args, **kwargs):
kwargs.pop('filename', None)
return _iload_all_xml(*args, **kwargs)
__all__ = guts_types + [
'guts_types', 'TBase', 'ValidationError',
'ArgumentError', 'Defer',
'dump', 'load',
'dump_all', 'load_all', 'iload_all',
'dump_xml', 'load_xml',
'dump_all_xml', 'load_all_xml', 'iload_all_xml',
'load_string',
'load_xml_string',
'make_typed_list_class', 'walk', 'zip_walk', 'path_to_str'
]
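# Illustrative sketch (assumption about the expand_stream_args decorators,
# which are defined earlier in this module): the wrappers above accept a
# stream, a 'filename' or a 'string' keyword and forward to the _dump*/_load*
# implementations, e.g.
#
#   text = dump(obj)                           # serialize to a YAML string
#   obj2 = load_string(text)
#   dump_all_xml([obj], filename='out.xml')    # hypothetical file name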
| gpl-3.0 | 227,704,453,569,894,140 | 27.204367 | 79 | 0.520422 | false |
Tanjay94/auxiclean | auxiclean/unittests/test_distributeur.py | 1 | 1987 | import unittest
import tempfile
import os
from auxiclean import Distributeur
class TestBase(unittest.TestCase):
# Cours (nom),Cours (code),Dispo,Programme
cours = {}
# Nom,Premier Choix,Deuxieme Choix,Troisieme Choix,Quatrieme Choix,
# Cinquieme Choix,Disponibilite,Tp_total,Schoplarite,Nobel,
# Programme d'etude,Cote Z
candidatures = {}
def setUp(self):
# use temporary file to do the tests
self.tempdir = tempfile.TemporaryDirectory()
self.cours_path = os.path.join(self.tempdir.name, "cours.csv")
self.stud_path = os.path.join(self.tempdir.name, "students.csv")
with open(self.cours_path, "w") as f:
# write cours
f.write("First line skipped\n")
for c, v in self.cours.items():
s = ",".join([c] + v) + "\n"
f.write(s)
with open(self.stud_path, "w") as f:
# write candidatures
f.write("First line skipped\n")
for name, v in self.candidatures.items():
s = ",".join([name] + v) + "\n"
f.write(s)
self.distributeur = Distributeur(self.stud_path,
self.cours_path)
def tearDown(self):
self.tempdir.cleanup()
del self.tempdir
del self.distributeur
class TestDistributeur(TestBase):
cours = {"Electro": ["1", "1", "1"],
"Astro": ["10", "1", "1"]}
candidatures = {"Albert A": ["101", "403", "0", "0", "0", "2",
"6", "2", "0", "2", "3"],
"Claude C": ["210", "211", "0", "0", "0", "2", "3", "3",
"0", "3", "3"]}
def test_running(self):
# simple test that checks that both candidates receive
# their first choice.
dist = self.distributeur.distribution
self.assertEqual(dist["Electro"][0], "Albert A")
self.assertEqual(dist["Astro"][0], "Claude C")
| mit | 1,499,030,532,619,302,700 | 35.796296 | 76 | 0.529945 | false |
wkentaro/fcn | examples/voc/train_fcn16s.py | 1 | 2034 | #!/usr/bin/env python
import argparse
import datetime
import os
import os.path as osp
os.environ['MPLBACKEND'] = 'Agg' # NOQA
import chainer
import fcn
from train_fcn32s import get_data
from train_fcn32s import get_trainer
here = osp.dirname(osp.abspath(__file__))
def main():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-g', '--gpu', type=int, required=True, help='gpu id')
parser.add_argument(
'--fcn32s-file', default=fcn.models.FCN32s.pretrained_model,
help='pretrained model file of FCN32s')
args = parser.parse_args()
args.model = 'FCN16s'
args.lr = 1.0e-12
args.momentum = 0.99
args.weight_decay = 0.0005
args.max_iteration = 100000
args.interval_print = 20
args.interval_eval = 4000
now = datetime.datetime.now()
args.timestamp = now.isoformat()
args.out = osp.join(here, 'logs', now.strftime('%Y%m%d_%H%M%S'))
# data
class_names, iter_train, iter_valid, iter_valid_raw = get_data()
n_class = len(class_names)
# model
fcn32s = fcn.models.FCN32s()
chainer.serializers.load_npz(args.fcn32s_file, fcn32s)
model = fcn.models.FCN16s(n_class=n_class)
model.init_from_fcn32s(fcn32s)
if args.gpu >= 0:
chainer.cuda.get_device(args.gpu).use()
model.to_gpu()
# optimizer
optimizer = chainer.optimizers.MomentumSGD(
lr=args.lr, momentum=args.momentum)
optimizer.setup(model)
optimizer.add_hook(chainer.optimizer.WeightDecay(rate=args.weight_decay))
for p in model.params():
if p.name == 'b':
p.update_rule = chainer.optimizers.momentum_sgd.MomentumSGDRule(
lr=optimizer.lr * 2, momentum=0)
model.upscore2.disable_update()
model.upscore16.disable_update()
# trainer
trainer = get_trainer(optimizer, iter_train, iter_valid, iter_valid_raw,
class_names, args)
trainer.run()
if __name__ == '__main__':
main()
| mit | 2,136,855,453,675,468,800 | 26.12 | 78 | 0.648476 | false |
ADEQUATeDQ/portalmonitor | odpw/utils/error_handling.py | 1 | 3597 | import traceback
from ast import literal_eval
from collections import defaultdict
import ckanapi
import exceptions
import requests
import urlnorm
import structlog
log =structlog.get_logger()
class TimeoutError(Exception):
def __init__(self, message, timeout):
# Call the base class constructor with the parameters it needs
super(TimeoutError, self).__init__(message)
# Now for your custom code...
self.timeout = timeout
class ErrorHandler():
exceptions=defaultdict(long)
DEBUG=False
@classmethod
def handleError(cls, log, msg=None, exception=None, debug=False, **kwargs):
name=type(exception).__name__
cls.exceptions[name] +=1
if debug:
print(traceback.format_exc())
log.error(msg, exctype=type(exception), excmsg=exception.message, **kwargs)
@classmethod
def printStats(cls):
print '>>>','--*'*10,'EXCEPTIONS','*--'*10
if len(cls.exceptions)==0:
print "No exceptions handled"
else:
print " Numbers of Exceptions:"
for exc, count in cls.exceptions.iteritems():
print " ",exc, count
print '<<<','--*'*25
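# Illustrative usage sketch (not part of the original module):
#
#   try:
#       fetch_portal(url)        # hypothetical call being monitored
#   except Exception as e:
#       ErrorHandler.handleError(log, 'fetch failed', exception=e, url=url)
#   ...
#   ErrorHandler.printStats()    # prints the per-exception-type counters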
errorStatus={
702:'Connection Error'
,703:'Connection Timeout'
,704:'Read Timeout'
,705:'HTTPError'
,706:'TooManyRedirects'
,707:'Timeout'
,801:'ValueError'
,802:'TimeoutError'
,901:'InvalidUrl'
,902:'InvalidSchema'
,903:'MissingSchema'
,904:'MissingDatasets'
,600:'Not Specified'
,666:'Robots.txt'
}
def getExceptionCode(e):
#connection erorrs
try:
if isinstance(e,requests.exceptions.ConnectionError):
return 702
if isinstance(e,requests.exceptions.ConnectTimeout):
return 703
if isinstance(e,requests.exceptions.ReadTimeout):
return 704
if isinstance(e,requests.exceptions.HTTPError):
return 705
if isinstance(e,requests.exceptions.TooManyRedirects):
return 706
if isinstance(e,requests.exceptions.Timeout):
return 707
if isinstance(e,ckanapi.errors.CKANAPIError):
try:
err = literal_eval(e.extra_msg)
return err[1]
except Exception:
return 708
#if isinstance(e,requests.exceptions.RetryError):
# return 708
#parser errors
if isinstance(e, exceptions.ValueError):
return 801
if isinstance(e , TimeoutError):
return 802
#format errors
if isinstance(e,urlnorm.InvalidUrl):
return 901
if isinstance(e,requests.exceptions.InvalidSchema):
return 902
if isinstance(e,requests.exceptions.MissingSchema):
return 903
else:
return 600
except Exception as e:
log.error("MISSING Exception CODE", exctype=type(e), excmsg=e.message,exc_info=True)
return 601
def getExceptionString(e):
try:
if isinstance(e,ckanapi.errors.CKANAPIError):
try:
err = literal_eval(e.extra_msg)
return str(type(e))+":"+str(err[2])
except Exception:
return str(type(e))+":"+str(e.extra_msg)
else:
if e.message:
return str(type(e))+":"+str(e.message)
            if not e.message:
                return str(type(e))+":"
except Exception as e:
log.error("Get Exception string", exctype=type(e), excmsg=e.message,exc_info=True)
return 601 | gpl-3.0 | -7,502,333,142,481,159,000 | 25.651852 | 92 | 0.591882 | false |
tiancj/emesene | emesene/e3/papylib/papyon/papyon/msnp/notification.py | 1 | 39205 | # -*- coding: utf-8 -*-
#
# papyon - a python client library for Msn
#
# Copyright (C) 2005-2007 Ali Sabil <[email protected]>
# Copyright (C) 2005-2006 Ole André Vadla Ravnås <[email protected]>
# Copyright (C) 2007 Johann Prieur <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""Notification protocol Implementation
Implements the protocol used to communicate with the Notification Server."""
from base import BaseProtocol
from message import Message
from constants import ProtocolConstant, ProtocolError, ProtocolState
from challenge import _msn_challenge
import papyon
from papyon.gnet.message.HTTP import HTTPMessage
from papyon.util.async import run
from papyon.util.queue import PriorityQueue, LastElementQueue
from papyon.util.decorator import throttled
from papyon.util.encoding import decode_rfc2047_string
from papyon.util.parsing import build_account, parse_account
from papyon.util.timer import Timer
import papyon.util.element_tree as ElementTree
import papyon.profile as profile
import papyon.service.SingleSignOn as SSO
import papyon.service.AddressBook as AB
import papyon.service.OfflineIM as OIM
import random
import base64
import hmac
import hashlib
import time
import logging
import uuid
import urllib
import gobject
import xml.sax.saxutils as xml_utils
__all__ = ['NotificationProtocol']
logger = logging.getLogger('papyon.protocol.notification')
class NotificationProtocol(BaseProtocol, Timer):
"""Protocol used to communicate with the Notification Server
@undocumented: do_get_property, do_set_property
@group Handlers: _handle_*, _default_handler, _error_handler
@ivar state: the current protocol state
@type state: integer
@see L{constants.ProtocolState}"""
__gsignals__ = {
"buddy-notification-received" : (gobject.SIGNAL_RUN_FIRST,
gobject.TYPE_NONE,
(object, object, object, object)),
"mail-received" : (gobject.SIGNAL_RUN_FIRST,
gobject.TYPE_NONE,
(object,)),
"switchboard-invitation-received" : (gobject.SIGNAL_RUN_FIRST,
gobject.TYPE_NONE,
(object, object)),
"unmanaged-message-received" : (gobject.SIGNAL_RUN_FIRST,
gobject.TYPE_NONE,
(object, object)),
}
def __init__(self, client, transport, proxies={}, version=15):
"""Initializer
@param client: the parent instance of L{client.Client}
@type client: L{client.Client}
@param transport: The transport to use to speak the protocol
@type transport: L{transport.BaseTransport}
        @param proxies: a dictionary mapping the proxy type to a
L{gnet.proxy.ProxyInfos} instance
@type proxies: {type: string, proxy:L{gnet.proxy.ProxyInfos}}
"""
BaseProtocol.__init__(self, client, transport, proxies)
Timer.__init__(self)
self.__state = ProtocolState.CLOSED
self._protocol_version = version
self._callbacks = {} # tr_id=>(callback, errback)
self._time_id = 0
self.tokens = None
# Properties ------------------------------------------------------------
def __get_state(self):
return self.__state
def __set_state(self, state):
self.__state = state
self.notify("state")
state = property(__get_state)
_state = property(__get_state, __set_state)
def do_get_property(self, pspec):
if pspec.name == "state":
return self.__state
else:
raise AttributeError, "unknown property %s" % pspec.name
def do_set_property(self, pspec, value):
raise AttributeError, "unknown property %s" % pspec.name
# Public API -------------------------------------------------------------
@throttled(2, LastElementQueue())
def set_presence(self, presence, client_id=0, msn_object=None):
"""Publish the new user presence.
@param presence: the new presence
@type presence: string L{profile.Presence}"""
if msn_object == None:
msn_object = ""
if presence == profile.Presence.OFFLINE:
self._client.logout()
else:
if msn_object:
self._client._msn_object_store.publish(msn_object)
self._send_command('CHG',
(presence, str(client_id), urllib.quote(str(msn_object))))
@throttled(2, LastElementQueue())
def set_display_name(self, display_name):
"""Sets the new display name
@param display_name: the new friendly name
@type display_name: string"""
self._send_command('PRP',
('MFN', urllib.quote(display_name)))
@throttled(2, LastElementQueue())
def set_personal_message(self, personal_message='', current_media=None,
signature_sound=None):
"""Sets the new personal message
@param personal_message: the new personal message
@type personal_message: string"""
cm = ''
if current_media is not None:
cm = '\\0Music\\01\\0{0} - {1}\\0%s\\0%s\\0\\0' % \
(xml_utils.escape(current_media[0]),
xml_utils.escape(current_media[1]))
if signature_sound is not None:
signature_sound = xml_utils.escape(signature_sound)
ss = '<SignatureSound>%s</SignatureSound>' % signature_sound
message = xml_utils.escape(personal_message)
pm = '<Data>'\
'<PSM>%s</PSM>'\
'<CurrentMedia>%s</CurrentMedia>'\
'<MachineGuid>%s</MachineGuid>'\
'</Data>' % (message, cm, str(self._client.machine_guid).upper())
self._send_command('UUX', payload=pm)
self._client.profile._server_property_changed("personal-message",
personal_message)
if current_media is not None:
self._client.profile._server_property_changed("current-media",
current_media)
@throttled(2, LastElementQueue())
def set_end_point_name(self, name="Papyon", idle=False):
ep = '<EndpointData>'\
'<Capabilities>%s</Capabilities>'\
'</EndpointData>' % self._client.profile.client_id
name = xml_utils.escape(name)
pep = '<PrivateEndpointData>'\
'<EpName>%s</EpName>'\
'<Idle>%s</Idle>'\
'<State>%s</State>'\
'<ClientType>%i</ClientType>'\
'</PrivateEndpointData>' % (name, str(idle).lower(),
self._client.profile.presence, self._client.client_type)
self._send_command('UUX', payload=ep)
self._send_command('UUX', payload=pep)
@throttled(2, LastElementQueue())
def set_privacy(self, privacy=profile.Privacy.BLOCK):
self._send_command("BLP", (privacy,))
def signoff(self):
"""Logout from the server"""
if self._state >= ProtocolState.AUTHENTICATING:
self._send_command('OUT')
self._transport.lose_connection()
@throttled(7, list())
def request_switchboard(self, priority, callback):
self.__switchboard_callbacks.add(callback, priority)
self._send_command('XFR', ('SB',))
def add_contact_to_membership(self, account,
network_id=profile.NetworkID.MSN,
membership=profile.Membership.FORWARD):
"""Add a contact to a given membership.
@param account: the contact identifier
@type account: string
@param network_id: the contact network
@type network_id: integer
@see L{papyon.profile.NetworkID}
@param membership: the list to be added to
@type membership: integer
@see L{papyon.profile.Membership}"""
if network_id == profile.NetworkID.MOBILE:
payload = '<ml><t><c n="tel:%s" l="%d" /></t></ml>' % \
(account, membership)
self._send_command("ADL", payload=payload)
else:
user, domain = account.split("@", 1)
payload = '<ml><d n="%s"><c n="%s" l="%d" t="%d"/></d></ml>' % \
(domain, user, membership, network_id)
self._send_command("ADL", payload=payload)
def remove_contact_from_membership(self, account,
network_id=profile.NetworkID.MSN,
membership=profile.Membership.FORWARD):
"""Remove a contact from a given membership.
@param account: the contact identifier
@type account: string
@param network_id: the contact network
@type network_id: integer
@see L{papyon.profile.NetworkID}
        @param membership: the list to be removed from
@type membership: integer
@see L{papyon.profile.Membership}"""
if network_id == profile.NetworkID.MOBILE:
payload = '<ml><t><c n="tel:%s" l="%d" /></t></ml>' % \
(account, membership)
self._send_command("RML", payload=payload)
else:
user, domain = account.split("@", 1)
payload = '<ml><d n="%s"><c n="%s" l="%d" t="%d"/></d></ml>' % \
(domain, user, membership, network_id)
self._send_command("RML", payload=payload)
def send_user_notification(self, message, contact, contact_guid, type,
callback=None, errback=None):
account = build_account(contact, contact_guid)
arguments = (account, type)
tr_id = self._send_command("UUN", arguments, message, True)
self._callbacks[tr_id] = (callback, errback)
def send_unmanaged_message(self, contact, message, callback=None,
errback=None):
content_type = message.content_type[0]
if content_type == 'text/x-msnmsgr-datacast':
message_type = 3
elif content_type == 'text/x-msmsgscontrol':
message_type = 2
else:
message_type = 1
tr_id = self._send_command('UUM',
(contact.account, contact.network_id, message_type),
payload=message, callback=callback)
self._callbacks[tr_id] = (None, errback)
def send_url_request(self, url_command_args, callback):
tr_id = self._send_command('URL', url_command_args)
self._callbacks[tr_id] = (callback, None)
# Helpers ----------------------------------------------------------------
def __derive_key(self, key, magic):
hash1 = hmac.new(key, magic, hashlib.sha1).digest()
hash2 = hmac.new(key, hash1 + magic, hashlib.sha1).digest()
hash3 = hmac.new(key, hash1, hashlib.sha1).digest()
hash4 = hmac.new(key, hash3 + magic, hashlib.sha1).digest()
return hash2 + hash4[0:4]
def __search_account(self, account, network_id=profile.NetworkID.MSN):
contact = self._client.address_book.search_contact(account, network_id)
if contact is None:
logger.warning("Contact (network_id=%d) %s not found" % \
(network_id, account))
return contact
def __parse_network_and_account(self, command, idx=0):
if self._protocol_version >= 18 and ":" in command.arguments[idx]:
temp = command.arguments[idx].split(":")
network_id = int(temp[0])
account = temp[1]
else:
account = command.arguments[idx]
idx += 1
network_id = int(command.arguments[idx])
idx += 1
return idx, network_id, account
def __find_node(self, parent, name, default):
node = parent.find(name)
if node is not None and node.text is not None:
return node.text.encode("utf-8")
else:
return default
# Handlers ---------------------------------------------------------------
# --------- Connection ---------------------------------------------------
def _handle_VER(self, command):
self._protocol_version = int(command.arguments[0].lstrip('MSNP'))
self._send_command('CVR',
ProtocolConstant.CVR + (self._client.profile.account,))
def _handle_CVR(self, command):
method = 'SSO'
self._send_command('USR',
(method, 'I', self._client.profile.account))
def _handle_XFR(self, command):
if command.arguments[0] == 'NS':
try:
host, port = command.arguments[1].split(":", 1)
port = int(port)
except ValueError:
host = command.arguments[1]
port = self._transport.server[1]
logger.debug("<-> Redirecting to " + command.arguments[1])
self._transport.reset_connection((host, port))
else: # connect to a switchboard
try:
host, port = command.arguments[1].split(":", 1)
port = int(port)
except ValueError:
host = command.arguments[1]
port = self._transport.server[1]
session_id = command.arguments[3]
callback = self.__switchboard_callbacks.pop(0)
run(callback, ((host, port), session_id, None))
def _handle_USR(self, command):
args_len = len(command.arguments)
# MSNP15 have only 4 params for final USR
assert(args_len == 3 or args_len == 4), \
"Received USR with invalid number of params : " + str(command)
if command.arguments[0] == "OK":
self._state = ProtocolState.AUTHENTICATED
# we need to authenticate with a passport server
elif command.arguments[1] == "S":
self._state = ProtocolState.AUTHENTICATING
if command.arguments[0] == "SSO":
self._client._sso.RequestMultipleSecurityTokens(
(self._sso_cb, command.arguments[3]),
((lambda error: self.emit("error",
ProtocolError.AUTHENTICATION_FAILED)),),
SSO.LiveService.MESSENGER_CLEAR)
self._client.address_book.connect("notify::state",
self._address_book_state_changed_cb)
self._client.address_book.connect("messenger-contact-added",
self._address_book_contact_added_cb)
self._client.address_book.connect("contact-accepted",
self._address_book_contact_accepted_cb)
self._client.address_book.connect("contact-rejected",
self._address_book_contact_rejected_cb)
self._client.address_book.connect("contact-deleted",
self._address_book_contact_deleted_cb)
self._client.address_book.connect("contact-blocked",
self._address_book_contact_blocked_cb)
self._client.address_book.connect("contact-unblocked",
self._address_book_contact_unblocked_cb)
self._client.address_book.connect("contact-allowed",
self._address_book_contact_allowed_cb)
self._client.address_book.connect("contact-disallowed",
self._address_book_contact_disallowed_cb)
elif command.arguments[0] == "TWN":
raise NotImplementedError, "Missing Implementation, please fix"
def _handle_SBS(self, command): # unknown command
pass
def _handle_QNG(self, command):
timeout = int(command.arguments[0])
self.start_timeout("ping", timeout)
self._time_id = time.time()
self.start_timeout(("qing", self._time_id), timeout+5)
def _handle_OUT(self, command):
reason = None
if len(command.arguments) > 0:
reason = command.arguments[0]
if reason == "OTH":
self.emit("error", ProtocolError.OTHER_CLIENT)
elif reason == "SSD":
self.emit("error", ProtocolError.SERVER_DOWN)
else:
self._transport.lose_connection()
# --------- Presence & Privacy -------------------------------------------
def _handle_BLP(self, command):
self._client.profile._server_property_changed("privacy",
command.arguments[0])
def _handle_GCF(self, command):
pass
def _handle_CHG(self, command):
self._client.profile._server_property_changed("presence",
command.arguments[0])
if len(command.arguments) > 2:
if command.arguments[2] != '0':
msn_object = papyon.p2p.MSNObject.parse(self._client,
urllib.unquote(command.arguments[2]))
else:
msn_object = None
self._client.profile._server_property_changed("msn_object", msn_object)
else:
self._client.profile._server_property_changed("msn_object", None)
def _handle_ILN(self, command):
self._handle_NLN(command)
def _handle_FLN(self, command):
idx, network_id, account = self.__parse_network_and_account(command)
contact = self.__search_account(account, network_id)
if contact is not None:
contact._remove_flag(profile.ContactFlag.EXTENDED_PRESENCE_KNOWN)
contact._server_property_changed("presence",
profile.Presence.OFFLINE)
def _handle_NLN(self, command):
idx, network_id, account = self.__parse_network_and_account(command, 1)
presence = command.arguments[0]
display_name = urllib.unquote(command.arguments[idx])
idx += 1
capabilities = command.arguments[idx]
idx += 1
msn_object = None
icon_url = None
if len(command.arguments) > idx:
if command.arguments[idx] not in ('0', '1'):
msn_object = papyon.p2p.MSNObject.parse(self._client,
urllib.unquote(command.arguments[idx]))
idx += 1
if len(command.arguments) > idx:
icon_url = command.arguments[idx]
contact = self.__search_account(account, network_id)
if contact is not None:
contact._server_property_changed("presence", presence)
# don't change capabilities
if contact is not self._client.profile:
contact._server_property_changed("client-capabilities", capabilities)
contact._server_property_changed("display-name", display_name)
# only change MSNObject if the extended presence is known (MSNP18)
if self._protocol_version < 18 or \
contact.has_flag(profile.ContactFlag.EXTENDED_PRESENCE_KNOWN):
contact._server_property_changed("msn_object", msn_object)
if icon_url is not None:
contact._server_attribute_changed('icon_url', icon_url)
# --------- Display name and co ------------------------------------------
def _handle_PRP(self, command):
ctype = command.arguments[0]
if len(command.arguments) < 2: return
if ctype == 'MFN':
self._client.profile._server_property_changed('display-name',
urllib.unquote(command.arguments[1]))
# TODO: add support for other stuff
def _handle_UUX(self, command):
pass
def _handle_UBN(self, command): # contact infos
if not command.payload:
return
account, guid = parse_account(command.arguments[0])
contact = self.__search_account(account)
type = int(command.arguments[1])
payload = command.payload
self.emit("buddy-notification-received", contact, guid, type, payload)
def _handle_UBX(self, command): # contact infos
if not command.payload:
return
idx, network_id, account = self.__parse_network_and_account(command)
try:
tree = ElementTree.fromstring(command.payload)
except:
logger.error("Invalid XML data in received UBX command")
return
utl = self.__find_node(tree, "./UserTileLocation", "")
cm_parts = self.__find_node(tree, "./CurrentMedia", "").split('\\0')
pm = self.__find_node(tree, "./PSM", "")
rmu = self.__find_node(tree, "./RMU", "")
ss = self.__find_node(tree, "./SignatureSound", None)
mg = self.__find_node(tree, "./MachineGuid", "{}").lower()[1:-1]
msn_object = None
if utl != "" and utl != "0":
msn_object = papyon.p2p.MSNObject.parse(self._client, utl)
cm = None
if len(cm_parts) >= 6 and cm_parts[1] == 'Music' and cm_parts[2] == '1':
cm = (cm_parts[4], cm_parts[5])
eps = tree.findall("./EndpointData")
end_points = {}
for ep in eps:
guid = uuid.UUID(ep.get("id"))
caps = self.__find_node(ep, "Capabilities", "0:0")
end_points[guid] = profile.EndPoint(guid, caps)
peps = tree.findall("./PrivateEndpointData")
for pep in peps:
guid = uuid.UUID(pep.get("id"))
if guid not in end_points: continue
end_point = end_points[guid]
end_point.name = self.__find_node(pep, "EpName", "")
end_point.idle = bool(self.__find_node(pep, "Idle", "").lower() == "true")
end_point.client_type = int(self.__find_node(pep, "ClientType", 0))
end_point.state = self.__find_node(pep, "State", "")
contact = self.__search_account(account, network_id)
if contact is not None:
contact._add_flag(profile.ContactFlag.EXTENDED_PRESENCE_KNOWN)
contact._server_property_changed("end-points", end_points)
contact._server_property_changed("msn-object", msn_object)
contact._server_property_changed("current-media", cm)
contact._server_property_changed("personal-message", pm)
contact._server_property_changed("signature-sound", ss)
def _handle_UUN(self, command): # UBN acknowledgment
self._command_answered_cb(command.transaction_id)
# --------- Contact List -------------------------------------------------
def _handle_ADL(self, command):
if len(command.arguments) > 0 and command.arguments[0] == "OK":
# Confirmation for one of our ADLs
if command.transaction_id != 0 \
and self._state != ProtocolState.OPEN:
# Initial ADL
self._state = ProtocolState.OPEN
self._transport.enable_ping()
else:
if command.payload:
# Incoming payload ADL from the server
self._client.address_book.sync(True)
def _handle_RML(self, command):
pass
def _handle_FQY(self, command):
pass
# --------- Messages -----------------------------------------------------
def _handle_MSG(self, command):
message = Message(None, command.payload)
content_type = message.content_type
if content_type[0] == 'text/x-msmsgsprofile':
profile = {}
lines = command.payload.split("\r\n")
for line in lines:
line = line.strip()
if line:
name, value = line.split(":", 1)
profile[name] = value.strip()
self._client.profile._server_property_changed("profile", profile)
self.set_privacy(self._client.profile.privacy)
self._state = ProtocolState.SYNCHRONIZING
self._client.address_book.sync()
elif content_type[0] in \
('text/x-msmsgsinitialmdatanotification', \
'text/x-msmsgsoimnotification'):
if self._client.oim_box is not None:
self._client.oim_box._state = \
OIM.OfflineMessagesBoxState.NOT_SYNCHRONIZED
m = HTTPMessage()
m.parse(message.body)
mail_data = m.get_header('Mail-Data').strip()
if mail_data == 'too-large':
mail_data = None
self._client.oim_box.sync(mail_data)
if mail_data and \
content_type[0] == 'text/x-msmsgsinitialmdatanotification':
#Initial mail
start = mail_data.find('<IU>') + 4
end = mail_data.find('</IU>')
if start < end:
mailbox_unread = int(mail_data[start:end])
self._client.mailbox._initial_set(mailbox_unread)
elif content_type[0] == 'text/x-msmsgsinitialemailnotification':
#Initial mail (obsolete by MSNP11)
pass
elif content_type[0] == 'text/x-msmsgsemailnotification':
#New mail
m = HTTPMessage()
m.parse(message.body)
name = decode_rfc2047_string(m.get_header('From'))
address = m.get_header('From-Addr')
subject = decode_rfc2047_string(m.get_header('Subject'))
message_url = m.get_header('Message-URL')
post_url = m.get_header('Post-URL')
post_id = m.get_header('id')
dest = m.get_header('Dest-Folder')
if dest == 'ACTIVE':
self._client.mailbox._unread_mail_increased(1)
build = self._build_url_post_data
post_url, form_data = build(message_url, post_url, post_id)
self._client.mailbox._new_mail(name, address, subject,
post_url, form_data)
elif content_type[0] == 'text/x-msmsgsactivemailnotification':
#Movement of unread mail
m = HTTPMessage()
m.parse(message.body)
src = m.get_header('Src-Folder')
dest = m.get_header('Dest-Folder')
delta = int(m.get_header('Message-Delta'))
if src == 'ACTIVE':
self._client.mailbox._unread_mail_decreased(delta)
elif dest == 'ACTIVE':
self._client.mailbox._unread_mail_increased(delta)
def _handle_UBM(self, command):
idx, network_id, account = self.__parse_network_and_account(command)
contact = self.__search_account(account, network_id)
if contact is not None:
message = Message(contact, command.payload)
self.emit("unmanaged-message-received", contact, message)
# --------- Urls ---------------------------------------------------------
def _build_url_post_data_new(self,
message_url="/cgi-bin/HoTMaiL",
post_url='https://login.live.com/ppsecure/sha1auth.srf?',
post_id='2'):
passportToken = self.tokens[SSO.LiveService.TB].security_token
proofToken = self.tokens[SSO.LiveService.TB].proof_token
nonce = ""
for i in range(0,24):
nonce += chr(int(random.random()*256))
# Create the percent encoded string
encodedString = \
"<EncryptedData xmlns=\"http://www.w3.org/2001/04/xmlenc#\"" \
"Id=\"BinaryDAToken0\" Type=\"http://www.w3.org/2001/04/xmlenc#Element\">" \
"<EncryptionMethodAlgorithm=\"http://www.w3.org/2001/04/xmlenc#tripledes-cbc\"/>" \
"<ds:KeyInfo xmlns:ds=\"http://www.w3.org/2000/09/xmldsig#\">" \
"<ds:KeyName>http://Passport.NET/STS</ds:KeyName>" \
"</ds:KeyInfo>" \
"<CipherData>" \
"<CipherValue>%s</CipherValue>" \
"</CipherData>" \
"</EncryptedData>" % passportToken
# Create the token
token_args = {
"ct" : int(time.time()),
"rru" : message_url,
"sru" : message_url,
"ru" : message_url,
"bver" : "4",
"svc" : "mail",
"js" : "yes",
"id" : post_id,
"pl" : "?id=%s" % post_id,
"da" : encodedString,
"nonce" : base64.b64encode(nonce)
}
# we have to replace manually -. because urllib doesn't do it
token = urllib.urlencode(token_args).replace("-", "%2D").replace(".", "%2E")
# Compute the keys with HMAC-Sha1 algorithm
key1 = base64.b64decode(proofToken)
magic = "WS-SecureConversation" + nonce
key2 = self.__derive_key(key1, magic)
myhash = hmac.new(key2, token, hashlib.sha1).digest()
token_args_part_two = {
"hash" : base64.b64encode(myhash)
}
token += "&" + urllib.urlencode(token_args_part_two)
post_data = dict([('token', token)])
return (post_url, post_data)
def _build_url_post_data(self,
message_url="/cgi-bin/HoTMaiL",
post_url='https://loginnet.passport.com/ppsecure/md5auth.srf?',
post_id='2'):
profile = self._client.profile.profile
account = self._client.profile.account
password = str(self._client.profile.password)
sl = str(int(time.time()) - int(profile['LoginTime']))
sid = profile['sid']
auth = profile['MSPAuth']
creds = hashlib.md5(auth + sl + password).hexdigest()
post_data = dict([
('mode', 'ttl'),
('login', account.split('@')[0]),
('username', account),
('sid', sid),
('kv', ''),
('id', post_id),
('sl', sl),
('rru', message_url),
('auth', auth),
('creds', creds),
('svc', 'mail'),
('js', 'yes')])
return (post_url, post_data)
def _handle_URL(self, command):
tr_id = command.transaction_id
if tr_id in self._callbacks:
message_url, post_url, post_id = command.arguments
post_url, form_dict = self._build_url_post_data(message_url,
post_url, post_id)
self._command_answered_cb(tr_id, post_url, form_dict)
# --------- Invitation ---------------------------------------------------
def _handle_RNG(self, command):
session_id = command.arguments[0]
host, port = command.arguments[1].split(':', 1)
port = int(port)
key = command.arguments[3]
account = command.arguments[4]
display_name = urllib.unquote(command.arguments[5])
session = ((host, port), session_id, key)
inviter = (account, display_name)
self.emit("switchboard-invitation-received", session, inviter)
# --------- Challenge ----------------------------------------------------
def _handle_QRY(self, command):
pass
def _handle_CHL(self, command):
response = _msn_challenge(command.arguments[0])
self._send_command('QRY',
(ProtocolConstant.PRODUCT_ID,), payload=response)
# --------- Notification -------------------------------------------------
def _handle_NOT(self, command):
notification_xml = xml_utils.unescape(command.payload)
notification = ElementTree.fromstring(notification_xml)
service = notification.findtext('MSG/BODY/NotificationData/Service')
if service != 'ABCHInternal':
return
try:
notification_id = notification.attrib['id']
site_id = notification.attrib['siteid']
message_id = notification.find('MSG').attrib['id']
send_device = notification.find('TO/VIA').attrib['agent']
receiver_cid = notification.findtext('MSG/BODY/NotificationData/CID')
receiver_account = notification.find('TO').attrib['name'].lower()
if notification_id != '0' or site_id != '45705' \
or message_id != '0' or send_device != 'messenger' \
or receiver_cid != str(self._client.profile.cid) \
or receiver_account != self._client.profile.account.lower():
return
except KeyError:
return
self._client.address_book.sync(True)
#---------- Errors -------------------------------------------------------
def _error_handler(self, error):
logger.error('Notification got error : ' + unicode(error))
self._command_error_cb(error.transaction_id, int(error.name))
# callbacks --------------------------------------------------------------
def _connect_cb(self, transport):
self.__switchboard_callbacks = PriorityQueue()
self._state = ProtocolState.OPENING
versions = []
for version in ProtocolConstant.VER:
if version <= self._protocol_version:
versions.append("MSNP%i" % version)
self._send_command('VER', versions)
def _disconnect_cb(self, transport, reason):
self.stop_all_timeout()
self._state = ProtocolState.CLOSED
def _sso_cb(self, tokens, nonce):
if self._state != ProtocolState.AUTHENTICATING:
return
self.tokens = tokens
clear_token = tokens[SSO.LiveService.MESSENGER_CLEAR]
token = clear_token.security_token
blob = clear_token.mbi_crypt(nonce)
if self._protocol_version >= 16:
arguments = ("SSO", "S", token, blob, "{%s}" %
str(self._client.machine_guid).upper())
else:
arguments = ("SSO", "S", token, blob)
self._send_command("USR", arguments)
def on_ping_timeout(self):
self._transport.enable_ping()
def on_qing_timeout(self, time_id):
if self._time_id == time_id:
self._transport.emit("connection-lost", "Ping timeout")
def _command_answered_cb(self, tr_id, *args):
callback, errback = self._callbacks.get(tr_id, (None, None))
run(callback, *args)
def _command_error_cb(self, tr_id, error):
callback, errback = self._callbacks.get(tr_id, (None, None))
run(errback, error)
def _address_book_state_changed_cb(self, address_book, pspec):
MAX_PAYLOAD_SIZE = 7500
if self._state != ProtocolState.SYNCHRONIZING:
return
if address_book.state != AB.AddressBookState.SYNCHRONIZED:
return
self._client.profile._cid = address_book.profile._cid
self._client.profile._server_property_changed("display-name",
address_book.profile.display_name)
contacts = address_book.contacts.group_by_domain()
mask = ~(profile.Membership.REVERSE | profile.Membership.PENDING)
for contact in address_book.contacts:
if (contact.memberships & mask & ~profile.Membership.FORWARD) == \
(profile.Membership.ALLOW | profile.Membership.BLOCK):
logger.warning("Contact is on both Allow and Block list; " \
"removing from Allow list (%s)" % contact.account)
contact._remove_membership(profile.Membership.ALLOW)
payloads = ['<ml l="1">']
for domain, contacts in contacts.iteritems():
payloads[-1] += '<d n="%s">' % domain
for contact in contacts:
user = contact.account.split("@", 1)[0]
lists = contact.memberships & mask
if lists == profile.Membership.NONE:
continue
network_id = contact.network_id
node = '<c n="%s" l="%d" t="%d"/>' % (user, lists, network_id)
size = len(payloads[-1]) + len(node) + len('</d></ml>')
if size >= MAX_PAYLOAD_SIZE:
payloads[-1] += '</d></ml>'
payloads.append('<ml l="1"><d n="%s">' % domain)
payloads[-1] += node
payloads[-1] += '</d>'
payloads[-1] += '</ml>'
import re
pattern = re.compile ('<d n="[^"]+"></d>')
for payload in payloads:
payload = pattern.sub('', payload)
self._send_command("ADL", payload=payload)
self._state = ProtocolState.SYNCHRONIZED
def _add_contact_to_membership(self, contact, membership):
self.add_contact_to_membership(contact.account, contact.network_id,
membership)
def _remove_contact_from_membership(self, contact, membership):
self.remove_contact_from_membership(contact.account,
contact.network_id, membership)
def _address_book_contact_added_cb(self, address_book, contact):
if contact.is_member(profile.Membership.ALLOW):
self._add_contact_to_membership(contact, profile.Membership.ALLOW)
self._add_contact_to_membership(contact, profile.Membership.FORWARD)
if contact.network_id != profile.NetworkID.MOBILE:
account, domain = contact.account.split('@', 1)
payload = '<ml l="2"><d n="%s"><c n="%s"/></d></ml>' % \
(domain, account)
self._send_command("FQY", payload=payload)
def _address_book_contact_deleted_cb(self, address_book, contact):
self._remove_contact_from_membership(contact, profile.Membership.FORWARD)
def _address_book_contact_accepted_cb(self, address_book, contact):
mask = ~(profile.Membership.REVERSE | profile.Membership.PENDING)
memberships = contact.memberships & mask
if memberships:
self._add_contact_to_membership(contact, memberships)
def _address_book_contact_rejected_cb(self, address_book, contact):
mask = ~(profile.Membership.REVERSE | profile.Membership.PENDING)
memberships = contact.memberships & mask
if memberships:
self._add_contact_to_membership(contact, memberships)
def _address_book_contact_blocked_cb(self, address_book, contact):
self._remove_contact_from_membership(contact, profile.Membership.ALLOW)
self._add_contact_to_membership(contact, profile.Membership.BLOCK)
def _address_book_contact_unblocked_cb(self, address_book, contact):
self._remove_contact_from_membership(contact, profile.Membership.BLOCK)
self._add_contact_to_membership(contact, profile.Membership.ALLOW)
def _address_book_contact_allowed_cb(self, address_book, contact):
self._add_contact_to_membership(contact, profile.Membership.ALLOW)
def _address_book_contact_disallowed_cb(self, address_book, contact):
self._remove_contact_from_membership(contact, profile.Membership.ALLOW)
| gpl-3.0 | 9,134,599,020,062,534,000 | 40.572641 | 95 | 0.563375 | false |
olivierfriard/BORIS | boris/portion/dict.py | 1 | 11282 | from .const import Bound
from .interval import Interval, singleton
from collections.abc import MutableMapping, Mapping
from sortedcontainers import SortedDict
def _sort(i):
# Sort by lower bound, closed first
return (i[0].lower, i[0].left is Bound.OPEN)
class IntervalDict(MutableMapping):
"""
An IntervalDict is a dict-like data structure that maps from intervals to data,
where keys can be single values or Interval instances.
When keys are Interval instances, its behaviour merely corresponds to
range queries and it returns IntervalDict instances corresponding to the
subset of values covered by the given interval. If no matching value is
found, an empty IntervalDict is returned.
    When keys are "single values", its behaviour corresponds to that of the
    built-in Python dict. When no matching value is found, a KeyError is raised.
Note that this class does not aim to have the best performance, but is
provided mainly for convenience. Its performance mainly depends on the
number of distinct values (not keys) that are stored.
"""
__slots__ = ('_storage', )
def __init__(self, mapping_or_iterable=None):
"""
Return a new IntervalDict.
If no argument is given, an empty IntervalDict is created. If an argument
        is given, and is a mapping object (e.g., another IntervalDict), a
        new IntervalDict with the same key-value pairs is created. If an
iterable is provided, it has to be a list of (key, value) pairs.
:param mapping_or_iterable: optional mapping or iterable.
"""
self._storage = SortedDict(_sort) # Mapping from intervals to values
if mapping_or_iterable is not None:
self.update(mapping_or_iterable)
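    # Illustrative sketch (not part of the original module): basic usage with
    # single-value keys; Interval keys go through the same __setitem__ logic.
    #
    #   d = IntervalDict()
    #   d[3] = 'low'                # stored internally as singleton(3)
    #   d[singleton(4)] = 'low'     # keys carrying equal values are unioned
    #   d.get(3)                    # -> 'low'
    #   d.find('low')               # -> Interval covering every 'low' key
    #   d.domain()                  # -> union of all keys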
@classmethod
def _from_items(cls, items):
"""
Fast creation of an IntervalDict with the provided items.
The items have to satisfy the two following properties: (1) all keys
must be disjoint intervals and (2) all values must be distinct.
:param items: list of (key, value) pairs.
:return: an IntervalDict
"""
d = cls()
for key, value in items:
d._storage[key] = value
return d
def clear(self):
"""
Remove all items from the IntervalDict.
"""
self._storage.clear()
def copy(self):
"""
Return a shallow copy.
:return: a shallow copy.
"""
return IntervalDict._from_items(self.items())
def get(self, key, default=None):
"""
Return the values associated to given key.
If the key is a single value, it returns a single value (if it exists) or
the default value. If the key is an Interval, it returns a new IntervalDict
restricted to given interval. In that case, the default value is used to
"fill the gaps" (if any) w.r.t. given key.
:param key: a single value or an Interval instance.
:param default: default value (default to None).
:return: an IntervalDict, or a single value if key is not an Interval.
"""
if isinstance(key, Interval):
d = self[key]
d[key - d.domain()] = default
return d
else:
try:
return self[key]
except KeyError:
return default
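    # Illustrative sketch (not part of the original module):
    #
    #   d.get(5, default='n/a')             # single value, or 'n/a' if absent
    #   d.get(some_interval, default=None)  # IntervalDict over some_interval,
    #                                       # with gaps filled by None
    #
    # 'some_interval' stands for any Interval instance built elsewhere (e.g.
    # with the helpers of the sibling interval module).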
def find(self, value):
"""
Return a (possibly empty) Interval i such that self[i] = value, and
self[~i] != value.
:param value: value to look for.
:return: an Interval instance.
"""
return Interval(*(i for i, v in self._storage.items() if v == value))
def items(self):
"""
Return a view object on the contained items sorted by key
(see https://docs.python.org/3/library/stdtypes.html#dict-views).
:return: a view object.
"""
return self._storage.items()
def keys(self):
"""
Return a view object on the contained keys (sorted)
(see https://docs.python.org/3/library/stdtypes.html#dict-views).
:return: a view object.
"""
return self._storage.keys()
def values(self):
"""
Return a view object on the contained values sorted by key
(see https://docs.python.org/3/library/stdtypes.html#dict-views).
:return: a view object.
"""
return self._storage.values()
def domain(self):
"""
Return an Interval corresponding to the domain of this IntervalDict.
:return: an Interval.
"""
return Interval(*self._storage.keys())
def pop(self, key, default=None):
"""
Remove key and return the corresponding value if key is not an Interval.
If key is an interval, it returns an IntervalDict instance.
This method combines self[key] and del self[key]. If a default value
is provided and is not None, it uses self.get(key, default) instead of
self[key].
:param key: a single value or an Interval instance.
:param default: optional default value.
:return: an IntervalDict, or a single value if key is not an Interval.
"""
if default is None:
value = self[key]
del self[key]
return value
else:
value = self.get(key, default)
try:
del self[key]
except KeyError:
pass
return value
def popitem(self):
"""
Remove and return some (key, value) pair as a 2-tuple.
Raise KeyError if D is empty.
:return: a (key, value) pair.
"""
return self._storage.popitem()
def setdefault(self, key, default=None):
"""
        Return the value(s) associated to given key. If the key does not exist,
        set its value to default and return it.
:param key: a single value or an Interval instance.
:param default: default value (default to None).
:return: an IntervalDict, or a single value if key is not an Interval.
"""
if isinstance(key, Interval):
value = self.get(key, default)
self.update(value)
return value
else:
try:
return self[key]
except KeyError:
self[key] = default
return default
def update(self, mapping_or_iterable):
"""
Update current IntervalDict with provided values.
If a mapping is provided, it must map Interval instances to values (e.g., another
IntervalDict). If an iterable is provided, it must consist of a list of
(key, value) pairs.
:param mapping_or_iterable: mapping or iterable.
"""
if isinstance(mapping_or_iterable, Mapping):
data = mapping_or_iterable.items()
else:
data = mapping_or_iterable
for i, v in data:
self[i] = v
def combine(self, other, how):
"""
Return a new IntervalDict that combines the values from current and
provided ones.
If d = d1.combine(d2, f), then d contains (1) all values from d1 whose
keys do not intersect the ones of d2, (2) all values from d2 whose keys
do not intersect the ones of d1, and (3) f(x, y) for x in d1, y in d2 for
intersecting keys.
:param other: another IntervalDict instance.
:param how: a function of two parameters that combines values.
:return: a new IntervalDict instance.
"""
new_items = []
dom1, dom2 = self.domain(), other.domain()
new_items.extend(self[dom1 - dom2].items())
new_items.extend(other[dom2 - dom1].items())
intersection = dom1 & dom2
d1, d2 = self[intersection], other[intersection]
for i1, v1 in d1.items():
for i2, v2 in d2.items():
if i1.overlaps(i2):
i = i1 & i2
v = how(v1, v2)
new_items.append((i, v))
return IntervalDict(new_items)
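    # Illustrative sketch (not part of the original module): combine two
    # IntervalDicts, keeping the larger value wherever their domains intersect.
    #
    #   merged = d1.combine(d2, how=max)
    #
    # Outside the intersection of d1.domain() and d2.domain(), values are taken
    # unchanged from whichever dict covers that region.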
def as_dict(self):
"""
Return the content as a classical Python dict.
:return: a Python dict.
"""
return dict(self._storage)
def __getitem__(self, key):
if isinstance(key, Interval):
items = []
for i, v in self._storage.items():
intersection = key & i
if not intersection.empty:
items.append((intersection, v))
return IntervalDict._from_items(items)
else:
for i, v in self._storage.items():
if key in i:
return v
raise KeyError(key)
def __setitem__(self, key, value):
interval = key if isinstance(key, Interval) else singleton(key)
if interval.empty:
return
removed_keys = []
added_items = []
found = False
for i, v in self._storage.items():
if value == v:
found = True
# Extend existing key
removed_keys.append(i)
added_items.append((i | interval, v))
elif i.overlaps(interval):
# Reduce existing key
remaining = i - interval
removed_keys.append(i)
if not remaining.empty:
added_items.append((remaining, v))
if not found:
added_items.append((interval, value))
# Update storage accordingly
for key in removed_keys:
self._storage.pop(key)
for key, value in added_items:
self._storage[key] = value
def __delitem__(self, key):
interval = key if isinstance(key, Interval) else singleton(key)
if interval.empty:
return
removed_keys = []
added_items = []
found = False
for i, v in self._storage.items():
if i.overlaps(interval):
found = True
remaining = i - interval
removed_keys.append(i)
if not remaining.empty:
added_items.append((remaining, v))
if not found and not isinstance(key, Interval):
raise KeyError(key)
# Update storage accordingly
for key in removed_keys:
self._storage.pop(key)
for key, value in added_items:
self._storage[key] = value
def __or__(self, other):
d = self.copy()
d.update(other)
return d
def __ior__(self, other):
self.update(other)
return self
def __iter__(self):
return iter(self._storage)
def __len__(self):
return len(self._storage)
def __contains__(self, key):
return key in self.domain()
def __repr__(self):
return '{}{}{}'.format(
'{',
', '.join('{!r}: {!r}'.format(i, v) for i, v in self.items()),
'}',
)
def __eq__(self, other):
if isinstance(other, IntervalDict):
return self.as_dict() == other.as_dict()
else:
return NotImplemented
| gpl-3.0 | -2,896,345,402,922,807,000 | 29.909589 | 89 | 0.564528 | false |
larbibaraka/Bitly_clone | bitly_clone/bitly__clone_app/migrations/0001_initial.py | 1 | 1048 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-07 21:10
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='UrlShort',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('user_url', models.URLField(unique=True, verbose_name='the user url')),
('url_code', models.CharField(max_length=6, unique=True)),
('date_creation', models.DateTimeField(auto_now_add=True, verbose_name='creation date')),
('update_time', models.DateTimeField(auto_now_add=True, verbose_name='creation date')),
('nike_name', models.CharField(blank=True, max_length=20, null=True, verbose_name='user nike name')),
('nb_access', models.IntegerField(default=0)),
],
),
]
| gpl-3.0 | -7,644,237,825,253,766,000 | 36.428571 | 117 | 0.59542 | false |
Fredehagelund92/Cheetax | cheetax/run_manager.py | 1 | 1120 |
from cheetax.exceptions import RuntimeException
class RunManager(object):
"""
RunManager is responsible for handling runners
    based on their status.
"""
def __init__(self):
self.runners = []
self.total_time = 0
@property
def num_runners(self):
return len(self.runners)
def add_runner(self, runner):
self.runners.append(runner)
def call_runner(self, runner, index, total):
runner.before_execute(index, total)
result = runner.run()
runner.after_execute(result, index, total)
self.total_time = self.total_time + result.execution_time
if result.errored and runner.raise_on_first_error():
raise RuntimeException(result.error)
return result
def execute_runners(self):
for index, runner in enumerate(self.runners):
if index == 0:
runner.initial_msg()
result = self.call_runner(runner, index, len(self.runners))
if index == self.num_runners-1:
runner.final_msg(result, self.num_runners, self.total_time)
| apache-2.0 | 5,339,509,767,290,848,000 | 22.333333 | 75 | 0.609821 | false |
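A minimal sketch of a runner object that satisfies the interface RunManager drives. The FakeResult attributes and runner method names below are inferred from the calls made in call_runner()/execute_runners(); they are not part of cheetax itself.

from cheetax.run_manager import RunManager  # the class defined above

class FakeResult(object):
    # only the attributes RunManager reads
    def __init__(self, execution_time=0.1, errored=False, error=None):
        self.execution_time = execution_time
        self.errored = errored
        self.error = error

class PrintRunner(object):
    def initial_msg(self):
        print("starting run")
    def before_execute(self, index, total):
        print("runner %d/%d starting" % (index + 1, total))
    def run(self):
        return FakeResult()
    def after_execute(self, result, index, total):
        print("runner %d/%d finished in %.2fs" % (index + 1, total, result.execution_time))
    def raise_on_first_error(self):
        return True
    def final_msg(self, result, num_runners, total_time):
        print("%d runner(s) done in %.2fs total" % (num_runners, total_time))

manager = RunManager()
manager.add_runner(PrintRunner())
manager.execute_runners()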
openhealthcare/sonar | innovation/projectmigrations/profiles/0004_remove_pseudonym.py | 1 | 5119 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'Profile.is_pseudonymous'
db.delete_column('profiles_profile', 'is_pseudonymous')
# Deleting field 'Profile.pseudonym'
db.delete_column('profiles_profile', 'pseudonym')
def backwards(self, orm):
# Adding field 'Profile.is_pseudonymous'
db.add_column('profiles_profile', 'is_pseudonymous',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
# Adding field 'Profile.pseudonym'
db.add_column('profiles_profile', 'pseudonym',
self.gf('django.db.models.fields.CharField')(default='', max_length=255),
keep_default=False)
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'innovation.role': {
'Meta': {'object_name': 'Role'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'profiles.profile': {
'Meta': {'object_name': 'Profile', '_ormbases': ['auth.User']},
'affiliation': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'clinician_karma': ('django.db.models.fields.IntegerField', [], {}),
'industry_karma': ('django.db.models.fields.IntegerField', [], {}),
'patient_karma': ('django.db.models.fields.IntegerField', [], {}),
'phone_number': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'role': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['innovation.Role']"}),
'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'primary_key': 'True'})
}
}
complete_apps = ['profiles'] | gpl-2.0 | -2,631,759,668,109,640,700 | 59.952381 | 182 | 0.556163 | false |
ewels/MultiQC | multiqc/modules/deeptools/plotPCA.py | 1 | 2743 | #!/usr/bin/env python
""" MultiQC submodule to parse output from deepTools plotPCA """
import logging
from multiqc.plots import scatter
# Initialise the logger
log = logging.getLogger(__name__)
class plotPCAMixin:
def parse_plotPCA(self):
"""Find plotPCA output"""
self.deeptools_plotPCAData = dict()
for f in self.find_log_files("deeptools/plotPCAData", filehandles=False):
parsed_data = self.parsePlotPCAData(f)
for k, v in parsed_data.items():
if k in self.deeptools_plotPCAData:
log.warning("Replacing duplicate sample {}.".format(k))
self.deeptools_plotPCAData[k] = v
if len(parsed_data) > 0:
self.add_data_source(f, section="plotPCA")
self.deeptools_plotPCAData = self.ignore_samples(self.deeptools_plotPCAData)
if len(self.deeptools_plotPCAData) > 0:
config = {
"id": "deeptools_pca_plot",
"title": "deeptools: PCA Plot",
"xlab": "PC1",
"ylab": "PC2",
"tt_label": "PC1 {point.x:.2f}: PC2 {point.y:.2f}",
}
data = dict()
for s_name in self.deeptools_plotPCAData:
try:
data[s_name] = {
"x": self.deeptools_plotPCAData[s_name][1],
"y": self.deeptools_plotPCAData[s_name][2],
}
except KeyError:
pass
if len(data) == 0:
log.debug("No valid data for PCA plot")
return None
self.add_section(
name="PCA plot",
anchor="deeptools_pca",
description="PCA plot with the top two principal components calculated based on genome-wide distribution of sequence reads",
plot=scatter.plot(data, config),
)
return len(self.deeptools_plotPCAData)
def parsePlotPCAData(self, f):
d = dict()
samples = []
for line in f["f"].splitlines():
cols = line.strip().split("\t")
if cols[0] == "#plotPCA --outFileNameData":
continue
elif cols[0] == "Component":
for c in cols[1 : (len(cols) - 1)]:
c = str(c).strip("'")
s_name = self.clean_s_name(c, f)
d[s_name] = {}
samples.append(s_name)
else:
idx = 0
compo = cols[0]
for c in cols[1 : (len(cols) - 1)]:
d[samples[idx]][self._int(compo)] = float(c)
idx += 1
return d
| gpl-3.0 | -5,186,901,610,799,465,000 | 34.623377 | 140 | 0.487423 | false |
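For context, a sketch of the tab-separated file parsePlotPCAData() consumes, modelled on deepTools `plotPCA --outFileNameData` output; the sample names and numbers are invented.

example_pca_file = "\n".join([
    "#plotPCA --outFileNameData",
    "Component\t'sample_1'\t'sample_2'\tEigenvalue",
    "1\t-0.70\t0.68\t12.5",
    "2\t0.71\t0.70\t3.2",
])
# Inside MultiQC the parser receives the usual log-file dict ({"f": example_pca_file, ...})
# and would return roughly:
#   {"sample_1": {1: -0.70, 2: 0.71}, "sample_2": {1: 0.68, 2: 0.70}}
# i.e. one dict per sample keyed by component number, with the Eigenvalue column dropped.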
stpierre/sponge | Sponge/sponge/forms/config.py | 1 | 1627 | from django import forms
from django.forms import widgets
from sponge.utils import config as config_utils
from sponge.utils import group as group_utils
class ConfigForm(forms.Form):
scheduler_username = forms.CharField(help_text="The username of a Pulp user who can modify all sync schedules. Granting 'read' and 'update' on '/repositories/' should be sufficient.")
scheduler_password = forms.CharField(help_text="The password for the sync schedule user",
widget=widgets.PasswordInput(),
required=False)
def __init__(self, *args, **kwargs):
forms.Form.__init__(self, *args, **kwargs)
self.fields['scheduler_username'].initial = \
config_utils.get('scheduler_username', None)
# i wish django supported fieldsets
for group in group_utils.get_groups():
cname = "sync_frequency_%s" % group
self.fields[cname] = \
forms.IntegerField(label="Sync frequency for %s" % group,
help_text="The frequency, in hours, with which to sync all repositories in group %s" % group,
initial=config_utils.get(cname, 24))
if "default" not in group_utils.get_groups():
self.fields['default'] = \
forms.IntegerField(label="Default sync frequency",
help_text="If a machine is in no groups, the frequency, in hours, with which to sync it",
initial=config_utils.get("sync_frequency_default", 24))
| gpl-2.0 | 8,748,941,539,162,094,000 | 53.233333 | 187 | 0.590658 | false |
jabooth/menpodetect | menpodetect/tests/dlib_test.py | 2 | 1037 | from menpodetect.dlib import load_dlib_frontal_face_detector
import menpo.io as mio
takeo = mio.import_builtin_asset.takeo_ppm()
def test_frontal_face_detector():
takeo_copy = takeo.copy()
dlib_detector = load_dlib_frontal_face_detector()
pcs = dlib_detector(takeo_copy)
assert len(pcs) == 1
assert takeo_copy.n_channels == 3
assert takeo_copy.landmarks['dlib_0'].n_points == 4
def test_frontal_face_detector_rgb():
takeo_copy = takeo.copy()
assert takeo_copy.n_channels == 3
dlib_detector = load_dlib_frontal_face_detector()
pcs = dlib_detector(takeo_copy, greyscale=False)
assert len(pcs) == 1
assert takeo_copy.n_channels == 3
assert takeo_copy.landmarks['dlib_0'].n_points == 4
def test_frontal_face_detector_upscales():
takeo_copy = takeo.copy()
dlib_detector = load_dlib_frontal_face_detector()
pcs = dlib_detector(takeo_copy, n_upscales=1)
assert len(pcs) == 1
assert takeo_copy.n_channels == 3
assert takeo_copy.landmarks['dlib_0'].n_points == 4
| bsd-3-clause | -1,552,435,256,788,413,000 | 31.40625 | 60 | 0.684667 | false |
alexandregz/simian | src/tests/simian/mac/munki/handlers/__init___test.py | 1 | 4229 | #!/usr/bin/env python
#
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
"""Munki handlers __init__ module tests."""
import datetime
import logging
logging.basicConfig(filename='/dev/null')
from google.apputils import app
from tests.simian.mac.common import test
from simian.mac.munki import handlers
class HandlersTest(test.RequestHandlerTest):
"""__init__.py handlers tests."""
def GetTestClassInstance(self):
return handlers
def GetTestClassModule(self):
return handlers
def testStrHeaderDateToDatetime(self):
"""Tests StrHeaderDateToDatetime()."""
header_dt_str = 'Wed, 06 Oct 2010 03:23:34 GMT'
dt = datetime.datetime(2010, 10, 06, 03, 23, 34) # same date
r = handlers.StrHeaderDateToDatetime(header_dt_str)
self.assertEqual(dt, r)
def testStrHeaderDateToDatetimeNone(self):
"""Tests StrHeaderDateToDatetime()."""
self.assertEqual(None, handlers.StrHeaderDateToDatetime(''))
def testIsClientResourceExpiredWithEmptyDate(self):
"""Tests IsClientResourceExpired() with empty header str date."""
self.assertTrue(handlers.IsClientResourceExpired(None, ''))
def testPackageModifiedWithInvalidDate(self):
"""Tests IsClientResourceExpired() with non-parsable header str date."""
self.assertTrue(
handlers.IsClientResourceExpired(None, 'date will not parse'))
def testPackageModifiedMatchingDate(self):
"""Tests IsClientResourceExpired() with matching header str date."""
header_dt_str = 'Wed, 06 Oct 2010 03:23:34 GMT'
dt = datetime.datetime(2010, 10, 06, 03, 23, 34) # same date
self.assertFalse(handlers.IsClientResourceExpired(dt, header_dt_str))
def testPackageModifiedWherePackageDateNewer(self):
"""Tests IsClientResourceExpired() with matching header str date."""
header_dt_str = 'Mon, 01 Jan 1930 01:00:00 GMT'
dt = datetime.datetime(2010, 10, 06, 03, 23, 34) # later date
self.assertTrue(handlers.IsClientResourceExpired(dt, header_dt_str))
def testGetClientIdForRequestWithSession(self):
"""Tests GetClientIdForRequest()."""
track = 'stable'
os_version = '10.6.6'
client_id = 'client_id'
client_id_dict = {'track': track, 'os_version': os_version}
session = self.mox.CreateMockAnything()
session.uuid = 'uuid'
request = self.mox.CreateMockAnything()
request.headers = self.mox.CreateMockAnything()
request.headers.get('X-munki-client-id', '').AndReturn(client_id)
self.mox.StubOutWithMock(handlers.common, 'ParseClientId')
handlers.common.ParseClientId(client_id, uuid=session.uuid).AndReturn(
client_id_dict)
self.mox.ReplayAll()
r = handlers.GetClientIdForRequest(
request, session=session, client_id_str='')
self.assertEqual(r, client_id_dict)
self.mox.VerifyAll()
def testGetClientIdForRequestWithoutSession(self):
"""Tests GetClientIdForRequest()."""
track = 'stable'
os_version = '10.6.6'
client_id = 'client_id'
client_id_dict = {'track': track, 'os_version': os_version}
client_id_str = 'track=%s|os_version=%s' % (track, os_version)
client_id_str_quoted = handlers.urllib.quote(client_id_str)
request = self.mox.CreateMockAnything()
request.headers = self.mox.CreateMockAnything()
self.mox.StubOutWithMock(handlers.common, 'ParseClientId')
handlers.common.ParseClientId(client_id_str).AndReturn(client_id_dict)
self.mox.ReplayAll()
r = handlers.GetClientIdForRequest(
request, session=None, client_id_str=client_id_str_quoted)
self.assertEqual(r, client_id_dict)
self.mox.VerifyAll()
def main(unused_argv):
test.main(unused_argv)
if __name__ == '__main__':
app.run()
| apache-2.0 | -7,469,800,519,277,540,000 | 33.950413 | 76 | 0.715063 | false |
moonfruit/yysite | polls/migrations/0001_initial.py | 1 | 1230 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-11-28 14:58
from __future__ import unicode_literals
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Choice',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('choice_text', models.CharField(max_length=200)),
('votes', models.IntegerField(default=0)),
],
),
migrations.CreateModel(
name='Question',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('question_text', models.CharField(max_length=200)),
('pub_date', models.DateTimeField(verbose_name='date published')),
],
),
migrations.AddField(
model_name='choice',
name='question',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='polls.Question'),
),
]
| mit | 8,369,215,854,529,492,000 | 31.368421 | 114 | 0.569919 | false |
watchdogpolska/django-mptt | setup.py | 1 | 1475 | #!/usr/bin/env python
from setuptools import find_packages, setup
setup(
name='django-mptt',
description='''Utilities for implementing Modified Preorder Tree Traversal
with your Django Models and working with trees of Model instances.''',
version=__import__('mptt').__version__,
author='Craig de Stigter',
author_email='[email protected]',
url='http://github.com/django-mptt/django-mptt',
license='MIT License',
packages=find_packages(),
include_package_data=True,
install_requires=(
'Django>=1.8',
),
tests_require=(
'mock-django>=0.6.7',
'mock>=1.3',
),
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
'Topic :: Utilities',
],
)
| mit | -2,403,173,726,046,439,400 | 33.302326 | 78 | 0.595932 | false |
thenenadx/forseti-security | google/cloud/security/common/data_access/org_resource_rel_dao.py | 1 | 3106 | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DAO for organization resource entity relationships."""
from google.cloud.security.common.data_access import folder_dao
from google.cloud.security.common.data_access import organization_dao
from google.cloud.security.common.data_access import project_dao
from google.cloud.security.common.gcp_type import resource
# TODO: The next editor must remove this disable and correct issues.
# pylint: disable=missing-type-doc,missing-return-type-doc
class OrgResourceRelDao(object):
"""DAO for organization resource entity relationships."""
def __init__(self):
"""Initialize."""
# Map the org resource type to the appropriate dao class
self._resource_db_lookup = {
resource.ResourceType.ORGANIZATION: {
'dao': organization_dao.OrganizationDao(),
'get': 'get_organization',
},
resource.ResourceType.FOLDER: {
'dao': folder_dao.FolderDao(),
'get': 'get_folder',
},
resource.ResourceType.PROJECT: {
'dao': project_dao.ProjectDao(),
'get': 'get_project',
}
}
def find_ancestors(self, org_resource, snapshot_timestamp=None):
"""Find ancestors of a particular resource.
Args:
org_resource: A Resource.
snapshot_timestamp: The timestamp to use for data lookup.
Returns:
A list of ancestors, starting with the closest ancestor.
"""
# TODO: handle case where snapshot is None
ancestors = []
curr_resource = org_resource
while curr_resource is not None:
parent_resource = None
if (curr_resource.parent and
curr_resource.parent.type and
curr_resource.parent.id):
resource_lookup = self._resource_db_lookup.get(
curr_resource.parent.type, {})
# No dao object for the parent resource, so quit
if not resource_lookup.get('dao'):
break
# Invoke the dao.get_*() method, to get the parent resource
parent_resource = getattr(
resource_lookup.get('dao'),
resource_lookup.get('get'))(
curr_resource.parent.id, snapshot_timestamp)
if parent_resource:
ancestors.append(parent_resource)
curr_resource = parent_resource
return ancestors
| apache-2.0 | -3,991,928,533,554,824,700 | 35.116279 | 75 | 0.613973 | false |
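A sketch of how the DAO above is typically driven. It needs a populated Forseti snapshot database; the project id and snapshot timestamp here are invented, and the call order simply mirrors what find_ancestors() itself does.

from google.cloud.security.common.data_access import org_resource_rel_dao
from google.cloud.security.common.data_access import project_dao

snapshot = '20170901T120000Z'          # assumed snapshot timestamp format
project = project_dao.ProjectDao().get_project('my-project-id', snapshot)
ancestors = org_resource_rel_dao.OrgResourceRelDao().find_ancestors(project, snapshot)
for ancestor in ancestors:             # closest ancestor first, e.g. folder then organization
    print(ancestor.type, ancestor.id)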
industrydive/mezzanine | mezzanine/generic/forms.py | 1 | 9451 | from __future__ import unicode_literals
from future.builtins import int, str, zip
from django import forms
from django_comments.forms import CommentSecurityForm, CommentForm
from django_comments.signals import comment_was_posted
from django.contrib.admin.templatetags.admin_static import static
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext, ugettext_lazy as _
from mezzanine.conf import settings
from mezzanine.core.forms import Html5Mixin
from mezzanine.generic.models import Keyword, ThreadedComment, Rating
from mezzanine.utils.cache import add_cache_bypass
from mezzanine.utils.email import split_addresses, send_mail_template
from mezzanine.utils.views import ip_for_request
class KeywordsWidget(forms.MultiWidget):
"""
Form field for the ``KeywordsField`` generic relation field. Since
the admin with model forms has no form field for generic
relations, this form field provides a single field for managing
the keywords. It contains two actual widgets, a text input for
entering keywords, and a hidden input that stores the ID of each
``Keyword`` instance.
The attached JavaScript adds behaviour so that when the form is
submitted, an AJAX post is made that passes the list of keywords
in the text input, and returns a list of keyword IDs which are
then entered into the hidden input before the form submits. The
list of IDs in the hidden input is what is used when retrieving
an actual value from the field for the form.
"""
class Media:
js = (static("mezzanine/js/admin/keywords_field.js"),)
def __init__(self, attrs=None):
"""
Setup the text and hidden form field widgets.
"""
widgets = (forms.HiddenInput,
forms.TextInput(attrs={"class": "vTextField"}))
super(KeywordsWidget, self).__init__(widgets, attrs)
self._ids = []
def decompress(self, value):
"""
Takes the sequence of ``AssignedKeyword`` instances and splits
them into lists of keyword IDs and titles each mapping to one
of the form field widgets.
"""
if hasattr(value, "select_related"):
keywords = [a.keyword for a in value.select_related("keyword")]
if keywords:
keywords = [(str(k.id), k.title) for k in keywords]
self._ids, words = list(zip(*keywords))
return (",".join(self._ids), ", ".join(words))
return ("", "")
def format_output(self, rendered_widgets):
"""
Wraps the output HTML with a list of all available ``Keyword``
instances that can be clicked on to toggle a keyword.
"""
rendered = super(KeywordsWidget, self).format_output(rendered_widgets)
links = ""
for keyword in Keyword.objects.all().order_by("title"):
prefix = "+" if str(keyword.id) not in self._ids else "-"
links += ("<a href='#'>%s%s</a>" % (prefix, str(keyword)))
rendered += mark_safe("<p class='keywords-field'>%s</p>" % links)
return rendered
def value_from_datadict(self, data, files, name):
"""
Return the comma separated list of keyword IDs for use in
``KeywordsField.save_form_data()``.
"""
return data.get("%s_0" % name, "")
class ThreadedCommentForm(CommentForm, Html5Mixin):
name = forms.CharField(label=_("Name"), help_text=_("required"),
max_length=50)
email = forms.EmailField(label=_("Email"),
help_text=_("required (not published)"))
url = forms.URLField(label=_("Website"), help_text=_("optional"),
required=False)
# These are used to get/set prepopulated fields via cookies.
cookie_fields = ("name", "email", "url")
cookie_prefix = "mezzanine-comment-"
def __init__(self, request, *args, **kwargs):
"""
Set some initial field values from cookies or the logged in
user, and apply some HTML5 attributes to the fields if the
``FORMS_USE_HTML5`` setting is ``True``.
"""
kwargs.setdefault("initial", {})
user = request.user
for field in ThreadedCommentForm.cookie_fields:
cookie_name = ThreadedCommentForm.cookie_prefix + field
value = request.COOKIES.get(cookie_name, "")
if not value and user.is_authenticated():
if field == "name":
value = user.get_full_name()
if not value and user.username != user.email:
value = user.username
elif field == "email":
value = user.email
kwargs["initial"][field] = value
super(ThreadedCommentForm, self).__init__(*args, **kwargs)
def get_comment_model(self):
"""
Use the custom comment model instead of the built-in one.
"""
return ThreadedComment
def check_for_duplicate_comment(self, new):
"""
We handle duplicates inside ``save``, since django_comments'
`check_for_duplicate_comment` doesn't deal with extra fields
defined on the comment model.
"""
return new
def save(self, request):
"""
Saves a new comment and sends any notification emails.
"""
comment = self.get_comment_object()
obj = comment.content_object
if request.user.is_authenticated():
comment.user = request.user
comment.by_author = request.user == getattr(obj, "user", None)
comment.ip_address = ip_for_request(request)
comment.replied_to_id = self.data.get("replied_to")
# Mezzanine's duplicate check that also checks `replied_to_id`.
lookup = {
"content_type": comment.content_type,
"object_pk": comment.object_pk,
"user_name": comment.user_name,
"user_email": comment.user_email,
"user_url": comment.user_url,
"replied_to_id": comment.replied_to_id,
}
for duplicate in self.get_comment_model().objects.filter(**lookup):
if (duplicate.submit_date.date() == comment.submit_date.date() and
duplicate.comment == comment.comment):
return duplicate
comment.save()
comment_was_posted.send(sender=comment.__class__, comment=comment,
request=request)
notify_emails = split_addresses(settings.COMMENTS_NOTIFICATION_EMAILS)
if notify_emails:
subject = ugettext("New comment for: ") + str(obj)
context = {
"comment": comment,
"comment_url": add_cache_bypass(comment.get_absolute_url()),
"request": request,
"obj": obj,
}
send_mail_template(subject, "email/comment_notification",
settings.DEFAULT_FROM_EMAIL, notify_emails,
context)
return comment
class RatingForm(CommentSecurityForm):
"""
Form for a rating. Subclasses ``CommentSecurityForm`` to make use
of its easy setup for generic relations.
"""
value = forms.ChoiceField(label="", widget=forms.RadioSelect,
choices=list(zip(
*(settings.RATINGS_RANGE,) * 2)))
def __init__(self, request, *args, **kwargs):
self.request = request
super(RatingForm, self).__init__(*args, **kwargs)
def clean(self):
"""
Check unauthenticated user's cookie as a light check to
prevent duplicate votes.
"""
bits = (self.data["content_type"], self.data["object_pk"])
request = self.request
self.current = "%s.%s" % bits
self.previous = request.COOKIES.get("mezzanine-rating", "").split(",")
already_rated = self.current in self.previous
if already_rated and not self.request.user.is_authenticated():
raise forms.ValidationError(ugettext("Already rated."))
return self.cleaned_data
def save(self):
"""
Saves a new rating - authenticated users can update the
value if they've previously rated.
"""
user = self.request.user
self.undoing = False
rating_value = self.cleaned_data["value"]
rating_name = self.target_object.get_ratingfield_name()
rating_manager = getattr(self.target_object, rating_name)
if user.is_authenticated():
try:
rating_instance = rating_manager.get(user=user)
except Rating.DoesNotExist:
rating_instance = Rating(user=user, value=rating_value)
rating_manager.add(rating_instance)
else:
if rating_instance.value != int(rating_value):
rating_instance.value = rating_value
rating_instance.save()
else:
# User submitted the same rating as previously,
# which we treat as undoing the rating (like a toggle).
rating_instance.delete()
self.undoing = True
else:
rating_instance = Rating(value=rating_value)
rating_manager.add(rating_instance)
return rating_instance
| bsd-2-clause | 398,190,695,416,197,300 | 40.091304 | 78 | 0.596022 | false |
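A sketch of wiring one of these forms into a view. The view name and blog_post object are illustrative; the (request, object, data) construction order follows CommentSecurityForm, which RatingForm and ThreadedCommentForm both build on.

from mezzanine.generic.forms import RatingForm

def rate_object(request, blog_post):
    # blog_post is any model instance carrying a RatingField; names are illustrative
    form = RatingForm(request, blog_post, request.POST or None)
    if request.method == "POST" and form.is_valid():
        rating = form.save()   # a second identical vote from the same user undoes it
        return rating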
jmb/geopy | geopy/geocoders/tomtom.py | 1 | 7727 | from geopy.compat import quote, urlencode
from geopy.geocoders.base import DEFAULT_SENTINEL, Geocoder
from geopy.location import Location
from geopy.util import logger
__all__ = ("TomTom", )
class TomTom(Geocoder):
"""TomTom geocoder.
Documentation at:
https://developer.tomtom.com/search-api/search-api-documentation
.. versionadded:: 1.15.0
"""
geocode_path = '/search/2/geocode/%(query)s.json'
reverse_path = '/search/2/reverseGeocode/%(position)s.json'
def __init__(
self,
api_key,
format_string=None,
scheme=None,
timeout=DEFAULT_SENTINEL,
proxies=DEFAULT_SENTINEL,
user_agent=None,
ssl_context=DEFAULT_SENTINEL,
domain='api.tomtom.com',
):
"""
:param str api_key: TomTom API key.
:param str format_string:
See :attr:`geopy.geocoders.options.default_format_string`.
:param str scheme:
See :attr:`geopy.geocoders.options.default_scheme`.
:param int timeout:
See :attr:`geopy.geocoders.options.default_timeout`.
:param dict proxies:
See :attr:`geopy.geocoders.options.default_proxies`.
:param str user_agent:
See :attr:`geopy.geocoders.options.default_user_agent`.
:type ssl_context: :class:`ssl.SSLContext`
:param ssl_context:
See :attr:`geopy.geocoders.options.default_ssl_context`.
:param str domain: Domain where the target TomTom service
is hosted.
"""
super(TomTom, self).__init__(
format_string=format_string,
scheme=scheme,
timeout=timeout,
proxies=proxies,
user_agent=user_agent,
ssl_context=ssl_context,
)
self.api_key = api_key
self.api = "%s://%s%s" % (self.scheme, domain, self.geocode_path)
self.api_reverse = "%s://%s%s" % (self.scheme, domain, self.reverse_path)
def geocode(
self,
query,
exactly_one=True,
timeout=DEFAULT_SENTINEL,
limit=None,
typeahead=False,
language=None,
):
"""
Return a location point by address.
:param str query: The address or query you wish to geocode.
:param bool exactly_one: Return one result or a list of results, if
available.
:param int timeout: Time, in seconds, to wait for the geocoding service
to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
exception. Set this only if you wish to override, on this call
only, the value set during the geocoder's initialization.
:param int limit: Maximum amount of results to return from the service.
Unless exactly_one is set to False, limit will always be 1.
:param bool typeahead: If the "typeahead" flag is set, the query
will be interpreted as a partial input and the search will
enter predictive mode.
:param str language: Language in which search results should be
returned. When data in specified language is not
available for a specific field, default language is used.
List of supported languages (case-insensitive):
https://developer.tomtom.com/online-search/online-search-documentation/supported-languages
:rtype: ``None``, :class:`geopy.location.Location` or a list of them, if
``exactly_one=False``.
"""
query = self.format_string % query
params = self._geocode_params(query)
params['typeahead'] = self._boolean_value(typeahead)
if limit:
params['limit'] = str(int(limit))
if exactly_one:
params['limit'] = '1'
if language:
params['language'] = language
quoted_query = quote(query.encode('utf-8'))
url = "?".join((self.api % dict(query=quoted_query),
urlencode(params)))
logger.debug("%s.geocode: %s", self.__class__.__name__, url)
return self._parse_json(
self._call_geocoder(url, timeout=timeout), exactly_one
)
def reverse(
self,
query,
exactly_one=True,
timeout=DEFAULT_SENTINEL,
language=None,
):
"""
Return an address by location point.
:param query: The coordinates for which you wish to obtain the
closest human-readable addresses.
:type query: :class:`geopy.point.Point`, list or tuple of ``(latitude,
longitude)``, or string as ``"%(latitude)s, %(longitude)s"``.
:param bool exactly_one: Return one result or a list of results, if
available.
:param int timeout: Time, in seconds, to wait for the geocoding service
to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
exception. Set this only if you wish to override, on this call
only, the value set during the geocoder's initialization.
:param str language: Language in which search results should be
returned. When data in specified language is not
available for a specific field, default language is used.
List of supported languages (case-insensitive):
https://developer.tomtom.com/online-search/online-search-documentation/supported-languages
.. versionadded:: 1.18.0
:rtype: ``None``, :class:`geopy.location.Location` or a list of them, if
``exactly_one=False``.
"""
position = self._coerce_point_to_string(query)
params = self._reverse_params(position)
if language:
params['language'] = language
quoted_position = quote(position.encode('utf-8'))
url = "?".join((self.api_reverse % dict(position=quoted_position),
urlencode(params)))
logger.debug("%s.reverse: %s", self.__class__.__name__, url)
return self._parse_reverse_json(
self._call_geocoder(url, timeout=timeout), exactly_one
)
@staticmethod
def _boolean_value(bool_value):
return 'true' if bool_value else 'false'
def _geocode_params(self, formatted_query):
return {
'key': self.api_key,
}
def _reverse_params(self, position):
return {
'key': self.api_key,
}
def _parse_json(self, resources, exactly_one):
if not resources or not resources['results']:
return None
if exactly_one:
return self._parse_search_result(resources['results'][0])
else:
return [self._parse_search_result(result)
for result in resources['results']]
def _parse_search_result(self, result):
latitude = result['position']['lat']
longitude = result['position']['lon']
return Location(result['address']['freeformAddress'],
(latitude, longitude), result)
def _parse_reverse_json(self, resources, exactly_one):
if not resources or not resources['addresses']:
return None
if exactly_one:
return self._parse_reverse_result(resources['addresses'][0])
else:
return [self._parse_reverse_result(result)
for result in resources['addresses']]
def _parse_reverse_result(self, result):
latitude, longitude = result['position'].split(',')
return Location(result['address']['freeformAddress'],
(latitude, longitude), result)
| mit | 1,211,560,324,719,296,800 | 34.122727 | 102 | 0.588068 | false |
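A typical usage sketch; the API key is a placeholder, the addresses are illustrative, and a network connection to the TomTom endpoints is required.

from geopy.geocoders import TomTom

geolocator = TomTom(api_key='YOUR-TOMTOM-API-KEY')        # placeholder key
location = geolocator.geocode('112 Wall St, New York', language='en-US')
if location is not None:
    print(location.address, location.latitude, location.longitude)
address = geolocator.reverse('40.704, -74.009', language='en-US')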
felixsch/trollolo | scripts/graph.py | 1 | 4099 | #!/usr/bin/env python
class Graph:
"Plot various graphs into burndown chart"
def __init__ (self, graph_data):
self.getGraphData(graph_data)
def getGraphData(self, graph_data):
self.x = graph_data['x']
self.y = graph_data['y']
self.xy_extra = 0
self.ymin = graph_data['ymin']
self.ymax = graph_data['ymax']
self.total = graph_data['total']
self.plot_count = graph_data['plot_count']
self.draw_tasks_diff = graph_data['draw_tasks_diff']
self.draw_bonus_tasks_diff = graph_data['draw_bonus_tasks_diff']
if 'x_extra' in graph_data:
self.x_extra = graph_data['x_extra']
self.y_extra = graph_data['y_extra']
self.xy_extra = 1
if self.draw_tasks_diff:
self.x_arrow_start_end = graph_data['x_arrow_start_end']
self.y_arrow_start = graph_data['y_arrow_start']
self.y_arrow_end = graph_data['y_arrow_end']
self.y_text = graph_data['y_text']
if self.draw_bonus_tasks_diff:
self.y_arrow_start_bonus = graph_data['y_arrow_start_bonus']
self.y_arrow_end_bonus = graph_data['y_arrow_end_bonus']
self.y_text_bonus = graph_data['y_text_bonus']
self.bonus_tasks_day_one = graph_data['bonus_tasks_day_one']
self.subplot = graph_data['subplot']
return
def draw(self, y_label, color, marker, linestyle, linewidth, plot):
self.plot = plot
self.subplot.set_ylabel(y_label, color=color)
self.subplot.set_ylim([self.ymin, self.ymax])
if self.plot_count == 1:
self.subplot.tick_params(axis='y', colors=color)
if self.plot_count >= 2:
self.subplot.tick_params(axis='y', colors=color)
self.subplot.spines['right'].set_position(('axes', 1.15))
self.plot.fig.subplots_adjust(right=0.8)
self.subplot.plot(self.x, self.y, color=color, marker=marker, linestyle=linestyle, linewidth=linewidth)
self.drawBonus(color, marker, linestyle, linewidth)
self.drawBars(color)
if self.draw_tasks_diff:
self.drawTasksDiff(color)
if self.draw_bonus_tasks_diff:
self.drawBonusTasksDiff(color)
return
def drawBonus(self, color, marker, linestyle, linewidth):
if self.xy_extra and len(self.x_extra) > 0:
self.subplot.plot(self.x_extra, self.y_extra, color=color, marker=marker, linestyle=linestyle, linewidth=linewidth)
return
def drawBars(self, color):
if len(self.total) > 1:
width = 0.2
offset = 0
if self.plot_count == 1:
offset = -width
new = [0, 0]
for i in range(1, len(self.total)):
new.append(self.total[i] - self.total[i - 1])
additional_days = []
additional = []
for i in range(len(new)):
if new[i] != 0:
additional_days.append(i + offset)
additional.append(new[i])
if len(additional) > 0:
self.subplot.bar(additional_days, additional, width, color=color)
return
def drawTasksDiff(self, color):
tasks_done = self.total[0] - self.y[0]
if tasks_done > 0:
self.subplot.annotate("",
xy=(self.x_arrow_start_end, self.y_arrow_start), xycoords='data',
xytext=(self.x_arrow_start_end, self.y_arrow_end), textcoords='data',
arrowprops=dict(arrowstyle="<|-|>", connectionstyle="arc3", color=color)
)
self.subplot.text(0.7, self.y_text, str(int(tasks_done)) + " tasks done",
rotation='vertical', verticalalignment='top', color=color
)
return
def drawBonusTasksDiff(self, color):
if self.bonus_tasks_day_one:
self.subplot.annotate("",
xy=(self.x_arrow_start_end, self.y_arrow_start_bonus), xycoords='data',
xytext=(self.x_arrow_start_end, self.y_arrow_end_bonus), textcoords='data',
arrowprops=dict(arrowstyle="<|-|>", connectionstyle="arc3", color=color)
)
self.subplot.text(0.4, self.y_text_bonus, str(int(-self.y_extra[0])) + " extra",
rotation='vertical', verticalalignment='center', color=color
)
self.subplot.text(0.7, self.y_text_bonus, "tasks done",
rotation='vertical', verticalalignment='center', color=color
)
return
| gpl-3.0 | 1,062,859,525,571,218,000 | 34.336207 | 121 | 0.634545 | false |
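A minimal sketch of the graph_data dictionary Graph() expects, assuming matplotlib; in trollolo this dict is normally assembled by the burndown plotting script, and every number below is invented.

import matplotlib.pyplot as plt

fig, subplot = plt.subplots()
graph_data = {
    'x': [1, 2, 3], 'y': [30, 22, 15],          # open tasks per day
    'ymin': 0, 'ymax': 35,
    'total': [30, 30, 32],                      # committed tasks per day
    'plot_count': 1,
    'draw_tasks_diff': False,
    'draw_bonus_tasks_diff': False,
    'subplot': subplot,
}
graph = Graph(graph_data)
graph.draw('Open tasks', 'blue', 'o', '-', 2, None)  # `plot` only matters when plot_count >= 2
fig.savefig('burndown.png')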
kernsuite-debian/lofar | LCS/MessageBus/src/noqpidfallback.py | 1 | 1097 | #!/usr/bin/env python3
import sys
print("QPID support NOT enabled! Will NOT connect to any broker, and messages will be lost!")
"""
Exceptions.
"""
class ProtonException(Exception):
pass
class SessionError(Exception):
pass
class Timeout(Exception):
pass
"""
Messages.
"""
class Message(object):
def __init__(self, content_type, durable):
self.content = ""
"""
Communication.
"""
class Sender(object):
def __init__(self, dest):
self.dest = dest
def send(self, msg):
pass
class Receiver(object):
def __init__(self, source):
self.capacity = 0
self.source = source
def fetch(self):
return None
class Session(object):
def sender(self, address):
return Sender(address)
def receiver(self, address):
return Receiver(address)
def next_receiver(self, timeout=0):
return Receiver("unknown")
def acknowledge(self, msg):
pass
class Connection(object):
def __init__(self, broker):
self.reconnect = False
def open(self):
pass
def close(self, timeout=0):
pass
def session(self):
return Session()
| gpl-3.0 | 1,013,217,743,745,717,100 | 14.027397 | 93 | 0.654512 | false |
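The stubs above mirror just enough of the qpid.messaging surface for LOFAR code to keep running without a broker; a short sketch of that no-op behaviour (broker and queue names are illustrative):

conn = Connection('broker.example.org:5672')   # hostname is illustrative
conn.open()
session = conn.session()
sender = session.sender('lofar.task.feedback') # queue name is illustrative
sender.send(Message(content_type='text/plain', durable=True))  # silently dropped
receiver = session.next_receiver(timeout=1)
print(receiver.fetch())                        # None: nothing is ever received
conn.close()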
cyberdelia/metrology | metrology/reporter/logger.py | 1 | 3203 | import logging
from metrology.instruments import (
Counter,
Gauge,
Histogram,
Meter,
Timer,
UtilizationTimer
)
from metrology.reporter.base import Reporter
class LoggerReporter(Reporter):
"""
A logging reporter that write metrics to a logger ::
reporter = LoggerReporter(level=logging.DEBUG, interval=10)
reporter.start()
:param logger: logger to use
:param level: logger level
:param interval: time between each reporting
:param prefix: metrics name prefix
"""
def __init__(self, logger=logging, level=logging.INFO, **options):
self.logger = logger
self.level = level
self.prefix = options.get('prefix')
super(LoggerReporter, self).__init__(**options)
def write(self):
for name, metric in self.registry:
if isinstance(metric, Meter):
self.log_metric(name, 'meter', metric, [
'count', 'one_minute_rate', 'five_minute_rate',
'fifteen_minute_rate', 'mean_rate'
])
if isinstance(metric, Gauge):
self.log_metric(name, 'gauge', metric, [
'value'
])
if isinstance(metric, UtilizationTimer):
self.log_metric(name, 'timer', metric, [
'count', 'one_minute_rate', 'five_minute_rate',
'fifteen_minute_rate', 'mean_rate',
'min', 'max', 'mean', 'stddev',
'one_minute_utilization', 'five_minute_utilization',
'fifteen_minute_utilization', 'mean_utilization'
], [
'median', 'percentile_95th'
])
if isinstance(metric, Timer):
self.log_metric(name, 'timer', metric, [
'count', 'total_time', 'one_minute_rate',
'five_minute_rate', 'fifteen_minute_rate', 'mean_rate',
'min', 'max', 'mean', 'stddev'
], [
'median', 'percentile_95th'
])
if isinstance(metric, Counter):
self.log_metric(name, 'counter', metric, [
'count'
])
if isinstance(metric, Histogram):
self.log_metric(name, 'histogram', metric, [
'count', 'min', 'max', 'mean', 'stddev',
], [
'median', 'percentile_95th'
])
def log_metric(self, name, type, metric, keys, snapshot_keys=None):
if snapshot_keys is None:
snapshot_keys = []
messages = []
if self.prefix:
messages.append(self.prefix)
messages.append(name)
messages.append(type)
for name in keys:
messages.append("{0}={1}".format(name, getattr(metric, name)))
if hasattr(metric, 'snapshot'):
snapshot = metric.snapshot
for name in snapshot_keys:
messages.append("{0}={1}".format(name,
getattr(snapshot, name)))
self.logger.log(self.level, " ".join(messages))
| mit | -4,748,470,296,817,511,000 | 33.815217 | 75 | 0.505151 | false |
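A usage sketch expanding on the class docstring; the meter name is invented and the standard Metrology registry is assumed.

import logging
from metrology import Metrology
from metrology.reporter.logger import LoggerReporter

logging.basicConfig(level=logging.DEBUG)
requests_meter = Metrology.meter('requests')   # illustrative metric
requests_meter.mark()
reporter = LoggerReporter(level=logging.DEBUG, interval=10)
reporter.start()                               # logs every registered metric each interval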
YoungKwonJo/mlxtend | tests/tests_classifier/test_ensembleclassifier.py | 1 | 2352 | import numpy as np
from mlxtend.classifier import EnsembleClassifier
from sklearn.grid_search import GridSearchCV
from sklearn import cross_validation
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
import numpy as np
from sklearn import datasets
iris = datasets.load_iris()
X, y = iris.data[:, 1:3], iris.target
def test_EnsembleClassifier():
np.random.seed(123)
clf1 = LogisticRegression()
clf2 = RandomForestClassifier()
clf3 = GaussianNB()
eclf = EnsembleClassifier(clfs=[clf1, clf2, clf3], voting='hard')
scores = cross_validation.cross_val_score(eclf, X, y, cv=5, scoring='accuracy')
scores_mean = (round(scores.mean(), 2))
assert(scores_mean == 0.94)
def test_EnsembleClassifier_weights():
np.random.seed(123)
clf1 = LogisticRegression()
clf2 = RandomForestClassifier()
clf3 = GaussianNB()
eclf = EnsembleClassifier(clfs=[clf1, clf2, clf3], voting='soft', weights=[1,2,10])
scores = cross_validation.cross_val_score(eclf, X, y, cv=5, scoring='accuracy')
scores_mean = (round(scores.mean(), 2))
assert(scores_mean == 0.93)
def test_EnsembleClassifier_gridsearch():
clf1 = LogisticRegression(random_state=1)
clf2 = RandomForestClassifier(random_state=1)
clf3 = GaussianNB()
eclf = EnsembleClassifier(clfs=[clf1, clf2, clf3], voting='soft')
params = {'logisticregression__C': [1.0, 100.0],
'randomforestclassifier__n_estimators': [20, 200],}
grid = GridSearchCV(estimator=eclf, param_grid=params, cv=5)
grid.fit(iris.data, iris.target)
mean_scores = []
for params, mean_score, scores in grid.grid_scores_:
mean_scores.append(round(mean_score, 2))
assert(mean_scores == [0.95, 0.96, 0.96, 0.95])
def test_EnsembleClassifier_gridsearch_enumerate_names():
clf1 = LogisticRegression(random_state=1)
clf2 = RandomForestClassifier(random_state=1)
eclf = EnsembleClassifier(clfs=[clf1, clf1, clf2], voting='soft')
params = {'logisticregression-1__C': [1.0, 100.0],
'logisticregression-2__C': [1.0, 100.0],
'randomforestclassifier__n_estimators': [5, 20],}
grid = GridSearchCV(estimator=eclf, param_grid=params, cv=5)
gs = grid.fit(iris.data, iris.target)
| bsd-3-clause | 1,804,529,685,603,916,500 | 30.783784 | 87 | 0.690051 | false |
danakj/chromium | build/android/method_count.py | 1 | 3994 | #! /usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import collections
import os
import re
import shutil
import sys
import tempfile
import zipfile
import devil_chromium
from devil.android.sdk import dexdump
from pylib.constants import host_paths
sys.path.append(os.path.join(host_paths.DIR_SOURCE_ROOT, 'build', 'util', 'lib',
'common'))
import perf_tests_results_helper # pylint: disable=import-error
# Example dexdump output:
# DEX file header:
# magic : 'dex\n035\0'
# checksum : b664fc68
# signature : ae73...87f1
# file_size : 4579656
# header_size : 112
# link_size : 0
# link_off : 0 (0x000000)
# string_ids_size : 46148
# string_ids_off : 112 (0x000070)
# type_ids_size : 5730
# type_ids_off : 184704 (0x02d180)
# proto_ids_size : 8289
# proto_ids_off : 207624 (0x032b08)
# field_ids_size : 17854
# field_ids_off : 307092 (0x04af94)
# method_ids_size : 33699
# method_ids_off : 449924 (0x06dd84)
# class_defs_size : 2616
# class_defs_off : 719516 (0x0afa9c)
# data_size : 3776428
# data_off : 803228 (0x0c419c)
# For what these mean, refer to:
# https://source.android.com/devices/tech/dalvik/dex-format.html
_CONTRIBUTORS_TO_DEX_CACHE = {'type_ids_size': 'types',
'string_ids_size': 'strings',
'method_ids_size': 'methods',
'field_ids_size': 'fields'}
def _ExtractSizesFromDexFile(dex_path):
counts = {}
for line in dexdump.DexDump(dex_path, file_summary=True):
if not line.strip():
# Each method, type, field, and string contributes 4 bytes (1 reference)
# to our DexCache size.
counts['dex_cache_size'] = (
sum(counts[x] for x in _CONTRIBUTORS_TO_DEX_CACHE)) * 4
return counts
m = re.match(r'([a-z_]+_size) *: (\d+)', line)
if m:
counts[m.group(1)] = int(m.group(2))
raise Exception('Unexpected end of output.')
def _ExtractSizesFromZip(path):
tmpdir = tempfile.mkdtemp(suffix='_dex_extract')
try:
counts = collections.defaultdict(int)
with zipfile.ZipFile(path, 'r') as z:
for subpath in z.namelist():
if not subpath.endswith('.dex'):
continue
extracted_path = z.extract(subpath, tmpdir)
cur_counts = _ExtractSizesFromDexFile(extracted_path)
for k in cur_counts:
counts[k] += cur_counts[k]
return dict(counts)
finally:
shutil.rmtree(tmpdir)
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'--apk-name', help='Name of the APK to which the dexfile corresponds.')
parser.add_argument('dexfile')
args = parser.parse_args()
devil_chromium.Initialize()
if not args.apk_name:
dirname, basename = os.path.split(args.dexfile)
while basename:
if 'apk' in basename:
args.apk_name = basename
break
dirname, basename = os.path.split(dirname)
else:
parser.error(
'Unable to determine apk name from %s, '
'and --apk-name was not provided.' % args.dexfile)
if os.path.splitext(args.dexfile)[1] in ('.zip', '.apk', '.jar'):
sizes = _ExtractSizesFromZip(args.dexfile)
else:
sizes = _ExtractSizesFromDexFile(args.dexfile)
def print_result(name, value_key, description=None):
perf_tests_results_helper.PrintPerfResult(
'%s_%s' % (args.apk_name, name), 'total', [sizes[value_key]],
description or name)
for dex_header_name, readable_name in _CONTRIBUTORS_TO_DEX_CACHE.iteritems():
print_result(readable_name, dex_header_name)
print_result(
'DexCache_size', 'dex_cache_size', 'bytes of permanent dirty memory')
return 0
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause | -6,493,615,248,931,155,000 | 29.96124 | 80 | 0.623185 | false |
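An invocation sketch; the output directory and APK name are illustrative.

# Run from a Chromium checkout, e.g.:
#   build/android/method_count.py --apk-name ChromePublic.apk \
#       out/Release/apks/ChromePublic.apk
# The script prints perf-result lines for methods, fields, strings and types,
# plus the derived DexCache_size, for every .dex inside the given .apk/.zip/.dex.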
dominicrodger/tinyblog | tests/utils.py | 1 | 1274 | from datetime import datetime
import django
from django.core.management import call_command
import factory
from StringIO import StringIO
import sys
from tinyblog.models import Post, EmailSubscriber
class PostFactory(factory.Factory):
FACTORY_FOR = Post
title = factory.Sequence(lambda n: 'Post %s' % n)
slug = factory.Sequence(lambda n: 'post-%s' % n)
created = datetime(2013, 1, 1, 7, 0, 0)
class EmailSubscriberFactory(factory.Factory):
FACTORY_FOR = EmailSubscriber
email = '[email protected]'
class OutputRedirector(object):
def __init__(self, obj):
self.obj = obj
def __enter__(self):
self.original_stdout = sys.stdout
self.original_stderr = sys.stderr
sys.stdout = self.obj
sys.stderr = self.obj
def __exit__(self, type, value, traceback):
sys.stdout = self.original_stdout
sys.stderr = self.original_stderr
def run_command_for_test(command, *args):
content = StringIO()
with OutputRedirector(content):
call_command(command, *args, stdout=content, stderr=content)
content.seek(0)
return content.read().strip()
def is_before_django_1_5():
version = django.VERSION
if version[0] > 1:
return False
return version[1] < 5
| bsd-3-clause | -1,978,692,103,374,146,600 | 22.163636 | 68 | 0.666405 | false |
mclinux/mclinux-packages | external-packages/apt-notifier/src/usr/bin/apt-notifier.py | 1 | 6927 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import subprocess
import sys
import os
import tempfile
from os import environ
from PyQt4 import QtGui
from PyQt4 import QtCore
rc_file_name = environ.get('HOME') + '/.kde/share/config/apt-notifierrc'
message_status = "not displayed"
# Check for updates, using subprocess.Popen
def check_updates():
global message_status
#Create an inline script (what used to be /usr/bin/apt-notifier-check-Updates) and then run it to get the number of updates.
script = '''#!/bin/sh
sorted_list_of_upgrades()
{
#Create a sorted list of the names of the packages that are upgradeable.
LC_ALL=en_US apt-get -o 'Debug::NoLocking=true' --trivial-only -V upgrade 2>/dev/null \
| grep -v -e 'lists...' \
-e 'dependency tree' \
-e 'information...' \
-e 'back:' \
-e 'upgraded:' \
-e 'upgraded.' \
-e 'archives.' \
-e 'used.'\
-e 'freed.'\
| awk '{ print $1 }' | sort
}
if [ -s /var/lib/synaptic/preferences ];
then
#/var/lib/synaptic/preferences is a non-zero size file, which means there are packages pinned in Synaptic.
#Remove from the sorted_list_of_upgrades, packages that are pinned in Synaptic, and then get a count of remaining.
sorted_list_of_upgrades|grep -vx $(cat /var/lib/synaptic/preferences|grep 'Package:'|sed -e 's/Package: /-e /g')|wc -l
else
#/var/lib/synaptic/preferences is either a zero byte file, meaning packages were pinned in Synaptic at some time in
# the past but none are currently pinned. Or the file is not present, meaning packages have never been pinned using
# Synaptic. In either case, just get a count of how many upgradeable packages are in the list.
sorted_list_of_upgrades|wc -l
fi
'''
script_file = tempfile.NamedTemporaryFile('wt')
script_file.write(script)
script_file.flush()
run = subprocess.Popen(["echo -n `sh %s` new updates available" % script_file.name],shell=True, stdout=subprocess.PIPE)
# Read the output into a text string
text = run.stdout.read(128)
# Alter both Icon and Tooltip, depending on updates available or not
if text.startswith( "0" ):
message_status = "not displayed" # Resets flag once there are no more updates
add_hide_action()
if icon_config != "show":
AptIcon.hide()
else:
AptIcon.setIcon(NoUpdatesIcon)
AptIcon.setToolTip(text)
else:
AptIcon.setIcon(NewUpdatesIcon)
AptIcon.show()
AptIcon.setToolTip(text)
add_upgrade_action()
# Shows the pop up message only if not displayed before
if message_status == "not displayed":
def show_message():
AptIcon.showMessage("Updates", "You have " + text)
Timer.singleShot(1000, show_message)
message_status = "displayed"
# Define the command to run when clicking Tray Icon
def start_synaptic():
run = subprocess.Popen(['/usr/bin/su-to-root -X -c synaptic'],shell=True).wait()
check_updates()
def upgrade():
script = '''#!/bin/sh
/usr/bin/su-to-root -X -c "x-terminal-emulator -e apt-get upgrade"
PID=`pidof apt-get| cut -f 1 -d " "`
if [ $PID ]; then
while (ps -p $PID > /dev/null); do
sleep 5
done
fi
'''
script_file = tempfile.NamedTemporaryFile('wt')
script_file.write(script)
script_file.flush()
run = subprocess.Popen(['sh %s' % script_file.name],shell=True).wait()
check_updates()
# Define the action on clicking Tray Icon
def start_synaptic_activated(reason):
if reason == QtGui.QSystemTrayIcon.Trigger:
start_synaptic()
def read_icon_config():
"""Reads ~/.kde/share/config/apt-notifierrc, returns 'show' if file doesn't exist or does not contain DontShowIcon"""
command_string = "cat " + rc_file_name + " | grep -q DontShowIcon"
exit_state = subprocess.call([command_string], shell=True, stdout=subprocess.PIPE)
if exit_state != 0:
return "show"
def set_noicon():
"""Reads ~/.kde/share/config/apt-notifierrc. If "DontShowIcon blah blah blah" is already there, don't write it again"""
command_string = "cat " + rc_file_name + " | grep -q DontShowIcon"
exit_state = subprocess.call([command_string], shell=True, stdout=subprocess.PIPE)
if exit_state != 0:
file = open(rc_file_name, 'a')
file.write ('[DontShowIcon] #Remove this entry if you want the apt-notify icon to show even when there are no upgrades available\n')
file.close()
AptIcon.hide()
icon_config = "donot show"
def add_upgrade_action():
ActionsMenu.clear()
upgrade_action = ActionsMenu.addAction("Upgrade all packages")
AptNotify.connect(upgrade_action, QtCore.SIGNAL("triggered()"), upgrade)
add_quit_action()
def add_hide_action():
ActionsMenu.clear()
if icon_config == "show":
hide_action = ActionsMenu.addAction("Hide until updates available")
AptNotify.connect(hide_action,QtCore.SIGNAL("triggered()"),set_noicon)
add_quit_action()
def add_quit_action():
ActionsMenu.addSeparator()
quit_action = ActionsMenu.addAction(QuitIcon,"Quit Apt-Notification")
AptNotify.connect(quit_action, QtCore.SIGNAL("triggered()"), AptNotify.exit)
# General application code
def main():
# Define Core objects, Tray icon and QTimer
global AptNotify
global AptIcon
global QuitIcon
global icon_config
global upgrade_action
global quit_action
global Timer
AptNotify = QtGui.QApplication(sys.argv)
AptIcon = QtGui.QSystemTrayIcon()
Timer = QtCore.QTimer()
icon_config = read_icon_config()
# Define the icons:
global NoUpdatesIcon
global NewUpdatesIcon
NoUpdatesIcon = QtGui.QIcon("/usr/share/icons/mnotify-none.png")
NewUpdatesIcon = QtGui.QIcon("/usr/share/icons/mnotify-some.png")
QuitIcon = QtGui.QIcon("/usr/share/icons/oxygen/22x22/actions/system-shutdown.png")
# Create the right-click menu and add the Tooltip text
global ActionsMenu
ActionsMenu = QtGui.QMenu()
AptIcon.connect( AptIcon, QtCore.SIGNAL( "activated(QSystemTrayIcon::ActivationReason)" ), start_synaptic_activated)
AptNotify.connect(Timer, QtCore.SIGNAL("timeout()"), check_updates)
# Integrate it together,apply checking of updated packages and set timer to every 5 minutes (1 second = 1000)
check_updates()
AptIcon.setContextMenu(ActionsMenu)
if icon_config == "show":
AptIcon.show()
Timer.start(300000)
if AptNotify.isSessionRestored():
sys.exit(1)
sys.exit(AptNotify.exec_())
if __name__ == '__main__':
main()
| gpl-3.0 | 7,291,835,631,016,373,000 | 38.582857 | 140 | 0.640393 | false |
digitalocean/netbox | netbox/tenancy/forms.py | 1 | 3588 | from django import forms
from extras.forms import (
AddRemoveTagsForm, CustomFieldModelForm, CustomFieldBulkEditForm, CustomFieldFilterForm, CustomFieldModelCSVForm,
)
from extras.models import Tag
from utilities.forms import (
BootstrapMixin, CommentField, CSVModelChoiceField, CSVModelForm, DynamicModelChoiceField,
DynamicModelMultipleChoiceField, SlugField, TagFilterField,
)
from .models import Tenant, TenantGroup
#
# Tenant groups
#
class TenantGroupForm(BootstrapMixin, forms.ModelForm):
parent = DynamicModelChoiceField(
queryset=TenantGroup.objects.all(),
required=False
)
slug = SlugField()
class Meta:
model = TenantGroup
fields = [
'parent', 'name', 'slug', 'description',
]
class TenantGroupCSVForm(CSVModelForm):
parent = CSVModelChoiceField(
queryset=TenantGroup.objects.all(),
required=False,
to_field_name='name',
help_text='Parent group'
)
slug = SlugField()
class Meta:
model = TenantGroup
fields = TenantGroup.csv_headers
#
# Tenants
#
class TenantForm(BootstrapMixin, CustomFieldModelForm):
slug = SlugField()
group = DynamicModelChoiceField(
queryset=TenantGroup.objects.all(),
required=False
)
comments = CommentField()
tags = DynamicModelMultipleChoiceField(
queryset=Tag.objects.all(),
required=False
)
class Meta:
model = Tenant
fields = (
'name', 'slug', 'group', 'description', 'comments', 'tags',
)
class TenantCSVForm(CustomFieldModelCSVForm):
slug = SlugField()
group = CSVModelChoiceField(
queryset=TenantGroup.objects.all(),
required=False,
to_field_name='name',
help_text='Assigned group'
)
class Meta:
model = Tenant
fields = Tenant.csv_headers
class TenantBulkEditForm(BootstrapMixin, AddRemoveTagsForm, CustomFieldBulkEditForm):
pk = forms.ModelMultipleChoiceField(
queryset=Tenant.objects.all(),
widget=forms.MultipleHiddenInput()
)
group = DynamicModelChoiceField(
queryset=TenantGroup.objects.all(),
required=False
)
class Meta:
nullable_fields = [
'group',
]
class TenantFilterForm(BootstrapMixin, CustomFieldFilterForm):
model = Tenant
q = forms.CharField(
required=False,
label='Search'
)
group = DynamicModelMultipleChoiceField(
queryset=TenantGroup.objects.all(),
to_field_name='slug',
required=False,
null_option='None'
)
tag = TagFilterField(model)
#
# Form extensions
#
class TenancyForm(forms.Form):
tenant_group = DynamicModelChoiceField(
queryset=TenantGroup.objects.all(),
required=False,
null_option='None',
initial_params={
'tenants': '$tenant'
}
)
tenant = DynamicModelChoiceField(
queryset=Tenant.objects.all(),
required=False,
query_params={
'group_id': '$tenant_group'
}
)
class TenancyFilterForm(forms.Form):
tenant_group = DynamicModelMultipleChoiceField(
queryset=TenantGroup.objects.all(),
to_field_name='slug',
required=False,
null_option='None'
)
tenant = DynamicModelMultipleChoiceField(
queryset=Tenant.objects.all(),
to_field_name='slug',
required=False,
null_option='None',
query_params={
'group': '$tenant_group'
}
)
| apache-2.0 | 2,559,112,276,505,185,000 | 22.761589 | 117 | 0.632664 | false |
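TenancyForm and TenancyFilterForm are meant to be mixed into concrete object forms elsewhere in NetBox so they gain the chained tenant-group/tenant selectors; a sketch of that pattern, with Site used purely as an illustration.

from dcim.models import Site               # illustrative model choice

class SiteForm(BootstrapMixin, TenancyForm, CustomFieldModelForm):
    class Meta:
        model = Site
        fields = ('name', 'tenant_group', 'tenant')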
TAMU-CPT/galaxy-tools | tools/fasta/fasta_dedup.py | 1 | 1258 | #!/usr/bin/env python
import logging
import copy
import argparse
import StringIO
import hashlib
from Bio import SeqIO
logging.basicConfig(level=logging.INFO)
def dedup(fasta_file, mutation="mutate"):
records = list(SeqIO.parse(fasta_file, "fasta"))
output = StringIO.StringIO()
known_records = {}
ordering_keys = []
for record in records:
seq = str(record.seq).upper()
md5 = hashlib.md5(seq).hexdigest()
if md5 in known_records:
known_records[md5]["keys"].append(record.id)
else:
known_records[md5] = {"keys": [], "rec": copy.deepcopy(record)}
ordering_keys.append(md5)
for key in ordering_keys:
if len(known_records[key]["keys"]) > 0:
ident_str = ", ".join(known_records[key]["keys"])
known_records[key]["rec"].description += " [Ident: %s]" % ident_str
SeqIO.write(known_records[key]["rec"], output, "fasta")
print(output.getvalue())
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="remove duplicate sequences in a fasta file"
)
parser.add_argument("fasta_file", type=argparse.FileType("r"), help="Fasta file")
args = parser.parse_args()
dedup(**vars(args))
| gpl-3.0 | -5,770,532,780,688,824,000 | 27.590909 | 85 | 0.617647 | false |
FenuaTabu/efenua | actions.py | 1 | 1708 | import csv
from django.http import HttpResponse
from django.utils.translation import ugettext_lazy as _
from efenua.decorators import action
def make_export_as_csv(fields=None, exclude=None, header=True):
"""
This function returns an export csv action
'fields' and 'exclude' work like in django ModelForm
'header' is whether or not to output the column names as the first row
"""
from itertools import chain
@action(_('Export CSV'), _('Export CSV'))
def export_as_csvAction(modeladmin, request, queryset):
"""
Generic csv export admin action.
"""
opts = modeladmin.model._meta
field_names = set([field.name for field in opts.fields])
many_to_many_field_names = set([many_to_many_field.name for many_to_many_field in opts.many_to_many])
if fields:
fieldset = set(fields)
field_names = field_names & fieldset
elif exclude:
excludeset = set(exclude)
field_names = field_names - excludeset
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename=%s.csv' % str(opts).replace('.', '_')
writer = csv.writer(response)
if header:
writer.writerow(list(chain(field_names, many_to_many_field_names)))
for obj in queryset:
row = []
for field in field_names:
row.append(getattr(obj, field))
for field in many_to_many_field_names:
row.append(getattr(obj, field).all())
writer.writerow(row)
return response
return export_as_csvAction
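# Usage sketch, assuming a hypothetical ``Invoice`` model (not part of this project):
# the returned action is meant to be attached to a Django ModelAdmin, e.g.
#
#     from django.contrib import admin
#     from myapp.models import Invoice  # hypothetical model with 'number' and 'total' fields
#
#     class InvoiceAdmin(admin.ModelAdmin):
#         actions = [make_export_as_csv(fields=('number', 'total'), header=True)]
#
#     admin.site.register(Invoice, InvoiceAdmin)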
| gpl-3.0 | 82,911,949,500,981,570 | 35.130435 | 109 | 0.604801 | false |
berkmancenter/mediacloud | apps/topics-base/tests/python/topics_base/test_messages.py | 1 | 1355 | from topics_base.messages import TopicSpiderUpdateMessage
def test_topic_spider_update_email():
topic_name = 'Foo Bar Baz & <script></script>'
topic_url = 'https://topics.com/topic?ab=cd&ef=gh'
topic_spider_status = 'Abc def & <script></script>'
message = TopicSpiderUpdateMessage(
to='[email protected]',
topic_name=topic_name,
topic_url=topic_url,
topic_spider_status=topic_spider_status,
)
assert message.subject is not None
assert len(message.subject) > 3
assert '{{' not in message.subject # no Jinja2 variable placeholders
assert message.text_body is not None
assert len(message.text_body) > 100
assert message.html_body is not None
assert len(message.html_body) > 100
assert topic_name in message.text_body
assert topic_url in message.text_body
assert topic_spider_status in message.text_body
assert topic_name not in message.html_body # should be escaped
assert 'Foo Bar Baz & <script></script>' in message.html_body
assert topic_url not in message.html_body # should be escaped
assert 'https://topics.com/topic?ab=cd&ef=gh' in message.html_body
assert topic_spider_status not in message.html_body # should be escaped
assert 'Abc def & <script></script>' in message.html_body
| agpl-3.0 | 1,322,631,432,139,391,200 | 35.621622 | 81 | 0.695203 | false |
citrix-openstack-build/neutron-lbaas | neutron_lbaas/tests.skip/unit/services/loadbalancer/drivers/haproxy/test_namespace_driver.py | 1 | 25382 | # Copyright 2013 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import mock
from neutron.common import exceptions
from neutron.tests import base
from neutron_lbaas.services.loadbalancer.drivers.haproxy \
import namespace_driver
class TestHaproxyNSDriver(base.BaseTestCase):
def setUp(self):
super(TestHaproxyNSDriver, self).setUp()
conf = mock.Mock()
conf.haproxy.loadbalancer_state_path = '/the/path'
conf.interface_driver = 'intdriver'
conf.haproxy.user_group = 'test_group'
conf.haproxy.send_gratuitous_arp = 3
conf.AGENT.root_helper = 'sudo_test'
self.conf = conf
self.mock_importer = mock.patch.object(namespace_driver,
'importutils').start()
self.rpc_mock = mock.Mock()
self.driver = namespace_driver.HaproxyNSDriver(
conf,
self.rpc_mock
)
self.vif_driver = mock.Mock()
self.driver.vif_driver = self.vif_driver
self.fake_config = {
'pool': {'id': 'pool_id', 'status': 'ACTIVE',
'admin_state_up': True},
'vip': {'id': 'vip_id', 'port': {'id': 'port_id'},
'status': 'ACTIVE', 'admin_state_up': True}
}
def test_get_name(self):
self.assertEqual(self.driver.get_name(), namespace_driver.DRIVER_NAME)
def test_create(self):
with mock.patch.object(self.driver, '_plug') as plug:
with mock.patch.object(self.driver, '_spawn') as spawn:
self.driver.create(self.fake_config)
plug.assert_called_once_with(
'qlbaas-pool_id', {'id': 'port_id'}
)
spawn.assert_called_once_with(self.fake_config)
def test_update(self):
with contextlib.nested(
mock.patch.object(self.driver, '_get_state_file_path'),
mock.patch.object(self.driver, '_spawn'),
mock.patch('__builtin__.open')
) as (gsp, spawn, mock_open):
mock_open.return_value = ['5']
self.driver.update(self.fake_config)
mock_open.assert_called_once_with(gsp.return_value, 'r')
spawn.assert_called_once_with(self.fake_config, ['-sf', '5'])
def test_spawn(self):
with contextlib.nested(
mock.patch.object(namespace_driver.hacfg, 'save_config'),
mock.patch.object(self.driver, '_get_state_file_path'),
mock.patch('neutron.agent.linux.ip_lib.IPWrapper')
) as (mock_save, gsp, ip_wrap):
gsp.side_effect = lambda x, y: y
self.driver._spawn(self.fake_config)
mock_save.assert_called_once_with('conf', self.fake_config,
'sock', 'test_group')
cmd = ['haproxy', '-f', 'conf', '-p', 'pid']
ip_wrap.assert_has_calls([
mock.call('sudo_test', 'qlbaas-pool_id'),
mock.call().netns.execute(cmd)
])
def test_undeploy_instance(self):
with contextlib.nested(
mock.patch.object(self.driver, '_get_state_file_path'),
mock.patch.object(namespace_driver, 'kill_pids_in_file'),
mock.patch.object(self.driver, '_unplug'),
mock.patch('neutron.agent.linux.ip_lib.IPWrapper'),
mock.patch('os.path.isdir'),
mock.patch('shutil.rmtree')
) as (gsp, kill, unplug, ip_wrap, isdir, rmtree):
gsp.side_effect = lambda x, y: '/pool/' + y
self.driver.pool_to_port_id['pool_id'] = 'port_id'
isdir.return_value = True
self.driver.undeploy_instance('pool_id')
kill.assert_called_once_with('sudo_test', '/pool/pid')
unplug.assert_called_once_with('qlbaas-pool_id', 'port_id')
isdir.assert_called_once_with('/pool')
rmtree.assert_called_once_with('/pool')
ip_wrap.assert_has_calls([
mock.call('sudo_test', 'qlbaas-pool_id'),
mock.call().garbage_collect_namespace()
])
def test_undeploy_instance_with_ns_cleanup(self):
with contextlib.nested(
mock.patch.object(self.driver, '_get_state_file_path'),
mock.patch.object(self.driver, 'vif_driver'),
mock.patch.object(namespace_driver, 'kill_pids_in_file'),
mock.patch('neutron.agent.linux.ip_lib.IPWrapper'),
mock.patch('os.path.isdir'),
mock.patch('shutil.rmtree')
) as (gsp, vif, kill, ip_wrap, isdir, rmtree):
device = mock.Mock()
device_name = 'port_device'
device.name = device_name
ip_wrap.return_value.get_devices.return_value = [device]
self.driver.undeploy_instance('pool_id', cleanup_namespace=True)
vif.unplug.assert_called_once_with(device_name,
namespace='qlbaas-pool_id')
def test_remove_orphans(self):
with contextlib.nested(
mock.patch.object(self.driver, 'exists'),
mock.patch.object(self.driver, 'undeploy_instance'),
mock.patch('os.listdir'),
mock.patch('os.path.exists')
) as (exists, undeploy, listdir, path_exists):
known = ['known1', 'known2']
unknown = ['unknown1', 'unknown2']
listdir.return_value = known + unknown
exists.side_effect = lambda x: x == 'unknown2'
self.driver.remove_orphans(known)
undeploy.assert_called_once_with('unknown2',
cleanup_namespace=True)
def test_exists(self):
with contextlib.nested(
mock.patch.object(self.driver, '_get_state_file_path'),
mock.patch('neutron.agent.linux.ip_lib.IPWrapper'),
mock.patch('socket.socket'),
mock.patch('os.path.exists'),
) as (gsp, ip_wrap, socket, path_exists):
gsp.side_effect = lambda x, y, z: '/pool/' + y
ip_wrap.return_value.netns.exists.return_value = True
path_exists.return_value = True
self.driver.exists('pool_id')
ip_wrap.assert_has_calls([
mock.call('sudo_test'),
mock.call().netns.exists('qlbaas-pool_id')
])
self.assertTrue(self.driver.exists('pool_id'))
def test_get_stats(self):
raw_stats = ('# pxname,svname,qcur,qmax,scur,smax,slim,stot,bin,bout,'
'dreq,dresp,ereq,econ,eresp,wretr,wredis,status,weight,'
'act,bck,chkfail,chkdown,lastchg,downtime,qlimit,pid,iid,'
'sid,throttle,lbtot,tracked,type,rate,rate_lim,rate_max,'
'check_status,check_code,check_duration,hrsp_1xx,'
'hrsp_2xx,hrsp_3xx,hrsp_4xx,hrsp_5xx,hrsp_other,hanafail,'
'req_rate,req_rate_max,req_tot,cli_abrt,srv_abrt,\n'
'8e271901-69ed-403e-a59b-f53cf77ef208,BACKEND,1,2,3,4,0,'
'10,7764,2365,0,0,,0,0,0,0,UP,1,1,0,,0,103780,0,,1,2,0,,0'
',,1,0,,0,,,,0,0,0,0,0,0,,,,,0,0,\n\n'
'a557019b-dc07-4688-9af4-f5cf02bb6d4b,'
'32a6c2a3-420a-44c3-955d-86bd2fc6871e,0,0,0,1,,7,1120,'
'224,,0,,0,0,0,0,UP,1,1,0,0,1,2623,303,,1,2,1,,7,,2,0,,'
'1,L7OK,200,98,0,7,0,0,0,0,0,,,,0,0,\n'
'a557019b-dc07-4688-9af4-f5cf02bb6d4b,'
'd9aea044-8867-4e80-9875-16fb808fa0f9,0,0,0,2,,12,0,0,,'
'0,,0,0,8,4,DOWN,1,1,0,9,2,308,675,,1,2,2,,4,,2,0,,2,'
'L4CON,,2999,0,0,0,0,0,0,0,,,,0,0,\n')
raw_stats_empty = ('# pxname,svname,qcur,qmax,scur,smax,slim,stot,bin,'
'bout,dreq,dresp,ereq,econ,eresp,wretr,wredis,'
'status,weight,act,bck,chkfail,chkdown,lastchg,'
'downtime,qlimit,pid,iid,sid,throttle,lbtot,'
'tracked,type,rate,rate_lim,rate_max,check_status,'
'check_code,check_duration,hrsp_1xx,hrsp_2xx,'
'hrsp_3xx,hrsp_4xx,hrsp_5xx,hrsp_other,hanafail,'
'req_rate,req_rate_max,req_tot,cli_abrt,srv_abrt,'
'\n')
with contextlib.nested(
mock.patch.object(self.driver, '_get_state_file_path'),
mock.patch('socket.socket'),
mock.patch('os.path.exists'),
) as (gsp, socket, path_exists):
gsp.side_effect = lambda x, y, z: '/pool/' + y
path_exists.return_value = True
socket.return_value = socket
socket.recv.return_value = raw_stats
exp_stats = {'connection_errors': '0',
'active_connections': '3',
'current_sessions': '3',
'bytes_in': '7764',
'max_connections': '4',
'max_sessions': '4',
'bytes_out': '2365',
'response_errors': '0',
'total_sessions': '10',
'total_connections': '10',
'members': {
'32a6c2a3-420a-44c3-955d-86bd2fc6871e': {
'status': 'ACTIVE',
'health': 'L7OK',
'failed_checks': '0'
},
'd9aea044-8867-4e80-9875-16fb808fa0f9': {
'status': 'INACTIVE',
'health': 'L4CON',
'failed_checks': '9'
}
}
}
stats = self.driver.get_stats('pool_id')
self.assertEqual(exp_stats, stats)
socket.recv.return_value = raw_stats_empty
self.assertEqual({'members': {}}, self.driver.get_stats('pool_id'))
path_exists.return_value = False
socket.reset_mock()
self.assertEqual({}, self.driver.get_stats('pool_id'))
self.assertFalse(socket.called)
def test_plug(self):
test_port = {'id': 'port_id',
'network_id': 'net_id',
'mac_address': 'mac_addr',
'fixed_ips': [{'ip_address': '10.0.0.2',
'subnet': {'cidr': '10.0.0.0/24',
'gateway_ip': '10.0.0.1'}}]}
with contextlib.nested(
mock.patch('neutron.agent.linux.ip_lib.device_exists'),
mock.patch('netaddr.IPNetwork'),
mock.patch('neutron.agent.linux.ip_lib.IPWrapper'),
) as (dev_exists, ip_net, ip_wrap):
self.vif_driver.get_device_name.return_value = 'test_interface'
dev_exists.return_value = False
ip_net.return_value = ip_net
ip_net.prefixlen = 24
self.driver._plug('test_ns', test_port)
self.rpc_mock.plug_vip_port.assert_called_once_with(
test_port['id'])
self.assertTrue(dev_exists.called)
self.vif_driver.plug.assert_called_once_with('net_id', 'port_id',
'test_interface',
'mac_addr',
namespace='test_ns')
self.vif_driver.init_l3.assert_called_once_with(
'test_interface',
['10.0.0.2/24'],
namespace='test_ns'
)
cmd = ['route', 'add', 'default', 'gw', '10.0.0.1']
cmd_arping = ['arping', '-U', '-I',
'test_interface', '-c',
self.conf.haproxy.send_gratuitous_arp, '10.0.0.2']
ip_wrap.assert_has_calls([
mock.call('sudo_test', namespace='test_ns'),
mock.call().netns.execute(cmd, check_exit_code=False),
mock.call().netns.execute(cmd_arping, check_exit_code=False),
])
dev_exists.return_value = True
self.assertRaises(exceptions.PreexistingDeviceFailure,
self.driver._plug, 'test_ns', test_port, False)
def test_plug_not_send_gratuitous_arp(self):
self.conf.haproxy.send_gratuitous_arp = 0
test_port = {'id': 'port_id',
'network_id': 'net_id',
'mac_address': 'mac_addr',
'fixed_ips': [{'ip_address': '10.0.0.2',
'subnet': {'cidr': '10.0.0.0/24',
'gateway_ip': '10.0.0.1'}}]}
with contextlib.nested(
mock.patch('neutron.agent.linux.ip_lib.device_exists'),
mock.patch('netaddr.IPNetwork'),
mock.patch('neutron.agent.linux.ip_lib.IPWrapper'),
) as (dev_exists, ip_net, ip_wrap):
self.vif_driver.get_device_name.return_value = 'test_interface'
dev_exists.return_value = False
ip_net.return_value = ip_net
ip_net.prefixlen = 24
self.driver._plug('test_ns', test_port)
cmd = ['route', 'add', 'default', 'gw', '10.0.0.1']
expected = [
mock.call('sudo_test', namespace='test_ns'),
mock.call().netns.execute(cmd, check_exit_code=False)]
self.assertEqual(expected, ip_wrap.mock_calls)
def test_plug_no_gw(self):
test_port = {'id': 'port_id',
'network_id': 'net_id',
'mac_address': 'mac_addr',
'fixed_ips': [{'ip_address': '10.0.0.2',
'subnet': {'cidr': '10.0.0.0/24'}}]}
with contextlib.nested(
mock.patch('neutron.agent.linux.ip_lib.device_exists'),
mock.patch('netaddr.IPNetwork'),
mock.patch('neutron.agent.linux.ip_lib.IPWrapper'),
) as (dev_exists, ip_net, ip_wrap):
self.vif_driver.get_device_name.return_value = 'test_interface'
dev_exists.return_value = False
ip_net.return_value = ip_net
ip_net.prefixlen = 24
self.driver._plug('test_ns', test_port)
self.rpc_mock.plug_vip_port.assert_called_once_with(
test_port['id'])
self.assertTrue(dev_exists.called)
self.vif_driver.plug.assert_called_once_with('net_id', 'port_id',
'test_interface',
'mac_addr',
namespace='test_ns')
self.vif_driver.init_l3.assert_called_once_with(
'test_interface',
['10.0.0.2/24'],
namespace='test_ns'
)
self.assertFalse(ip_wrap.called)
dev_exists.return_value = True
self.assertRaises(exceptions.PreexistingDeviceFailure,
self.driver._plug, 'test_ns', test_port, False)
def test_plug_gw_in_host_routes(self):
test_port = {'id': 'port_id',
'network_id': 'net_id',
'mac_address': 'mac_addr',
'fixed_ips': [{'ip_address': '10.0.0.2',
'subnet': {'cidr': '10.0.0.0/24',
'host_routes':
[{'destination': '0.0.0.0/0',
'nexthop': '10.0.0.1'}]}}]}
with contextlib.nested(
mock.patch('neutron.agent.linux.ip_lib.device_exists'),
mock.patch('netaddr.IPNetwork'),
mock.patch('neutron.agent.linux.ip_lib.IPWrapper'),
) as (dev_exists, ip_net, ip_wrap):
self.vif_driver.get_device_name.return_value = 'test_interface'
dev_exists.return_value = False
ip_net.return_value = ip_net
ip_net.prefixlen = 24
self.driver._plug('test_ns', test_port)
self.rpc_mock.plug_vip_port.assert_called_once_with(
test_port['id'])
self.assertTrue(dev_exists.called)
self.vif_driver.plug.assert_called_once_with('net_id', 'port_id',
'test_interface',
'mac_addr',
namespace='test_ns')
self.vif_driver.init_l3.assert_called_once_with(
'test_interface',
['10.0.0.2/24'],
namespace='test_ns'
)
cmd = ['route', 'add', 'default', 'gw', '10.0.0.1']
ip_wrap.assert_has_calls([
mock.call('sudo_test', namespace='test_ns'),
mock.call().netns.execute(cmd, check_exit_code=False),
])
def test_unplug(self):
self.vif_driver.get_device_name.return_value = 'test_interface'
self.driver._unplug('test_ns', 'port_id')
self.rpc_mock.unplug_vip_port.assert_called_once_with('port_id')
self.vif_driver.unplug('test_interface', namespace='test_ns')
def test_kill_pids_in_file(self):
with contextlib.nested(
mock.patch('os.path.exists'),
mock.patch('__builtin__.open'),
mock.patch('neutron.agent.linux.utils.execute'),
mock.patch.object(namespace_driver.LOG, 'exception'),
) as (path_exists, mock_open, mock_execute, mock_log):
file_mock = mock.MagicMock()
mock_open.return_value = file_mock
file_mock.__enter__.return_value = file_mock
file_mock.__iter__.return_value = iter(['123'])
path_exists.return_value = False
namespace_driver.kill_pids_in_file('sudo_test', 'test_path')
path_exists.assert_called_once_with('test_path')
self.assertFalse(mock_open.called)
self.assertFalse(mock_execute.called)
path_exists.return_value = True
mock_execute.side_effect = RuntimeError
namespace_driver.kill_pids_in_file('sudo_test', 'test_path')
self.assertTrue(mock_log.called)
mock_execute.assert_called_once_with(
['kill', '-9', '123'], 'sudo_test')
def test_get_state_file_path(self):
with mock.patch('os.makedirs') as mkdir:
path = self.driver._get_state_file_path('pool_id', 'conf')
self.assertEqual('/the/path/pool_id/conf', path)
mkdir.assert_called_once_with('/the/path/pool_id', 0o755)
def test_deploy_instance(self):
with mock.patch.object(self.driver, 'exists') as exists:
with mock.patch.object(self.driver, 'update') as update:
self.driver.deploy_instance(self.fake_config)
exists.assert_called_once_with(self.fake_config['pool']['id'])
update.assert_called_once_with(self.fake_config)
def test_deploy_instance_non_existing(self):
with mock.patch.object(self.driver, 'exists') as exists:
with mock.patch.object(self.driver, 'create') as create:
exists.return_value = False
self.driver.deploy_instance(self.fake_config)
exists.assert_called_once_with(self.fake_config['pool']['id'])
create.assert_called_once_with(self.fake_config)
def test_deploy_instance_vip_status_non_active(self):
with mock.patch.object(self.driver, 'exists') as exists:
self.fake_config['vip']['status'] = 'NON_ACTIVE'
self.driver.deploy_instance(self.fake_config)
self.assertFalse(exists.called)
def test_deploy_instance_vip_admin_state_down(self):
with mock.patch.object(self.driver, 'exists') as exists:
self.fake_config['vip']['admin_state_up'] = False
self.driver.deploy_instance(self.fake_config)
self.assertFalse(exists.called)
def test_deploy_instance_no_vip(self):
with mock.patch.object(self.driver, 'exists') as exists:
del self.fake_config['vip']
self.driver.deploy_instance(self.fake_config)
self.assertFalse(exists.called)
def test_deploy_instance_pool_status_non_active(self):
with mock.patch.object(self.driver, 'exists') as exists:
self.fake_config['pool']['status'] = 'NON_ACTIVE'
self.driver.deploy_instance(self.fake_config)
self.assertFalse(exists.called)
def test_deploy_instance_pool_admin_state_down(self):
with mock.patch.object(self.driver, 'exists') as exists:
self.fake_config['pool']['admin_state_up'] = False
self.driver.deploy_instance(self.fake_config)
self.assertFalse(exists.called)
def test_refresh_device(self):
with mock.patch.object(self.driver, 'deploy_instance') as deploy:
pool_id = 'pool_id1'
self.driver._refresh_device(pool_id)
self.rpc_mock.get_logical_device.assert_called_once_with(pool_id)
deploy.assert_called_once_with(
self.rpc_mock.get_logical_device.return_value)
def test_create_vip(self):
with mock.patch.object(self.driver, '_refresh_device') as refresh:
self.driver.create_vip({'pool_id': '1'})
refresh.assert_called_once_with('1')
def test_update_vip(self):
with mock.patch.object(self.driver, '_refresh_device') as refresh:
self.driver.update_vip({}, {'pool_id': '1'})
refresh.assert_called_once_with('1')
def test_delete_vip(self):
with mock.patch.object(self.driver, 'undeploy_instance') as undeploy:
self.driver.delete_vip({'pool_id': '1'})
undeploy.assert_called_once_with('1')
def test_create_pool(self):
with mock.patch.object(self.driver, '_refresh_device') as refresh:
self.driver.create_pool({'id': '1'})
self.assertFalse(refresh.called)
def test_update_pool(self):
with mock.patch.object(self.driver, '_refresh_device') as refresh:
self.driver.update_pool({}, {'id': '1'})
refresh.assert_called_once_with('1')
def test_delete_pool_existing(self):
with mock.patch.object(self.driver, 'undeploy_instance') as undeploy:
with mock.patch.object(self.driver, 'exists') as exists:
exists.return_value = True
self.driver.delete_pool({'id': '1'})
undeploy.assert_called_once_with('1')
def test_delete_pool_non_existing(self):
with mock.patch.object(self.driver, 'undeploy_instance') as undeploy:
with mock.patch.object(self.driver, 'exists') as exists:
exists.return_value = False
self.driver.delete_pool({'id': '1'})
self.assertFalse(undeploy.called)
def test_create_member(self):
with mock.patch.object(self.driver, '_refresh_device') as refresh:
self.driver.create_member({'pool_id': '1'})
refresh.assert_called_once_with('1')
def test_update_member(self):
with mock.patch.object(self.driver, '_refresh_device') as refresh:
self.driver.update_member({}, {'pool_id': '1'})
refresh.assert_called_once_with('1')
def test_delete_member(self):
with mock.patch.object(self.driver, '_refresh_device') as refresh:
self.driver.delete_member({'pool_id': '1'})
refresh.assert_called_once_with('1')
def test_create_pool_health_monitor(self):
with mock.patch.object(self.driver, '_refresh_device') as refresh:
self.driver.create_pool_health_monitor('', '1')
refresh.assert_called_once_with('1')
def test_update_pool_health_monitor(self):
with mock.patch.object(self.driver, '_refresh_device') as refresh:
self.driver.update_pool_health_monitor('', '', '1')
refresh.assert_called_once_with('1')
def test_delete_pool_health_monitor(self):
with mock.patch.object(self.driver, '_refresh_device') as refresh:
self.driver.delete_pool_health_monitor('', '1')
refresh.assert_called_once_with('1')
| apache-2.0 | 1,644,482,578,730,479,000 | 45.149091 | 79 | 0.528879 | false |
amwelch-oss/RedditStats | tests/tests.py | 1 | 8961 | import unittest
from mock import Mock
import random
import string
import arrow
import redditstats.connect as connect
import redditstats.subreddit as subreddit
import redditstats.comments as comments
import redditstats.stats as stats
from collections import namedtuple
class ConnectionTests(unittest.TestCase):
MAX_UA_LEN = 4096
def test_user_agent(self):
'''
Tests that we properly construct the user-agent string.
'''
#Given
test_config_fields = [
("VERSION", "1.0"),
("NAME", "FOO"),
("DESCRIPTION", "THIS IS A THING"),
("REDDIT_USER", "BAR")
]
MockedConfigContainer = namedtuple('config', [x[0] for x in test_config_fields])
mocked_config = MockedConfigContainer(*[x[1] for x in test_config_fields])
#When
ua = connect.form_user_agent(config_object = mocked_config)
#Then
for field in test_config_fields:
self.assertTrue(field[1] in ua)
self.assertTrue(len(ua) < self.MAX_UA_LEN)
class CommentTests(unittest.TestCase):
def test_get_comment_info(self):
'''
Tests that given a comment object returns a dict
with stats about it
'''
#Given
mocked_comment = Mock()
mocked_comment.author = 'foo'
mocked_comment.ups = 3
mocked_comment.downs = 1
mocked_comment.created = 123.0
#When
info = comments.get_comment_info(mocked_comment)
        #Then
self.assertEquals(info['author'], mocked_comment.author)
self.assertEquals(info['score'], mocked_comment.ups - mocked_comment.downs)
self.assertEquals(info['ts'], mocked_comment.created)
def test_get_submission_comment_summary(self):
'''
Tests the simple case that given a submission object with comments
get_submission_comment_summary correctly returns a dict summarizing
the comments in a submission
'''
#Given
sub_id = 'baz'
true_values = {}
mocked_submission = Mock()
mocked_submission.replace_more_comments.return_value = None
mocked_submission.id = sub_id
true_values[sub_id] = {}
true_values[sub_id]['count'] = 0
true_values[sub_id]['score'] = 0
true_values[sub_id]['total_len'] = 0
mocked_comments = []
for a in ['foo', 'bar']:
for i in range(random.randint(0,10)):
ups = random.randint(0,100)
downs = random.randint(0,10)
created = random.randint(0,10000)
body = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(random.randint(1,200)))
true_values[sub_id]['count'] += 1
true_values[sub_id]['score'] += ups - downs
true_values[sub_id]['total_len'] += len(body)
comment = Mock()
comment.author = a
comment.ups = ups
comment.downs = downs
comment.created = created
comment.body = body
mocked_comments.append(comment)
mocked_submission.comments = mocked_comments
#When
summary = comments.get_submission_comment_summary(mocked_submission)
#Then
self.assertEquals(summary, true_values)
def test_get_user_comment_summary(self):
'''
Tests the simple case that given a submission object with comments
get_user_comment_summary correctly returns a dict of user activity
'''
#Given
true_values = {}
mocked_submission = Mock()
mocked_submission.replace_more_comments.return_value = None
mocked_comments = []
for a in ['foo', 'bar']:
true_values[a] = {}
true_values[a]['count'] = 0
true_values[a]['score'] = 0
true_values[a]['total_len'] = 0
for i in range(random.randint(0,10)):
ups = random.randint(0,100)
downs = random.randint(0,10)
created = random.randint(0,10000)
body = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(random.randint(1,200)))
true_values[a]['count'] += 1
true_values[a]['score'] += ups - downs
true_values[a]['total_len'] += len(body)
comment = Mock()
comment.author = a
comment.ups = ups
comment.downs = downs
comment.created = created
comment.body = body
mocked_comments.append(comment)
mocked_submission.comments = mocked_comments
#When
summary = comments.get_user_comment_summary(mocked_submission)
#Then
self.assertEquals(summary, true_values)
class SubredditTests(unittest.TestCase):
def test_get_submission_by_date(self):
'''
Given a list of submissions tests that get_submission_by_date
returns only the submissions in the given interval
'''
#Given
mocked_submissions = []
interval_start = arrow.get('2015-02-05 00:00')
interval_end = interval_start.replace(hours=24)
generated_dates_start = interval_start.replace(hours=-24)
generated_dates_end = interval_end.replace(hours=24)
generated_dates = generated_dates_start.range('hour', generated_dates_start, end=generated_dates_end)
interval_dates = interval_start.range('hour', interval_start, end=interval_end)
for date in generated_dates:
submission = Mock()
submission.created = date.format('X')
mocked_submissions.append(submission)
conn = Mock()
conn.get_subreddit.return_value = Mock()
conn.get_subreddit.return_value.get_new.return_value = mocked_submissions
end_str = interval_end.strftime('%Y-%m-%d %H:%M')
start_str = interval_start.strftime('%Y-%m-%d %H:%M')
#When
submissions = subreddit.get_submission_by_date(conn, 'foo', end = end_str, start=start_str)
#Then
self.assertEquals(len(submissions), len(interval_dates))
def test_get_subreddit(self):
'''
Tests that get_subreddit makes the correct call to praw
'''
#Given
conn = Mock()
conn.get_subreddit.return_value = True
#When
ret = subreddit.get_subreddit(conn, 'foo')
#Then
self.assertTrue(ret)
def test_get_posts(self):
'''
Tests that get_posts makes the correct call to praw
'''
#Given
conn = Mock()
conn.get_subreddit.return_value = Mock()
conn.get_subreddit.return_value.get_new.return_value = True
#When
ret = subreddit.get_posts(conn, 'foo')
#Then
self.assertTrue(ret)
def test_get_posts_summary(self):
'''
Simple test to ensure that get_posts_summary correctly
aggregates the stats
'''
#Given
title ='test_post'
sub_id = 'baz'
true_values = {}
mocked_submission = Mock()
mocked_submission.replace_more_comments.return_value = None
mocked_submission.id = sub_id
mocked_submission.title = title
true_values[sub_id] = {}
true_values[sub_id]['count'] = 0
true_values[sub_id]['score'] = 0
true_values[sub_id]['total_len'] = 0
true_values[sub_id]['title'] = title
mocked_comments = []
for a in ['foo', 'bar']:
for i in range(random.randint(0,10)):
ups = random.randint(0,100)
downs = random.randint(0,10)
created = random.randint(0,10000)
body = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(random.randint(1,200)))
true_values[sub_id]['count'] += 1
true_values[sub_id]['score'] += ups - downs
true_values[sub_id]['total_len'] += len(body)
comment = Mock()
comment.author = a
comment.ups = ups
comment.downs = downs
comment.created = created
comment.body = body
mocked_comments.append(comment)
mocked_submission.comments = mocked_comments
conn = Mock()
mocked_subreddit = Mock()
mocked_subreddit.get_new.return_value = [mocked_submission]
conn.get_subreddit.return_value = mocked_subreddit
true_df = stats.convert_stats_to_dataframe(true_values)
#When
df = subreddit.get_posts_summary(conn, 'foo')
#Then
self.assertTrue((true_df.values == df.values).all())
if __name__ == '__main__':
unittest.main()
| mit | 5,476,614,130,928,023,000 | 31.70438 | 123 | 0.567571 | false |
joebullard/slack-doorbell-camera | main.py | 1 | 2043 | import argparse
import sys
import time
from doorbell import SlackDoorbell
from ringer import FaceDetectionDoorbellRinger
from visionapi import VisionAPIClient
def parse_args(argv):
parser = argparse.ArgumentParser()
parser.add_argument(
'--motion-output-dir',
required=True,
type=str,
help='Path to the motion daemon output directory'
)
parser.add_argument(
'--webhook-url',
required=True,
type=str,
help='Slack Incoming Webhook URL'
)
parser.add_argument(
'--json-keyfile',
required=False,
default=None,
type=str,
help='Path to a Google Cloud service account credentials JSON'
)
parser.add_argument(
'--stream-addr',
required=False,
default=None,
type=str,
help='IP address of webcam stream'
)
parser.add_argument(
'--sleep-secs',
required=False,
default=1.0,
type=float,
help='Number of seconds to sleep between image polling'
)
parser.add_argument(
'--min-confidence',
required=False,
default=0.50,
type=float,
help='Minimum detection confidence threshold of face detection'
)
parser.add_argument(
'--timeout-secs',
required=False,
default=20,
type=float,
help='Number of seconds to wait before ringing again')
parser.add_argument(
'--verbose',
required=False,
action='store_true',
help='If True, print update of every single detection iteration'
)
return parser.parse_args(argv)
def main(argv):
args = parse_args(argv)
vision = VisionAPIClient(args.json_keyfile)
doorbell = SlackDoorbell(args.webhook_url, args.stream_addr)
ringer = FaceDetectionDoorbellRinger(vision, doorbell,
args.min_confidence, args.timeout_secs, verbose=args.verbose)
ringer.run(args.motion_output_dir, args.sleep_secs)
if __name__ == '__main__':
main(sys.argv[1:])
| gpl-3.0 | -4,388,402,259,583,009,300 | 26.608108 | 72 | 0.619677 | false |
periscope-ps/unis | periscope/handlers/datahandler.py | 1 | 4785 | # =============================================================================
# periscope-ps (unis)
#
# Copyright (c) 2012-2016, Trustees of Indiana University,
# All rights reserved.
#
# This software may be modified and distributed under the terms of the BSD
# license. See the COPYING file for details.
#
# This software was created at the Indiana University Center for Research in
# Extreme Scale Technologies (CREST).
# =============================================================================
#!/usr/bin/env python
import json
import functools
import tornado.web
from urllib.parse import urlparse,urlunparse
import periscope.settings as settings
from periscope.db import dumps_mongo, DBLayer
from periscope.settings import MIME
from periscope.handlers.networkresourcehandler import NetworkResourceHandler
from periscope.models import ObjectDict  # used in _return_resources below; import path assumed from the periscope package layout
class DataHandler(NetworkResourceHandler):
def _validate_request(self, res_id, allow_id = False, require_id = False):
if self.accept_content_type not in self.schemas_single:
message = "Schema is not defined for content of type '%s'" % \
(self.accept_content_type)
self.send_error(500, message=message)
return False
if self._validate_psjson_profile():
return False
return True
def _add_post_metadata(self, resource, res_id = None):
loc = urlparse(self.request.full_url())._replace(query=None, fragment=None)
resource["mid"] = res_id or resource["mid"]
resource["selfRef"] = loc._replace(path=f"{loc.path}/{resource['mid']}").geturl()
resource["$schema"] = resource.get("$schema", self.schemas_single[MIME['PSJSON']])
return resource
async def _insert(self, resources):
mids = {}
for resource in resources:
if resource["mid"] not in mids:
mids[resource["mid"]] = []
mids[resource["mid"]].extend(resource["data"])
for mid, data in mids.items():
push_data = { 'id': mid, 'data': data }
self._subscriptions.publish(push_data, self._collection_name, { "action": "POST", "collection": "data/{}".format(mid) },
self.trim_published_resource)
await DBLayer(self.application.db, mid, True).insert(data)
async def _post_return(self, resources):
return
async def _return_resources(self, mid, query):
resp = []
async for record in DBLayer(self.application.db, mid, True).find(query):
resp.append(ObjectDict._from_mongo(record))
if len(resp) == 1:
location = self.request.full_url().split('?')[0]
            if not location.endswith(resp[0][self.Id]):
                location = location + "/" + resp[0][self.Id]
self.set_header("Location", location)
return resp[0]
else:
return resp
@tornado.web.removeslash
async def get(self, res_id=None):
if not res_id:
self.write_error(403, message = "Data request must include a metadata id")
return
# Parse arguments and set up query
try:
parsed = await self._parse_get_arguments()
options = dict(query = parsed["query"]["query"],
limit = parsed["limit"],
sort = parsed["sort"],
skip = parsed["skip"])
if not options["limit"]:
options.pop("limit", None)
options["query"]["\\$status"] = { "$ne": "DELETED" }
options["fields"] = { "_id": 0 }
except Exception as exp:
self.write_error(403, message = exp)
return
# we don't want to wait here since this is a direct query on the state of the collection
# we could have an SSE endpoint that implemented a hanging GET, allowing more data
# over the HTTP connection as it arrived
query = options.pop("query")
count = await DBLayer(self.application.db, res_id, True).count(query, **options)
if not count:
self.write('[]')
return
first = True
async for record in DBLayer(self.application.db, res_id, True).find(query):
self.write('[\n' if first else ',\n')
first = False
self.write(dumps_mongo(record, indent=2).replace('\\\\$', '$').replace('$DOT$', '.'))
self.write('\n]')
await self._add_response_headers(count)
self.set_status(200)
self.finish()
def trim_published_resource(self, resource, fields):
return {resource['id']: resource['data']}
| bsd-3-clause | -5,861,323,693,186,295,000 | 38.221311 | 132 | 0.55674 | false |
NervanaSystems/ngraph | test/ref_generators/generate_convolution_ref.py | 1 | 19772 | #!/usr/bin/env python
# ******************************************************************************
# Copyright 2017-2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
import sys
import numpy as np
import math
import random
from operator import mul
# Generates an array of random floating point literals of the given length, from a fixed seed.
def random_array_float_literals(length, seed=8086):
literals = []
random.seed(seed)
for i in range(0, length):
# generate numbers that can be exactly represented in binary
sig_bits = 6
range_bits = 2
literal_n = np.float32(random.randint(-pow(2, sig_bits-1),
pow(2, sig_bits-1))) / pow(2.0, sig_bits - range_bits)
literals.append(str(literal_n))
return literals
# Elementwise addition on tuples.
def tuple_plus(t1, t2):
assert(len(t1) == len(t2))
res = ()
for (x, y) in zip(list(t1), list(t2)):
res = res + (x+y,)
return res
# Elementwise multiplication on tuples.
def tuple_times(t1, t2):
assert(len(t1) == len(t2))
res = ()
for (x, y) in zip(list(t1), list(t2)):
res = res + (x*y,)
return res
#
# Convolution reference
#
# Arguments:
# data_batch : [N ][Ci][D1]...[Dn], n > 0
# filter : [Co][Ci][W1]...[Wn]
# move_strides = (s1,...,sn)
# filter_dilation = (l1,...,ln)
# below_pads = (p1,...,pn)
# above_pads = (q1,...,qn)
# data_dilation = (t1,...,tn)
#
# Returns:
# output_batch : [N ][Co][D'1]...[D'n]
#
# Where the D's are computed according to TensorFlow-style "valid" convolution rules, but *after* padding.
# See https://www.tensorflow.org/api_docs/python/tf/nn/convolution.
#
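# A worked instance of the shape rule above (for orientation only): with D1 = 3, D2 = 5,
# a 2x2 filter, unit strides/dilations and no padding, D'1 = ceil((3 - 1) / 1) = 2 and
# D'2 = ceil((5 - 1) / 1) = 4, so the "convolution_2d_1item" case below maps a 1x1x3x5
# batch through a 2x1x2x2 filter to a 1x2x2x4 output.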
def convolution_ref(data_batch, filter, move_strides, filter_dilation, below_pads, above_pads, data_dilation):
assert(len(data_batch.shape) == len(filter.shape))
assert(len(data_batch.shape) > 2)
assert(len(data_batch.shape) <= 6)
assert(data_batch.shape[1] == filter.shape[1])
assert(len(move_strides) == len(data_batch.shape) - 2)
assert(len(filter_dilation) == len(data_batch.shape) - 2)
assert(len(data_dilation) == len(data_batch.shape) - 2)
# dilate the input batch
new_item_shape = (np.array(data_batch.shape[2:]) - 1) * data_dilation + 1
new_data_batch_shape = list(
np.array(data_batch.shape[:2])) + list(new_item_shape)
new_data_batch = np.zeros(new_data_batch_shape)
for n in range(0, new_data_batch_shape[0]):
for c in range(0, new_data_batch_shape[1]):
if new_data_batch.ndim == 3:
new_data_batch[n, c, 0::data_dilation[0]] = data_batch[n][c]
elif new_data_batch.ndim == 4:
new_data_batch[n, c, 0::data_dilation[0],
0::data_dilation[1]] = data_batch[n][c]
elif new_data_batch.ndim == 5:
new_data_batch[n, c, 0::data_dilation[0],
0::data_dilation[1], 0::data_dilation[2]] = data_batch[n][c]
elif new_data_batch.ndim == 6:
new_data_batch[n, c, 0::data_dilation[0], 0::data_dilation[1],
0::data_dilation[2], 0::data_dilation[3]] = data_batch[n][c]
else:
assert(False)
data_batch = new_data_batch
# Pad the input batch wherever the pads are positive.
# Have to add values for the spatial and channel dims.
below_pads_pos = (0, 0) + tuple(np.clip(below_pads, 0, None))
# Have to add values for the spatial and channel dims.
above_pads_pos = (0, 0) + tuple(np.clip(above_pads, 0, None))
data_batch = np.pad(data_batch, list(
zip(below_pads_pos, above_pads_pos)), mode='constant', constant_values=0)
# Slice the input batch wherever the pads are negative.
slice_bottoms = (0, 0) + tuple(-np.clip(below_pads, None, 0))
slice_tops = (0, 0) + tuple(np.clip(above_pads, None, 0))
slices = list(map(lambda p: slice(
p[0], p[1] if p[1] < 0 else None), zip(slice_bottoms, slice_tops)))
data_batch = data_batch[tuple(slices)]
item_count = data_batch.shape[0] # N
ci_count = data_batch.shape[1] # Ci
co_count = filter.shape[0] # Co
input_item_shape = list(data_batch.shape[2:]) # D1, ..., Dn
window_virtual_shape = list(filter.shape[2:]) # W1, ..., Wn
# This is not used in computation but we will calculate it for a check to make sure the window fits.
window_physical_shape = []
for (d_in, d_virt, dil) in zip(input_item_shape, window_virtual_shape, filter_dilation):
d_phys = (d_virt - 1) * dil + 1
assert(d_phys <= d_in)
window_physical_shape.append(d_phys)
output_item_shape = [] # D'1,...,D'n
for (d_in, d_win, dil, mov) in zip(input_item_shape, window_virtual_shape, filter_dilation, move_strides):
# Formula is taken from TF's definition for VALID convolution.
d_out = int(
math.ceil((float(d_in) - (float(d_win) - 1.0) * float(dil))/float(mov)))
assert(d_out > 0)
output_item_shape.append(d_out)
output_shape = [item_count, co_count]+output_item_shape # N,Co,D'1,...,D'n
output_batch = np.zeros(output_shape)
# Walk over the output batch space.
output_it = np.nditer(output_batch, flags=['multi_index'])
while not output_it.finished:
# Break up the output coordinate to figure out where we are in terms of batch index, output channel, and spatial position.
output_index = output_it.multi_index
item, co, output_pos = output_index[0], output_index[1], output_index[2:]
# Walk over the filter for the current output channel.
filter_it = np.nditer(filter[co], flags=['multi_index'])
while not filter_it.finished:
# Break up the filter coordinate to figure out where we are in terms of input channel and filter shape position.
filter_index = filter_it.multi_index
ci, filter_pos = filter_index[0], filter_index[1:]
# Build up the coordinate within the space N,Ci,D1,...,Dn that we need to read from in the input batch.
input_index = (item, ci) + (tuple_plus(tuple_times(output_pos,
move_strides), tuple_times(filter_pos, filter_dilation)))
# Add to the sum-of-products.
output_batch[output_index] = output_batch[output_index] + \
filter[(co,) + filter_index] * data_batch[input_index]
filter_it.iternext()
output_it.iternext()
return output_batch
def shape_str(shape):
result = ''
first = True
for d in shape:
if first:
result = ('%d' % d)
first = False
else:
result = result + (',%d' % d)
return result
def scalar_str(x):
result = ('%.1000g' % x)
# This next part is a bit stupid.
if "." not in result and "e" not in result:
result = result + ".0f"
else:
result = "%.8ff" % float(result)
return result
def data_str(data):
result = ''
first = True
for x in np.nditer(data):
if first:
result = scalar_str(x)
first = False
else:
result = result + ',' + scalar_str(x)
return result
def shape_size(shape):
result = 1
for l in shape:
result = result * l
return result
def emit_test(t, f):
test_name, input_batch_shape, filters_shape, move_strides, filter_dilation, below_pads, above_pads, data_dilation, bprop = t
input_batch_literals = random_array_float_literals(
shape_size(input_batch_shape))
filters_literals = random_array_float_literals(shape_size(filters_shape))
input_batch_array = np.array(
list(map(lambda s: np.float32(s), input_batch_literals)))
input_batch_array.shape = input_batch_shape
filters_array = np.array(
list(map(lambda s: np.float32(s), filters_literals)))
filters_array.shape = filters_shape
print("Generating convolution test '%s'..." % test_name)
output_batch_data = convolution_ref(
input_batch_array, filters_array, move_strides, filter_dilation, below_pads, above_pads, data_dilation)
template = '''
// !!!!!!!!!!!!!! THIS FILE IS AUTOGENERATED OUTSIDE OF THE BUILD PROCESS !!!!!!!!!!!!!!
// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! DO NOT EDIT THIS FILE !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
//
// DO NOT EDIT THIS FILE. If you want to add new tests, you should edit
// test/ref_generators/generate_convolution_ref.py and regenerate this file.
//
// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! DO NOT EDIT THIS FILE !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
// !!!!!!!!!!!!!! THIS FILE IS AUTOGENERATED OUTSIDE OF THE BUILD PROCESS !!!!!!!!!!!!!!
NGRAPH_TEST (${BACKEND_NAME}, %s)
{
Shape shape_a{%s};
Shape shape_b{%s};
Shape shape_r{%s};
auto make_graph = [shape_a, shape_b] {
auto A = make_shared<op::v0::Parameter>(element::f32, shape_a);
auto B = make_shared<op::v0::Parameter>(element::f32, shape_b);
return make_shared<Function>(make_shared<op::v0::Convolution>(A, B,
Strides{%s}, // move_strides
Strides{%s}, // filter_dilation
CoordinateDiff{%s}, // below_pads
CoordinateDiff{%s}, // above_pads
Strides{%s}), // data_dilation
ParameterVector{A, B});
};
auto backend = runtime::Backend::create("${BACKEND_NAME}");
auto function = make_graph();
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape_a);
copy_data(a, vector<float>{%s});
auto b = backend->create_tensor(element::f32, shape_b);
copy_data(b, vector<float>{%s});
auto result = backend->create_tensor(element::f32, shape_r);
vector<float> expected_result{%s};
auto handle = backend->compile(function);
handle->call_with_validate({result}, {a, b});
EXPECT_TRUE(test::all_close_f(vector<float>{expected_result}, read_vector<float>(result), tolerance));
// only test backprop for certain cases as it takes significant compute resources
%sEXPECT_TRUE(autodiff_numeric_compare<float>(backend.get(), make_graph, {a, b}, .01f, .01f));
}
'''
f.write(template % (test_name,
shape_str(input_batch_shape),
shape_str(filters_shape),
shape_str(output_batch_data.shape),
shape_str(move_strides),
shape_str(filter_dilation),
shape_str(below_pads),
shape_str(above_pads),
shape_str(data_dilation),
",".join(map(lambda s: "%.8ff" %
float(s), input_batch_literals)),
",".join(map(lambda s: "%.8ff" %
float(s), filters_literals)),
data_str(output_batch_data),
bprop))
# Test tuple columns (unpacked in emit_test):
#   test name, input batch shape, filter shape, move strides, filter dilation,
#   below-pads, above-pads, data dilation, bprop? (a "// " value comments out the bprop check)
tests = [
("convolution_2d_1item", (1, 1, 3, 5), (2, 1, 2, 2),
(1, 1), (1, 1), (0, 0), (0, 0), (1, 1), ""),
("convolution_2d_1item_padded_1_1x1_1", (1, 1, 3, 5), (2, 1, 2, 2),
(1, 1), (1, 1), (1, 1), (1, 1), (1, 1), ""),
("convolution_2d_1item_padded_2_3x4_5", (1, 1, 3, 5), (2, 1, 2, 2),
(1, 1), (1, 1), (2, 3), (4, 5), (1, 1), ""),
("convolution_2d_2items", (2, 1, 3, 5), (2, 1, 2, 2),
(1, 1), (1, 1), (0, 0), (0, 0), (1, 1), ""),
("convolution_2d_2items_strided", (2, 1, 3, 5), (2, 1, 2, 2),
(2, 2), (1, 1), (0, 0), (0, 0), (1, 1), ""),
("convolution_2d_2items_strided_padded", (2, 1, 3, 5), (2, 1, 2, 2),
(2, 2), (1, 1), (4, 2), (5, 7), (1, 1), ""),
("convolution_2d_2items_strided_padded_same", (2, 1, 3, 5), (2, 1, 2, 2),
(2, 2), (1, 1), (2, 2), (2, 2), (1, 1), ""),
("convolution_2d_2items_dilated", (2, 1, 3, 5), (2, 1, 2, 2),
(1, 1), (2, 2), (0, 0), (0, 0), (1, 1), ""),
("convolution_2d_2items_dilated_padded", (2, 1, 3, 5), (2, 1, 2, 2),
(1, 1), (2, 2), (4, 2), (5, 7), (1, 1), ""),
("convolution_3d_2items", (2, 1, 3, 5, 8), (2, 1, 2, 2, 3),
(1, 1, 1), (1, 1, 1), (0, 0, 0), (0, 0, 0), (1, 1, 1), ""),
("convolution_4d_2items", (2, 1, 3, 5, 8, 7), (2, 1, 2, 2, 3, 1),
(1, 1, 1, 1), (1, 1, 1, 1), (0, 0, 0, 0), (0, 0, 0, 0), (1, 1, 1, 1), "// "),
("convolution_4d_4items", (4, 3, 3, 5, 8, 7), (4, 3, 2, 2, 3, 1),
(1, 1, 1, 1), (1, 1, 1, 1), (0, 0, 0, 0), (0, 0, 0, 0), (1, 1, 1, 1), "// "),
("convolution_4d_4items_padded_neg", (4, 3, 3, 5, 8, 7), (4, 3, 2, 2, 3, 1),
(1, 1, 1, 1), (1, 1, 1, 1), (-1, 2, -3, 2), (1, 0, 0, -3), (1, 1, 1, 1), "// "),
("convolution_4d_4items_strided", (4, 3, 3, 5, 8, 7), (4, 3, 2, 2, 3, 1),
(2, 1, 3, 2), (1, 1, 1, 1), (0, 0, 0, 0), (0, 0, 0, 0), (1, 1, 1, 1), "// "),
("convolution_4d_4items_dilated", (4, 3, 3, 5, 8, 7), (4, 3, 2, 2, 3, 1),
(1, 1, 1, 1), (2, 1, 3, 2), (0, 0, 0, 0), (0, 0, 0, 0), (1, 1, 1, 1), "// "),
("convolution_4d_4items_strided_dilated", (4, 3, 8, 8, 8, 8), (4, 3, 2, 2, 3, 1),
(3, 2, 2, 3), (2, 1, 3, 2), (0, 0, 0, 0), (0, 0, 0, 0), (1, 1, 1, 1), "// "),
("convolution_4d_4items_strided_dilated_padded",
(4, 3, 8, 8, 8, 8), (4, 3, 2, 2, 3, 1), (3, 2, 2, 3), (2, 1, 3, 2), (2, 4, 6, 8), (1, 3, 5, 7), (1, 1, 1, 1), "// "),
("convolution_4d_4items_strided_dilated_padded_neg",
(4, 3, 8, 8, 8, 8), (4, 3, 2, 2, 3, 1), (3, 2, 2, 3), (2, 1, 3, 2), (-2, 4, 0, 5), (1, 3, -1, -4), (1, 1, 1, 1), "// "),
("convolution_4d_4items_strided_dilated_padded_same",
(4, 3, 8, 8, 8, 8), (4, 3, 2, 2, 3, 1), (3, 2, 2, 3), (2, 1, 3, 2), (3, 3, 3, 3), (3, 3, 3, 3), (1, 1, 1, 1), "// "),
("convolution_2d_1item_1o1i_data_dilated", (1, 1, 3, 5), (1, 1, 2, 2),
(1, 1), (1, 1), (0, 0), (0, 0), (2, 2), ""),
("convolution_2d_1item_2o1i_data_dilated", (1, 1, 3, 5), (2, 1, 2, 2),
(1, 1), (1, 1), (0, 0), (0, 0), (2, 2), ""),
("convolution_2d_1item_2o2i_data_dilated", (1, 2, 3, 5), (2, 2, 2, 2),
(1, 1), (1, 1), (0, 0), (0, 0), (2, 2), ""),
("convolution_2d_1item_5o3i_data_dilated", (1, 3, 3, 5), (5, 3, 2, 2),
(1, 1), (1, 1), (0, 0), (0, 0), (2, 2), ""),
("convolution_2d_2item_5o3i_data_dilated", (2, 3, 3, 5), (5, 3, 2, 2),
(1, 1), (1, 1), (0, 0), (0, 0), (2, 2), ""),
("convolution_2d_8item_large_5o3i_data_dilated",
(8, 3, 16, 16), (5, 3, 2, 2), (1, 1), (1, 1), (0, 0), (0, 0), (2, 2), "// "),
("convolution_2d_8item_large_5o3i_uneven_filter_data_dilated",
(8, 3, 16, 16), (5, 3, 2, 3), (1, 1), (1, 1), (0, 0), (0, 0), (2, 2), "// "),
("convolution_2d_8item_large_5o3i_uneven_filter_uneven_data_dilation_data_dilated",
(8, 3, 16, 16), (5, 3, 2, 3), (1, 1), (1, 1), (0, 0), (0, 0), (2, 3), "// "),
("convolution_3d_2item_large_5o3i_uneven_filter_uneven_data_dilation_data_dilated",
(2, 3, 8, 8, 8), (5, 3, 2, 3, 4), (1, 1, 1), (1, 1, 1), (0, 0, 0), (0, 0, 0), (2, 3, 2), "// "),
("convolution_3d_1item_large_5o3i_padded_uneven_filter_uneven_data_dilation_data_dilated",
(1, 3, 8, 8, 8), (5, 3, 2, 3, 4), (1, 1, 1), (1, 1, 1), (2, 1, 2), (1, 2, 3), (2, 3, 2), "// "),
("convolution_3d_2item_large_5o3i_padded_strided_uneven_filter_uneven_data_dilation_data_dilated",
(2, 3, 8, 8, 8), (5, 3, 2, 3, 4), (2, 3, 2), (1, 1, 1), (2, 1, 2), (1, 2, 3), (2, 3, 2), "// "),
("convolution_3d_2item_large_5o3i_padded_strided_uneven_filter_uneven_data_dilation_filter_dilated_data_dilated",
(2, 3, 8, 8, 8), (5, 3, 2, 3, 4), (2, 3, 2), (3, 2, 2), (2, 1, 2), (1, 2, 3), (2, 3, 2), "// "),
]
def main():
assert(len(sys.argv) > 1)
f = open(sys.argv[1], 'w')
f.write('''//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
// !!!!!!!!!!!!!! THIS FILE IS AUTOGENERATED OUTSIDE OF THE BUILD PROCESS !!!!!!!!!!!!!!
// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! DO NOT EDIT THIS FILE !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
//
// It takes quite a while to compute the results.
//
// DO NOT EDIT THIS FILE. If you want to add new tests, you should edit
// test/ref_generators/generate_convolution_ref.py and regenerate this file.
//
// To regenerate:
//
// $ cd <ngraph source dir>/test
// $ ./update_convolution_reference.sh
//
// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! DO NOT EDIT THIS FILE !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
// !!!!!!!!!!!!!! THIS FILE IS AUTOGENERATED OUTSIDE OF THE BUILD PROCESS !!!!!!!!!!!!!!
//
// clang-format off
#include <cmath>
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "util/test_tools.hpp"
#include "util/autodiff/numeric_compare.hpp"
#include "util/all_close_f.hpp"
#include "util/test_control.hpp"
using namespace std;
using namespace ngraph;
static string s_manifest = "${MANIFEST}";
// for float this will be 18 bits matching
// for bfloat this will be 6 bits matching
constexpr int three_quarters_of_available_bits = (MAX_FLOAT_BITS * 3) / 4;
constexpr int tolerance = FLOAT_MANTISSA_BITS - three_quarters_of_available_bits;
''')
for t in tests:
emit_test(t, f)
f.write('''
// clang-format on
''')
f.close()
if __name__ == "__main__":
main()
| apache-2.0 | 1,861,128,612,188,194,300 | 43.133929 | 166 | 0.507789 | false |
hovo1990/deviser | generator/cmake_files/CMakeFiles.py | 1 | 4088 | #!/usr/bin/env python
#
# @file CMakeFiles.py
# @brief class for generating the cmake files
# @author Frank Bergmann
# @author Sarah Keating
#
# <!--------------------------------------------------------------------------
#
# Copyright (c) 2013-2015 by the California Institute of Technology
# (California, USA), the European Bioinformatics Institute (EMBL-EBI, UK)
# and the University of Heidelberg (Germany), with support from the National
# Institutes of Health (USA) under grant R01GM070923. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# Neither the name of the California Institute of Technology (Caltech), nor
# of the European Bioinformatics Institute (EMBL-EBI), nor of the University
# of Heidelberg, nor the names of any contributors, may be used to endorse
# or promote products derived from this software without specific prior
# written permission.
# ------------------------------------------------------------------------ -->
import os
from util import global_variables
from . import PackageFile
from . import RegisterFile
from . import BaseCMakeFiles
class CMakeFiles():
"""Class for all cmake files"""
def __init__(self, pkg_object, this_dir, verbose=False):
self.verbose = verbose
self.this_dir = this_dir
# # members from object
self.package = pkg_object['name']
self.language = global_variables.language
self.elements = pkg_object['baseElements']
self.plugins = pkg_object['plugins']
#########################################################################
def write_package_files(self):
name = '{0}-package'.format(self.package)
ext = PackageFile.PackageFile(name, self.package, False)
if self.verbose:
print('Writing file {0}'.format(ext.fileout.filename))
ext.write_file()
ext.close_file()
os.chdir('src')
ext = PackageFile.PackageFile(name, self.package, True)
if self.verbose:
print('Writing file {0}'.format(ext.fileout.filename))
ext.write_file()
ext.close_file()
os.chdir(self.this_dir)
def write_register_files(self):
name = '{0}-register'.format(self.package)
ext = RegisterFile.RegisterFile(name, self.package, False)
if self.verbose:
print('Writing file {0}'.format(ext.fileout.filename))
ext.write_file()
ext.close_file()
ext = RegisterFile.RegisterFile(name, self.package, True)
if self.verbose:
print('Writing file {0}'.format(ext.fileout.filename))
ext.write_file()
ext.close_file()
########################################################################
def write_files(self):
self.write_package_files()
os.chdir('src/{0}/packages'.format(self.language))
self.write_register_files()
os.chdir(self.this_dir)
def write_other_library_files(self):
os.chdir(self.this_dir)
cmake = BaseCMakeFiles.BaseCMakeFiles(self.verbose)
cmake.write_files() | lgpl-2.1 | -6,793,863,203,549,932,000 | 38.699029 | 78 | 0.640411 | false |
magenta/magenta | magenta/pipelines/statistics.py | 1 | 8973 | # Copyright 2021 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines statistics objects for pipelines."""
import abc
import bisect
import copy
import tensorflow.compat.v1 as tf
class MergeStatisticsError(Exception):
pass
class Statistic(object):
"""Holds statistics about a Pipeline run.
Pipelines produce statistics on each call to `transform`.
`Statistic` objects can be merged together to aggregate
statistics over the course of many calls to `transform`.
A `Statistic` also has a string name which is used during merging. Any two
`Statistic` instances with the same name may be merged together. The name
should also be informative about what the `Statistic` is measuring. Names
do not need to be unique globally (outside of the `Pipeline` objects that
produce them) because a `Pipeline` that returns statistics will prepend
its own name, effectively creating a namespace for each `Pipeline`.
"""
__metaclass__ = abc.ABCMeta
def __init__(self, name):
"""Constructs a `Statistic`.
Subclass constructors are expected to call this constructor.
Args:
name: The string name for this `Statistic`. Any two `Statistic` objects
with the same name will be merged together. The name should also
describe what this Statistic is measuring.
"""
self.name = name
@abc.abstractmethod
def _merge_from(self, other):
"""Merge another Statistic into this instance.
Takes another Statistic of the same type, and merges its information into
this instance.
Args:
other: Another Statistic instance.
"""
pass
@abc.abstractmethod
def _pretty_print(self, name):
"""Return a string representation of this instance using the given name.
Returns a human readable and nicely presented representation of this
instance. Since this instance does not know what it's measuring, a string
name is given to use in the string representation.
For example, if this Statistic held a count, say 5, and the given name was
'error_count', then the string representation might be 'error_count: 5'.
Args:
name: A string name for this instance.
Returns:
A human readable and preferably a nicely presented string representation
of this instance.
"""
pass
@abc.abstractmethod
def copy(self):
"""Returns a new copy of `self`."""
pass
def merge_from(self, other):
if not isinstance(other, Statistic):
raise MergeStatisticsError(
'Cannot merge with non-Statistic of type %s' % type(other))
if self.name != other.name:
raise MergeStatisticsError(
'Name "%s" does not match this name "%s"' % (other.name, self.name))
self._merge_from(other)
def __str__(self):
return self._pretty_print(self.name)
def merge_statistics(stats_list):
"""Merge together Statistics of the same name in the given list.
Any two Statistics in the list with the same name will be merged into a
single Statistic using the `merge_from` method.
Args:
stats_list: A list of `Statistic` objects.
Returns:
A list of merged Statistics. Each name will appear only once.
"""
name_map = {}
for stat in stats_list:
if stat.name in name_map:
name_map[stat.name].merge_from(stat)
else:
name_map[stat.name] = stat
return list(name_map.values())
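# Illustrative usage sketch, not part of the original module: two Counter
# statistics (defined below) that share a name are merged into a single
# entry whose counts are summed. The statistic name used here is arbitrary.
def _example_merge_statistics():
  first = Counter('inputs_discarded', start_value=2)
  second = Counter('inputs_discarded', start_value=3)
  merged = merge_statistics([first, second])
  return merged[0].count  # 2 + 3 == 5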
def log_statistics_list(stats_list, logger_fn=tf.logging.info):
"""Calls the given logger function on each `Statistic` in the list.
Args:
stats_list: A list of `Statistic` objects.
logger_fn: The function which will be called on the string representation
of each `Statistic`.
"""
for stat in sorted(stats_list, key=lambda s: s.name):
logger_fn(str(stat))
class Counter(Statistic):
"""Represents a count of occurrences of events or objects.
`Counter` can help debug Pipeline computations. For example, by counting
objects (consumed, produced, etc...) by the Pipeline, or occurrences of
certain cases in the Pipeline.
"""
def __init__(self, name, start_value=0):
"""Constructs a Counter.
Args:
name: String name of this counter.
start_value: What value to start the count at.
"""
super(Counter, self).__init__(name)
self.count = start_value
def increment(self, inc=1):
"""Increment the count.
Args:
inc: (defaults to 1) How much to increment the count by.
"""
self.count += inc
def _merge_from(self, other):
"""Adds the count of another Counter into this instance."""
if not isinstance(other, Counter):
raise MergeStatisticsError(
'Cannot merge %s into Counter' % other.__class__.__name__)
self.count += other.count
def _pretty_print(self, name):
return '%s: %d' % (name, self.count)
def copy(self):
return copy.copy(self)
class Histogram(Statistic):
"""Represents a histogram of real-valued events.
A histogram is a list of counts, each over a range of values.
For example, given this list of values [0.5, 0.0, 1.0, 0.6, 1.5, 2.4, 0.1],
a histogram over 3 ranges [0, 1), [1, 2), [2, 3) would be:
[0, 1): 4
[1, 2): 2
[2, 3): 1
Each range is inclusive in the lower bound and exclusive in the upper bound
(hence the square open bracket but curved close bracket).
Usage examples:
A distribution over input/output lengths.
A distribution over compute times.
"""
def __init__(self, name, buckets, verbose_pretty_print=False):
"""Initializes the histogram with the given ranges.
Args:
name: String name of this histogram.
buckets: The ranges the histogram counts over. This is a list of values,
where each value is the inclusive lower bound of the range. An extra
range will be implicitly defined which spans from negative infinity
to the lowest given lower bound. The highest given lower bound
          defines a range spanning to positive infinity. This way any value will
be included in the histogram counts. For example, if `buckets` is
[4, 6, 10] the histogram will have ranges
[-inf, 4), [4, 6), [6, 10), [10, inf).
verbose_pretty_print: If True, self.pretty_print will print the count for
every bucket. If False, only buckets with positive counts will be
printed.
"""
super(Histogram, self).__init__(name)
# List of inclusive lowest values in each bucket.
self.buckets = [float('-inf')] + sorted(set(buckets))
self.counters = dict((bucket_lower, 0) for bucket_lower in self.buckets)
self.verbose_pretty_print = verbose_pretty_print
# https://docs.python.org/2/library/bisect.html#searching-sorted-lists
def _find_le(self, x):
"""Find rightmost bucket less than or equal to x."""
i = bisect.bisect_right(self.buckets, x)
if i:
return self.buckets[i-1]
raise ValueError
def increment(self, value, inc=1):
"""Increment the bucket containing the given value.
    The bucket count for whichever range `value` falls in will be incremented.
Args:
value: Any number.
inc: An integer. How much to increment the bucket count by.
"""
bucket_lower = self._find_le(value)
self.counters[bucket_lower] += inc
def _merge_from(self, other):
"""Adds the counts of another Histogram into this instance.
`other` must have the same buckets as this instance. The counts
from `other` are added to the counts for this instance.
Args:
other: Another Histogram instance with the same buckets as this instance.
Raises:
MergeStatisticsError: If `other` is not a Histogram or the buckets
are not the same.
"""
if not isinstance(other, Histogram):
raise MergeStatisticsError(
'Cannot merge %s into Histogram' % other.__class__.__name__)
if self.buckets != other.buckets:
raise MergeStatisticsError(
'Histogram buckets do not match. Expected %s, got %s'
% (self.buckets, other.buckets))
for bucket_lower, count in other.counters.items():
self.counters[bucket_lower] += count
def _pretty_print(self, name):
b = self.buckets + [float('inf')]
return ('%s:\n' % name) + '\n'.join(
[' [%s,%s): %d' % (lower, b[i+1], self.counters[lower])
for i, lower in enumerate(self.buckets)
if self.verbose_pretty_print or self.counters[lower]])
def copy(self):
return copy.copy(self)
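# Illustrative usage sketch, not part of the original module: counting the
# example values from the Histogram docstring into buckets [1, 2, 3]. The
# statistic name and bucket edges are arbitrary.
def _example_histogram_usage():
  histo = Histogram('compute_time_sec', [1, 2, 3])
  for value in [0.5, 0.0, 1.0, 0.6, 1.5, 2.4, 0.1]:
    histo.increment(value)
  # Resulting counts: [-inf, 1): 4, [1, 2): 2, [2, 3): 1, [3, inf): 0
  return histo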
| apache-2.0 | 4,317,756,786,415,895,600 | 31.988971 | 79 | 0.678034 | false |
ryos36/polyphony-tutorial | Tutorial_1/root_2.py | 1 | 1177 | from polyphony import testbench
shift_n = 30
def round(x):
return x
def abs(x):
if x < 0 :
return -x
else:
return x
def i_mul_floor(x, y):
tmp = x * y
return tmp >> shift_n
def i_mul_ceil(x, y):
tmp = x * y
d = tmp & (1 << shift_n - 1)
tmp >>= shift_n
if d :
tmp += 1
return tmp
def i_square(x):
return i_mul_floor(x, x)
def i_root(x):
if x == 0 :
return 0
epsilon = 1
numGuesses = 0
new_x = x << shift_n
low_n = 0
if new_x > (1 << shift_n):
high_n = new_x
else:
high_n = (1 << shift_n)
ans = (low_n + high_n) >> 1
old_ans = 0
while (abs(i_square(ans) - new_x)) >= epsilon:
print("low = ", low_n , " high = ", high_n, " ans = ", ans)
if old_ans == ans :
break
numGuesses += 1
if (i_square(ans) < new_x):
low_n = ans
else:
high_n = ans
old_ans = ans
ans = (low_n + high_n) >> 1
return ans
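# Worked example (not part of the original tutorial code), assuming the Q30
# fixed-point format set up by shift_n above: i_root(25) computes the square
# root of 25 << 30 in Q30, so the returned integer is approximately
# 5 * 2**30 = 5368709120, and shifting it right by shift_n recovers the
# integer part: (i_root(25) >> shift_n) == 5.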
@testbench
def test():
x = 25
result = i_root(x)
print(result)
x = 2
result = i_root(x)
print(result)
test()
| mit | 3,411,647,756,418,904,600 | 15.577465 | 67 | 0.451147 | false |
arielalmendral/ert | python/tests/core/ecl/test_faults.py | 1 | 25965 | #!/usr/bin/env python
# Copyright (C) 2014 Statoil ASA, Norway.
#
# The file 'test_faults.py' is part of ERT - Ensemble based Reservoir Tool.
#
# ERT is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ERT is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE.
#
# See the GNU General Public License at <http://www.gnu.org/licenses/gpl.html>
# for more details.
from unittest import skipIf
import time
from ert import util
from ert.ecl.faults import FaultCollection, Fault, FaultLine, FaultSegment,FaultBlockLayer
from ert.ecl import EclGrid, EclKW, EclTypeEnum
from ert.test import ExtendedTestCase, TestAreaContext
from ert.geo import Polyline , CPolyline
class FaultTest(ExtendedTestCase):
@classmethod
def setUpClass(cls):
cls.grid = EclGrid.create_rectangular( (151,100,50) , (1,1,1))
def setUp(self):
self.faults1 = self.createTestPath("local/ECLIPSE/FAULTS/fault1.grdecl")
self.faults2 = self.createTestPath("local/ECLIPSE/FAULTS/fault2.grdecl")
def test_PolylineIJ(self):
nx = 10
ny = 10
nz = 10
grid = EclGrid.create_rectangular( (nx,ny,nz) , (0.1,0.1,0.1))
f = Fault(grid , "F")
f.addRecord(0 , 1 , 0 , 0 , 0,0 , "Y-")
f.addRecord(2 , 2 , 0 , 1 , 0,0 , "X-")
f.addRecord(2 , 2 , 1 , 1 , 0,0 , "Y")
pl = f.getIJPolyline( 0 )
self.assertEqual(pl , [(0,0) , (2,0) , (2,2) , (3,2)])
def test_empty_collection(self):
faults = FaultCollection()
self.assertEqual(0 , len(faults))
self.assertFalse( faults.hasFault("FX") )
with self.assertRaises(TypeError):
f = faults[ [] ]
with self.assertRaises(KeyError):
f = faults["FX"]
with self.assertRaises(IndexError):
f = faults[0]
self.assertFalse( "NAME" in faults )
def test_collection_invalid_arg(self):
with self.assertRaises(ValueError):
faults = FaultCollection(self.faults1)
with self.assertRaises(ValueError):
faults = FaultCollection(self.faults1 , self.faults2)
def test_splitLine(self):
faults = FaultCollection(self.grid)
with self.assertRaises(ValueError):
# Not slash terminated
t = faults.splitLine("'F1' 149 149 29 29 1 43 'Y'")
with self.assertRaises(ValueError):
# Not integer
t = faults.splitLine("'F1' 149 149 29 29 1 43X 'Y' /")
with self.assertRaises(ValueError):
# Missing item
t = faults.splitLine("'F1' 149 29 29 1 43 'Y' /")
with self.assertRaises(ValueError):
            # Mismatched quotes
t = faults.splitLine("'F1 149 149 29 29 1 43 'X' /")
def test_empty_fault( self ):
f = Fault(self.grid , "NAME")
self.assertEqual("NAME" , f.getName())
with self.assertRaises(Exception):
g = f["Key"]
def test_empty_faultLine(self):
fl = FaultLine(self.grid , 10)
self.assertEqual( 10 , fl.getK())
self.assertEqual( 0 , len(fl) )
with self.assertRaises(TypeError):
f = fl[ "Test" ]
with self.assertRaises(IndexError):
f = fl[0]
def test_faultLine_center(self):
nx = 10
ny = 10
nz = 2
grid = EclGrid.create_rectangular( (nx,ny,nz) , (0.1,0.1,0.1))
fl = FaultLine(grid , 0)
C1 = (nx + 1) * 5 + 3
C2 = C1 + 2
C3 = C2 + 2
s1 = FaultSegment( C1 , C2 )
s2 = FaultSegment( C2 , C3 )
fl.tryAppend( s1 )
fl.tryAppend( s2 )
self.assertEqual( len(fl) , 2 )
self.assertEqual( fl.center() , (0.50 , 0.50) )
def test_faultLine(self):
fl = FaultLine(self.grid , 10)
S1 = FaultSegment(0 , 10)
S2 = FaultSegment(10 , 20)
fl.tryAppend( S1 )
fl.tryAppend( S2 )
fl.verify()
S3 = FaultSegment(20 , 30)
fl.tryAppend( S3 )
fl.verify()
#---
fl = FaultLine(self.grid , 10)
S1 = FaultSegment(0 , 10)
S2 = FaultSegment(20 , 10)
fl.tryAppend( S1 )
self.assertTrue( fl.tryAppend( S2 ) )
fl.verify()
#---
fl = FaultLine(self.grid , 10)
S1 = FaultSegment(10 , 0)
S2 = FaultSegment(20 , 10)
fl.tryAppend( S1 )
fl.tryAppend( S2 )
fl.verify()
#---
fl = FaultLine(self.grid , 10)
S1 = FaultSegment(10 , 0)
S2 = FaultSegment(10 , 20)
fl.tryAppend( S1 )
fl.tryAppend( S2 )
fl.verify()
fl = FaultLine(self.grid , 10)
S1 = FaultSegment(10 , 0)
S2 = FaultSegment(10 , 20)
fl.tryAppend( S1 )
fl.tryAppend( S2 )
S3 = FaultSegment(40 , 30)
self.assertTrue( fl.tryAppend(S3) == False )
self.assertEqual( len(fl) , 2 )
pl = fl.getPolyline( )
self.assertIsInstance( pl , CPolyline )
self.assertEqual( len(pl) , len(fl) + 1 )
S3 = FaultSegment(20 , 30)
fl.tryAppend( S3 )
pl = fl.getPolyline( )
self.assertIsInstance( pl , CPolyline )
self.assertEqual( len(pl) , len(fl) + 1 )
def test_load(self):
faults = FaultCollection(self.grid , self.faults1)
self.assertEqual( 3 , len(faults))
faults.load( self.grid , self.faults2 )
self.assertEqual( 7 , len(faults))
fault1 = faults["F1"]
layer8 = fault1[8]
self.assertEqual( len(layer8) , 1 )
with self.assertRaises(IOError):
faults.load(self.grid , "No/this/does/not/exist")
def test_connect_faults(self):
grid = EclGrid.create_rectangular( (100,100,10) , (1,1,1))
# Fault1 Fault4
# | |
# | |
# | |
# | ------- Fault2 |
# | |
# | |
#
# -------- Fault3
#
fault1 = Fault(grid , "Fault1")
fault2 = Fault(grid , "Fault2")
fault3 = Fault(grid , "Fault3")
fault4 = Fault(grid , "Fault4")
fault1.addRecord(1 , 1 , 10 , grid.getNY() - 1 , 0 , 0 , "X")
fault2.addRecord(5 , 10 , 15 , 15 , 0 , 0 , "Y")
fault3.addRecord(5 , 10 , 5 , 5 , 0 , 0 , "Y")
fault4.addRecord(20 , 20 , 10 , grid.getNY() - 1 , 0 , 0 , "X")
for other_fault in [fault2 , fault3,fault4]:
with self.assertRaises(ValueError):
fault1.extendToFault( other_fault ,0)
with self.assertRaises(ValueError):
fault2.extendToFault( fault3 , 0)
for other_fault in [fault1 , fault2,fault4]:
with self.assertRaises(ValueError):
fault3.extendToFault( other_fault ,0 )
for other_fault in [fault1 , fault2,fault3]:
with self.assertRaises(ValueError):
fault4.extendToFault( other_fault , 0)
ext21 = fault2.extendToFault( fault1 , 0)
self.assertEqual(len(ext21) , 2)
p0 = ext21[0]
p1 = ext21[1]
self.assertEqual(p0 , (5 , 16))
self.assertEqual(p1 , (2 , 16))
ext24 = fault2.extendToFault( fault4,0 )
self.assertEqual(len(ext24) , 2)
p0 = ext24[0]
p1 = ext24[1]
self.assertEqual(p0 , (11 , 16))
self.assertEqual(p1 , (21 , 16))
def test_intersect_intRays(self):
p1 = (0,0)
dir1 = (1,0)
p2 = (0,0)
dir2 = (0,1)
line = Fault.intersectFaultRays(( p1,dir1),(p2,dir2 ))
self.assertEqual( line , [] )
# Opposite direction
p3 = (-1,0)
dir3 = (-1,0)
with self.assertRaises(ValueError):
Fault.intersectFaultRays(( p1,dir1),(p3,dir3))
with self.assertRaises(ValueError):
Fault.intersectFaultRays(( p3,dir3),(p1,dir1))
# Parallell with offset
p4 = (0,1)
dir4 = (1,0)
with self.assertRaises(ValueError):
Fault.intersectFaultRays(( p1,dir1),(p4,dir4))
p5 = (0,1)
dir5 = (-1,0)
with self.assertRaises(ValueError):
Fault.intersectFaultRays(( p1,dir1),(p5,dir5))
p6 = (1,1)
dir6 = (1,0)
with self.assertRaises(ValueError):
Fault.intersectFaultRays(( p1,dir1),(p6,dir6))
p2 = (-1,0)
dir2 = (-1,0)
join = Fault.intersectFaultRays(( p2,dir1),(p1,dir2))
self.assertEqual( join , [p2 , p1])
join = Fault.intersectFaultRays(( p1,dir3),(p3,dir1))
self.assertEqual( join , [p1 , p3])
p2 = (1,0)
dir2 = (1,0)
join = Fault.intersectFaultRays(( p1,dir1),(p2,dir2))
self.assertEqual( join , [p1 , p2])
# Orthogonal
p2 = (1,1)
dir2 = (0,1)
with self.assertRaises(ValueError):
Fault.intersectFaultRays(( p1,dir1),(p2,dir2 ))
p2 = (0,1)
dir2 = (0,1)
with self.assertRaises(ValueError):
Fault.intersectFaultRays(( p1,dir1),(p2,dir2 ))
p2 = (-1,0)
dir2 = (0,1)
with self.assertRaises(ValueError):
Fault.intersectFaultRays(( p1,dir1),(p2,dir2 ))
p2 = (-1,1)
dir2 = (0,1)
with self.assertRaises(ValueError):
Fault.intersectFaultRays(( p1,dir1),(p2,dir2 ))
p2 = (-1,1)
dir2 = (0,-1)
with self.assertRaises(ValueError):
Fault.intersectFaultRays(( p1,dir1),(p2,dir2 ))
p2 = (3,-1)
dir2 = (0,-1)
with self.assertRaises(ValueError):
Fault.intersectFaultRays(( p1,dir1),(p2,dir2 ))
p2 = (1,-1)
dir2 = (0,1)
join = Fault.intersectFaultRays(( p1,dir1),(p2,dir2 ))
self.assertEqual(join , [p1 , (1,0) , p2])
p2 = (1,1)
dir2 = (0,-1)
join = Fault.intersectFaultRays(( p1,dir1),(p2,dir2 ))
self.assertEqual(join , [p1 , (1,0) , p2])
p2 = (0,3)
dir2 = (0,-1)
join = Fault.intersectFaultRays(( p1,dir1),(p2,dir2 ))
self.assertEqual(join , [p1 , p2])
p2 = (3,0)
dir2 = (0,-1)
join = Fault.intersectFaultRays(( p1,dir1),(p2,dir2 ))
self.assertEqual(join , [p1 , p2])
def test_join_faults(self):
grid = EclGrid.create_rectangular( (100,100,10) , (1,1,1))
# Fault1 Fault4
# | |
# | |
# | |
# | ------- Fault2 |
# | |
# | |
#
# -------- Fault3
#
fault1 = Fault(grid , "Fault1")
fault2 = Fault(grid , "Fault2")
fault3 = Fault(grid , "Fault3")
fault4 = Fault(grid , "Fault4")
fault1.addRecord(1 , 1 , 10 , grid.getNY() - 1 , 0 , 0 , "X")
fault2.addRecord(5 , 10 , 15 , 15 , 0 , 0 , "Y")
fault3.addRecord(5 , 10 , 5 , 5 , 0 , 0 , "Y")
fault4.addRecord(20 , 20 , 10 , grid.getNY() - 1 , 0 , 0 , "X")
rays = fault1.getEndRays(0)
self.assertEqual( rays[0] , [(2,10) , (0,-1)])
self.assertEqual( rays[1] , [(2,100) , (0,1)])
extra = Fault.joinFaults( fault1 , fault3 , 0)
self.assertEqual( extra , [(2,10) , (2,6) , (5,6)] )
def test_contact(self):
grid = EclGrid.create_rectangular( (100,100,10) , (1,1,1))
# Fault1 Fault4
# | |
# | |
# | |
# | ----------------------+-- Fault2
# | |
# | |
#
# -------- Fault3
#
fault1 = Fault(grid , "Fault1")
fault2 = Fault(grid , "Fault2")
fault3 = Fault(grid , "Fault3")
fault4 = Fault(grid , "Fault4")
fault1.addRecord(1 , 1 , 10 , grid.getNY() - 1 , 0 , 0 , "X")
fault2.addRecord(5 , 30 , 15 , 15 , 0 , 0 , "Y")
fault3.addRecord(2 , 10 , 9 , 9 , 0 , 0 , "Y")
fault4.addRecord(20 , 20 , 10 , grid.getNY() - 1 , 0 , 0 , "X")
#self.assertFalse( fault1.intersectsFault(fault2 , 0) )
#self.assertFalse( fault2.intersectsFault(fault1 , 0) )
#self.assertTrue( fault2.intersectsFault(fault4 , 0) )
#self.assertTrue( fault4.intersectsFault(fault2 , 0) )
self.assertTrue( fault1.intersectsFault(fault1 , 0) )
#self.assertTrue( fault3.intersectsFault(fault3 , 0) )
def test_iter(self):
faults = FaultCollection(self.grid , self.faults1 , self.faults2)
self.assertEqual( 7 , len(faults))
c = 0
for f in faults:
c += 1
self.assertEqual( c , len(faults))
for f in ["F1","F2","F3" ,"F4"]:
self.assertTrue( f in faults )
self.assertFalse("FX" in faults )
def test_fault(self):
f = Fault(self.grid , "NAME")
with self.assertRaises(ValueError):
# Invalid face
f.addRecord( 10 , 10 , 11 , 11 , 1 , 43 , "F")
with self.assertRaises(ValueError):
# Invalid coordinates
f.addRecord( -1 , 10 , 11 , 11 , 1 , 43 , "X")
with self.assertRaises(ValueError):
# Invalid coordinates
f.addRecord( 10000 , 10 , 11 , 11 , 1 , 43 , "X")
with self.assertRaises(ValueError):
# Invalid coordinates
f.addRecord( 10 , 9 , 11 , 11 , 1 , 43 , "X")
with self.assertRaises(ValueError):
# Invalid coordinates
f.addRecord( 10 , 9 , 11 , 11 , 1 , 43 , "X")
with self.assertRaises(ValueError):
# Invalid coordinates/face combination
f.addRecord( 10 , 11 , 11 , 11 , 1 , 43 , "X")
with self.assertRaises(ValueError):
# Invalid coordinates/face combination
f.addRecord( 10 , 11 , 11 , 12 , 1 , 43 , "Y")
f.addRecord(10 , 10 , 0 , 10 , 1 , 10 , "X")
def test_segment(self ):
s0 = FaultSegment(0 , 10)
self.assertEqual(s0.getC1() , 0 )
self.assertEqual(s0.getC2() , 10 )
s0.swap()
self.assertEqual(s0.getC1() , 10 )
self.assertEqual(s0.getC2() , 0 )
def test_fault_line(self ):
faults = FaultCollection(self.grid , self.faults1 , self.faults2)
for fault in faults:
for layer in fault:
for fl in layer:
fl.verify()
def test_fault_line_order(self):
nx = 120
ny = 60
nz = 43
grid = EclGrid.create_rectangular( (nx , ny , nz) , (1,1,1) )
with TestAreaContext("python/faults/line_order"):
with open("faults.grdecl" , "w") as f:
f.write("""FAULTS
\'F\' 105 107 50 50 1 43 \'Y\' /
\'F\' 108 108 50 50 1 43 \'X\' /
\'F\' 108 108 50 50 22 43 \'Y\' /
\'F\' 109 109 49 49 1 43 \'Y\' /
\'F\' 110 110 49 49 1 43 \'X\' /
\'F\' 111 111 48 48 1 43 \'Y\' /
/
""")
faults = FaultCollection( grid , "faults.grdecl" )
fault = faults["F"]
layer = fault[29]
self.assertEqual(len(layer) , 2)
line1 = layer[0]
line2 = layer[1]
self.assertEqual(len(line1) , 4)
self.assertEqual(len(line2) , 2)
seg0 = line1[0]
seg1 = line1[1]
seg2 = line1[2]
seg3 = line1[3]
self.assertEqual( seg0.getCorners() , (50 * (nx + 1) + 104 , 50 * (nx + 1) + 107))
self.assertEqual( seg1.getCorners() , (50 * (nx + 1) + 107 , 50 * (nx + 1) + 108))
self.assertEqual( seg2.getCorners() , (50 * (nx + 1) + 108 , 49 * (nx + 1) + 108))
self.assertEqual( seg3.getCorners() , (49 * (nx + 1) + 108 , 49 * (nx + 1) + 109))
def test_neighbour_cells(self):
nx = 10
ny = 8
nz = 7
grid = EclGrid.create_rectangular( (nx , ny , nz) , (1,1,1) )
faults_file = self.createTestPath("local/ECLIPSE/FAULTS/faults_nb.grdecl")
faults = FaultCollection( grid , faults_file )
fault = faults["FY"]
self.assertEqual(len(fault),1)
fault_layer = fault[0]
fl1 = fault_layer[0]
nb_cells1 = fl1.getNeighborCells()
true_nb_cells1 = [(0, nx) , (1,nx + 1), (2,nx+2) , (3,nx + 3) , (4,nx+4)]
self.assertListEqual( nb_cells1 , true_nb_cells1 )
fl2 = fault_layer[1]
nb_cells2 = fl2.getNeighborCells()
true_nb_cells2 = [(6, nx+6) , (7,nx + 7), (8 , nx+8) , (9,nx + 9)]
self.assertListEqual( nb_cells2 , true_nb_cells2 )
nb_cells = fault_layer.getNeighborCells()
self.assertListEqual( nb_cells , true_nb_cells1 + true_nb_cells2)
fault = faults["FY0"]
fault_layer = fault[0]
fl1 = fault_layer[0]
nb_cells1 = fl1.getNeighborCells()
true_nb_cells1 = [(-1,0) , (-1,1), (-1,2)]
self.assertListEqual( nb_cells1 , true_nb_cells1 )
fault = faults["FYNY"]
fault_layer = fault[0]
fl1 = fault_layer[0]
nb_cells1 = fl1.getNeighborCells()
true_nb_cells1 = [(nx * (ny - 1) , -1), (nx * (ny - 1) + 1 , -1), (nx * (ny - 1) + 2, -1)]
self.assertListEqual( nb_cells1 , true_nb_cells1 )
fault = faults["FX"]
fault_layer = fault[0]
fl1 = fault_layer[0]
nb_cells1 = fl1.getNeighborCells()
true_nb_cells1 = [(0,1) , (nx , nx+1) , (2*nx , 2*nx + 1)]
self.assertListEqual( nb_cells1 , true_nb_cells1 )
fault = faults["FX0"]
fault_layer = fault[0]
fl1 = fault_layer[0]
nb_cells1 = fl1.getNeighborCells()
true_nb_cells1 = [(-1 , 0) , (-1 , nx) , (-1 , 2*nx)]
self.assertListEqual( nb_cells1 , true_nb_cells1 )
fault = faults["FXNX"]
fault_layer = fault[0]
fl1 = fault_layer[0]
nb_cells1 = fl1.getNeighborCells()
true_nb_cells1 = [(nx -1 , -1) , (2*nx -1 , -1) , (3*nx - 1 , -1)]
self.assertListEqual( nb_cells1 , true_nb_cells1 )
def test_polyline_intersection(self):
grid = EclGrid.create_rectangular( (100,100,10) , (0.25 , 0.25 , 1))
# Fault1 Fault4
# | |
# | |
# | |
# | ------- Fault2 |
# | |
# | |
# (5 , 2.50)
# -------- Fault3
#
fault1 = Fault(grid , "Fault1")
fault2 = Fault(grid , "Fault2")
fault3 = Fault(grid , "Fault3")
fault4 = Fault(grid , "Fault4")
fault1.addRecord(1 , 1 , 10 , grid.getNY() - 1 , 0 , 0 , "X")
fault2.addRecord(5 , 10 , 15 , 15 , 0 , 0 , "Y")
fault3.addRecord(5 , 10 , 5 , 5 , 0 , 0 , "Y")
fault4.addRecord(20 , 20 , 10 , grid.getNY() - 1 , 0 , 0 , "X")
polyline = Polyline( init_points = [(4 , 4) , (8,4)])
self.assertTrue( fault4.intersectsPolyline( polyline , 0))
cpolyline = CPolyline( init_points = [(4 , 4) , (8,4)])
self.assertTrue( fault4.intersectsPolyline( cpolyline , 0))
polyline = Polyline( init_points = [(8 , 4) , (16,4)])
self.assertFalse( fault4.intersectsPolyline( polyline , 0))
cpolyline = CPolyline( init_points = [(8 , 4) , (16,4)])
self.assertFalse( fault4.intersectsPolyline( cpolyline , 0))
def test_num_linesegment(self):
nx = 10
ny = 10
nz = 1
grid = EclGrid.create_rectangular( (nx , ny , nz) , (1,1,1) )
with TestAreaContext("python/faults/line_order"):
with open("faults.grdecl" , "w") as f:
f.write("""FAULTS
\'F1\' 1 4 2 2 1 1 \'Y\' /
\'F1\' 6 8 2 2 1 1 \'Y\' /
\'F2\' 1 8 2 2 1 1 \'Y\' /
/
""")
faults = FaultCollection( grid , "faults.grdecl" )
f1 = faults["F1"]
f2 = faults["F2"]
self.assertEqual( 2 , f1.numLines(0))
self.assertEqual( 1 , f2.numLines(0))
def test_extend_to_polyline(self):
grid = EclGrid.create_rectangular( (3,3,1) , (1 , 1 , 1))
# o o o o
#
# o---o---o---o
#
# o===+ o o
# |
# o o o o
fault1 = Fault(grid , "Fault")
fault1.addRecord(0 , 0 , 0 , 0 , 0 , 0 , "X-")
fault1.addRecord(0 , 0 , 0 , 0 , 0 , 0 , "Y")
polyline = CPolyline( init_points = [(0,2) , (3,2)])
points = fault1.extendToPolyline( polyline , 0 )
self.assertEqual( points , [(1,1) , (2,2)])
end_join = fault1.endJoin( polyline , 0 )
self.assertEqual( end_join, [(1,1) , (0,2)] )
polyline2 = CPolyline( init_points = [(0.8,2) , (0.8,0.8)])
end_join = fault1.endJoin( polyline2 , 0 )
self.assertIsNone( end_join )
def test_extend_polyline_on(self):
grid = EclGrid.create_rectangular( (3,3,1) , (1 , 1 , 1))
# o o o o
#
# o---o---o---o
#
# o===o===o===o
#
# o o o o
fault1 = Fault(grid , "Fault")
fault1.addRecord(0 , 2 , 0 , 0 , 0 , 0 , "Y")
polyline0 = CPolyline( init_points = [(0,2)])
polyline1 = CPolyline( init_points = [(0,2) , (3,2)])
polyline2 = CPolyline( init_points = [(1,3) , (1,2)])
polyline3 = CPolyline( init_points = [(1,3) , (1,0)])
with self.assertRaises(ValueError):
fault1.extendPolylineOnto( polyline0 , 0 )
points = fault1.extendPolylineOnto( polyline1 , 0 )
self.assertIsNone( points )
points = fault1.extendPolylineOnto( polyline2 , 0)
self.assertEqual( points , [(1,2) , (1,1)])
points = fault1.extendPolylineOnto( polyline3 , 0)
self.assertIsNone( points )
def test_stepped(self):
grid = EclGrid.create_rectangular( (6,1,4) , (1,1,1))
f = Fault(grid , "F")
f.addRecord(4,4,0,0,0,1,"X")
f.addRecord(2,2,0,0,1,1,"Z")
f.addRecord(1,1,0,0,2,3,"X")
block_kw = EclKW.create("FAULTBLK" , grid.getGlobalSize() , EclTypeEnum.ECL_INT_TYPE)
block_kw.assign(1)
block_kw[5] = 2
block_kw[11] = 2
block_kw[14:18] = 2
block_kw[14:18] = 2
block_kw[20:23] = 2
layer0 = FaultBlockLayer( grid , 0 )
layer0.scanKeyword( block_kw )
layer0.addFaultBarrier( f )
self.assertTrue( layer0.cellContact((0,0) , (1,0)))
self.assertFalse( layer0.cellContact((4,0) , (5,0)))
layer1 = FaultBlockLayer( grid , 1 )
layer1.scanKeyword( block_kw )
layer1.addFaultBarrier( f )
self.assertTrue( layer1.cellContact((0,0) , (1,0)))
self.assertFalse( layer1.cellContact((4,0) , (5,0)))
layer2 = FaultBlockLayer( grid , 2 )
layer2.scanKeyword( block_kw )
layer2.addFaultBarrier( f )
self.assertTrue( layer2.cellContact((0,0) , (1,0)))
self.assertFalse( layer2.cellContact((1,0) , (2,0)))
layer3 = FaultBlockLayer( grid , 3 )
layer3.scanKeyword( block_kw )
layer3.addFaultBarrier( f )
self.assertTrue( layer3.cellContact((0,0) , (1,0)))
self.assertFalse( layer3.cellContact((1,0) , (2,0)))
def test_connectWithPolyline(self):
grid = EclGrid.create_rectangular( (4,4,1) , (1 , 1 , 1))
# o o o o o
#
# o o o o o
#
# o---o---o---o---o
#
# o o o o o
# |
# o o o o o
fault1 = Fault(grid , "Fault1")
fault1.addRecord(0 , 3 , 1 , 1 , 0 , 0 , "Y")
fault2 = Fault(grid , "Fault2")
fault2.addRecord(1 , 1 , 0 , 0 , 0 , 0 , "X")
fault3 = Fault(grid , "Fault3")
fault3.addRecord(1 , 1 , 0 , 2 , 0 , 0 , "X")
self.assertIsNone( fault3.connect( fault1 , 0 ))
intersect = fault2.connect( fault1 , 0 )
self.assertEqual( len(intersect) , 2 )
p1 = intersect[0]
p2 = intersect[1]
self.assertEqual( p1 , (2,1))
self.assertEqual( p2 , (2,2))
| gpl-3.0 | 8,771,375,853,594,739,000 | 31.619347 | 98 | 0.479684 | false |
llazzaro/packyou | packyou/py2.py | 1 | 10645 | # -*- coding: utf-8 -*-
import imp
import logging
from sys import modules, meta_path
from os import mkdir
from os.path import (
isdir,
abspath,
dirname,
exists,
join,
)
import encodings.idna
import requests
from git import Repo
from packyou import find_module_path_in_cloned_repos
from packyou.utils import walklevel, memoize
MODULES_PATH = dirname(abspath(__file__))
LOGGER = logging.getLogger(__name__)
class GithubLoader(object):
"""
Import hook that will allow to import from a github repo.
"""
def __init__(self, repo_url=None, path=None, username=None, repository_name=None):
self.path = path
self.repo_url = repo_url
self.username = username
self.repository_name = repository_name
def check_root(self, fullname):
"""
        Sometimes the code is a python package or similar and there is a directory
        which contains all the code. This method searches the root of the cloned
        repository first for the imported module.
"""
parent, _, module_name = fullname.rpartition('.')
if self.username and self.repository_name:
            # TODO: check that every path is handled
cloned_root = join(self.path[0], 'github', self.username, self.repository_name)
candidate_path = join(cloned_root, module_name)
if exists(candidate_path):
return candidate_path
for root, dirs, files in walklevel(cloned_root, level=1):
pass
def get_source(self, fullname):
filename = self.get_filename(fullname)
with open(filename, 'r') as source_file:
return source_file.read()
def get_code(self, fullname):
source = self.get_source(fullname)
return compile(source, self.get_filename(fullname), 'exec', dont_inherit=True)
def get_filename(self, fullname):
parent, _, current_module = fullname.rpartition('.')
filename = None
LOGGER.debug('Fullname {0} self.path {1}'.format(fullname, self.path))
for path in self.path:
package_path = join(path, '__init__.py')
if exists(package_path):
filename = package_path
module_path = '{0}.py'.format(join(path, current_module))
if exists(module_path):
filename = module_path
LOGGER.debug('get_filename({0}) is {1}'.format(fullname, filename))
return filename
def is_package(self, fullname):
filename = self.get_filename(fullname)
return not exists(filename) or isdir(filename)
def get_or_create_module(self, fullname):
"""
Given a name and a path it will return a module instance
if found.
When the module could not be found it will raise ImportError
"""
LOGGER.info('Loading module {0}'.format(fullname))
parent, _, module_name = fullname.rpartition('.')
if fullname in modules:
LOGGER.info('Found cache entry for {0}'.format(fullname))
return modules[fullname]
module = modules.setdefault(fullname, imp.new_module(fullname))
if len(fullname.strip('.')) > 3:
absolute_from_root = fullname.split('.', 3)[-1]
modules.setdefault(absolute_from_root, module)
if len(fullname.split('.')) == 4:
# add the root of the project
modules[fullname.split('.')[-1]] = module
# required by PEP 302
module.__file__ = self.get_filename(fullname)
LOGGER.info('Created module {0} with fullname {1}'.format(self.get_filename(fullname), fullname))
module.__name__ = fullname
module.__loader__ = self
module.__path__ = self.path
if self.is_package(fullname):
module.__path__ = self.path
module.__package__ = fullname
else:
module.__package__ = fullname.rpartition('.')[0]
LOGGER.debug('loading file {0}'.format(self.get_filename(fullname)))
source = self.get_source(fullname)
try:
exec(source, module.__dict__)
        except Exception:
            LOGGER.exception('Error executing source for module {0}'.format(fullname))
return module
def clone_github_repo(self):
"""
Clones a github repo with a username and repository_name
"""
if not (self.username and self.repository_name):
return
repository_local_destination = join(MODULES_PATH, 'github', self.username, self.repository_name)
if not exists(repository_local_destination):
Repo.clone_from(self.repo_url, repository_local_destination, branch='master')
init_filename = join(repository_local_destination, '__init__.py')
open(init_filename, 'a').close()
@property
def project_fullname(self):
return 'packyou.github.{0}.{1}'.format(self.username, self.repository_name)
def load_module(self, fullname):
"""
Given a name it will load the module from github.
When the project is not locally stored it will clone the
repo from github.
"""
module = None
splitted_names = fullname.split('.')
_, _, module_name = fullname.rpartition('.')
_, remaining = find_module_path_in_cloned_repos(fullname)
if 'github' in splitted_names and not remaining:
self.clone_github_repo()
if len(splitted_names) == 2:
module = self.get_or_create_module(fullname)
if len(splitted_names) == 3:
username_directory = join(MODULES_PATH, 'github', self.username)
if not exists(username_directory):
mkdir(username_directory)
username_init_filename = join(MODULES_PATH, 'github', self.username, '__init__.py')
open(username_init_filename, 'a').close()
module = self.get_or_create_module(fullname)
if len(splitted_names) >= 4:
module = self.get_or_create_module(fullname)
elif self.username and self.repository_name:
# relative import from project root.
fullname = 'packyou.github.{0}.{1}.{2}'.format(self.username, self.repository_name, remaining)
module = self.get_or_create_module(fullname)
if module:
modules[fullname] = module
if remaining is not None:
modules[remaining] = module
return module
class GithubFinder(object):
def __init__(self):
self.username = None
self.repository_name = None
@memoize
def check_repository_available(self, username, repository_name):
"""
Sometimes github has a - in the username or repository name.
The - can't be used in the import statement.
"""
repo_url = 'https://github.com/{0}/{1}.git'.format(username, repository_name)
response = requests.get(repo_url)
if response.status_code == 404:
if '_' in username:
repo_url = 'https://github.com/{0}/{1}.git'.format(username.replace('_', '-'), repository_name)
response = requests.get(repo_url)
if response.status_code == 200:
return repo_url
if '_' in repository_name:
repo_url = 'https://github.com/{0}/{1}.git'.format(username, repository_name.replace('_', '-'))
response = requests.get(repo_url)
if response.status_code == 200:
return repo_url
repo_url = 'https://github.com/{0}/{1}.git'.format(username.replace('_', '-'), repository_name.replace('_', '-'))
response = requests.get(repo_url)
if response.status_code == 200:
return repo_url
raise ImportError('Github repository not found.')
return repo_url
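    # Worked example, not part of the original module: because '-' is not a
    # valid character in an import statement, an import of
    # packyou.github.some_user.some_repo is retried against
    # https://github.com/some-user/some-repo.git (and the other '_'/'-'
    # combinations above) until one of the URLs responds with HTTP 200.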
def find_module_in_cloned_repos(self, fullname):
return find_module_in_cloned_repos(fullname, GithubLoader)
def find_module(self, fullname, path=None):
"""
Finds a module and returns a module loader when
the import uses packyou
"""
LOGGER.info('Finding {0}'.format(fullname))
partent, _, module_name = fullname.rpartition('.')
path, _ = find_module_path_in_cloned_repos(fullname)
LOGGER.debug('FOUND PATH {0}'.format(path))
try:
            # Sometimes a project imported from github does a plain
            # "import x" (absolute import), which here translates to
            # packyou.github...x. We first try imp.find_module('x') directly
            # and return None if it succeeds, so that the regular Python
            # finders on the meta_path perform the import instead of
            # packyou's loaders.
if not path:
imp.find_module(module_name)
LOGGER.info('Absolute import: {0}. Original fullname {1}'.format(module_name, fullname))
return None
except ImportError:
            LOGGER.debug('imp.find_module could not find {0}; this is usually fine.'.format(module_name))
if 'packyou.github' in fullname:
fullname_parts = fullname.split('.')
repo_url = None
if len(fullname_parts) >= 3:
self.username = fullname.split('.')[2]
if len(fullname_parts) >= 4:
if not self.repository_name:
LOGGER.debug('FULLNAME -> {0} '.format(fullname))
self.repository_name = fullname.split('.')[3]
repo_url = self.check_repository_available(self.username, self.repository_name)
current_path = dirname(abspath(__file__))
repo_path = join(current_path, 'github', self.username, self.repository_name)
if repo_path not in path:
path.insert(0, repo_path)
LOGGER.info('Found {0} with path {1}'.format(fullname, path))
return GithubLoader(repo_url, path, self.username, self.repository_name)
elif self.username and self.repository_name and path:
LOGGER.info('Fullname {0} does not start with packyou, searching in cloned repos. Result was {1}'.format(fullname, path))
repo_url = self.check_repository_available(self.username, self.repository_name)
return GithubLoader(repo_url, path, self.username, self.repository_name)
LOGGER.info('Not found -> {0}'.format(fullname))
meta_path.append(GithubFinder())
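# Illustrative usage sketch, not part of the original module: with the finder
# registered above, imports under the virtual packyou.github.<user>.<repo>
# namespace clone the repository on first use. The user/repository/module
# names below are hypothetical placeholders.
def _example_packyou_import():
    from packyou.github.someuser.somerepo import somemodule  # noqa
    return somemodule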
| mit | -2,669,128,757,194,921,500 | 40.745098 | 133 | 0.589854 | false |
parapente/beets | beetsplug/fetchart.py | 1 | 25044 | # -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Fetches album art.
"""
from __future__ import (division, absolute_import, print_function,
unicode_literals)
from contextlib import closing
import os
import re
from tempfile import NamedTemporaryFile
import requests
from beets import plugins
from beets import importer
from beets import ui
from beets import util
from beets import config
from beets.util.artresizer import ArtResizer
try:
import itunes
HAVE_ITUNES = True
except ImportError:
HAVE_ITUNES = False
IMAGE_EXTENSIONS = ['png', 'jpg', 'jpeg']
CONTENT_TYPES = ('image/jpeg', 'image/png')
DOWNLOAD_EXTENSION = '.jpg'
CANDIDATE_BAD = 0
CANDIDATE_EXACT = 1
CANDIDATE_DOWNSCALE = 2
def _logged_get(log, *args, **kwargs):
"""Like `requests.get`, but logs the effective URL to the specified
`log` at the `DEBUG` level.
Use the optional `message` parameter to specify what to log before
the URL. By default, the string is "getting URL".
Also sets the User-Agent header to indicate beets.
"""
# Use some arguments with the `send` call but most with the
# `Request` construction. This is a cheap, magic-filled way to
# emulate `requests.get` or, more pertinently,
# `requests.Session.request`.
req_kwargs = kwargs
send_kwargs = {}
for arg in ('stream', 'verify', 'proxies', 'cert', 'timeout'):
if arg in kwargs:
send_kwargs[arg] = req_kwargs.pop(arg)
# Our special logging message parameter.
if 'message' in kwargs:
message = kwargs.pop('message')
else:
message = 'getting URL'
req = requests.Request(b'GET', *args, **req_kwargs)
with requests.Session() as s:
s.headers = {b'User-Agent': b'beets'}
prepped = s.prepare_request(req)
log.debug('{}: {}', message, prepped.url)
return s.send(prepped, **send_kwargs)
class RequestMixin(object):
"""Adds a Requests wrapper to the class that uses the logger, which
must be named `self._log`.
"""
def request(self, *args, **kwargs):
"""Like `requests.get`, but uses the logger `self._log`.
See also `_logged_get`.
"""
return _logged_get(self._log, *args, **kwargs)
# ART SOURCES ################################################################
class ArtSource(RequestMixin):
def __init__(self, log, config):
self._log = log
self._config = config
def get(self, album):
raise NotImplementedError()
class CoverArtArchive(ArtSource):
"""Cover Art Archive"""
URL = 'http://coverartarchive.org/release/{mbid}/front'
GROUP_URL = 'http://coverartarchive.org/release-group/{mbid}/front'
def get(self, album):
"""Return the Cover Art Archive and Cover Art Archive release group URLs
using album MusicBrainz release ID and release group ID.
"""
if album.mb_albumid:
yield self.URL.format(mbid=album.mb_albumid)
if album.mb_releasegroupid:
yield self.GROUP_URL.format(mbid=album.mb_releasegroupid)
class Amazon(ArtSource):
URL = 'http://images.amazon.com/images/P/%s.%02i.LZZZZZZZ.jpg'
INDICES = (1, 2)
def get(self, album):
"""Generate URLs using Amazon ID (ASIN) string.
"""
if album.asin:
for index in self.INDICES:
yield self.URL % (album.asin, index)
class AlbumArtOrg(ArtSource):
"""AlbumArt.org scraper"""
URL = 'http://www.albumart.org/index_detail.php'
PAT = r'href\s*=\s*"([^>"]*)"[^>]*title\s*=\s*"View larger image"'
def get(self, album):
"""Return art URL from AlbumArt.org using album ASIN.
"""
if not album.asin:
return
# Get the page from albumart.org.
try:
resp = self.request(self.URL, params={'asin': album.asin})
self._log.debug(u'scraped art URL: {0}', resp.url)
except requests.RequestException:
self._log.debug(u'error scraping art page')
return
# Search the page for the image URL.
m = re.search(self.PAT, resp.text)
if m:
image_url = m.group(1)
yield image_url
else:
self._log.debug(u'no image found on page')
class GoogleImages(ArtSource):
URL = u'https://www.googleapis.com/customsearch/v1'
def get(self, album):
"""Return art URL from google custom search engine
given an album title and interpreter.
"""
if not (album.albumartist and album.album):
return
search_string = (album.albumartist + ',' + album.album).encode('utf-8')
response = self.request(self.URL, params={
'key': self._config['google_key'].get(),
'cx': self._config['google_engine'].get(),
'q': search_string,
'searchType': 'image'
})
# Get results using JSON.
try:
data = response.json()
except ValueError:
self._log.debug(u'google: error loading response: {}'
.format(response.text))
return
if 'error' in data:
reason = data['error']['errors'][0]['reason']
self._log.debug(u'google fetchart error: {0}', reason)
return
if 'items' in data.keys():
for item in data['items']:
yield item['link']
class ITunesStore(ArtSource):
# Art from the iTunes Store.
def get(self, album):
"""Return art URL from iTunes Store given an album title.
"""
if not (album.albumartist and album.album):
return
search_string = (album.albumartist + ' ' + album.album).encode('utf-8')
try:
# Isolate bugs in the iTunes library while searching.
try:
results = itunes.search_album(search_string)
except Exception as exc:
self._log.debug('iTunes search failed: {0}', exc)
return
# Get the first match.
if results:
itunes_album = results[0]
else:
                self._log.debug('iTunes search for {0!r} got no results',
search_string)
return
if itunes_album.get_artwork()['100']:
small_url = itunes_album.get_artwork()['100']
big_url = small_url.replace('100x100', '1200x1200')
yield big_url
else:
self._log.debug(u'album has no artwork in iTunes Store')
except IndexError:
self._log.debug(u'album not found in iTunes Store')
class Wikipedia(ArtSource):
# Art from Wikipedia (queried through DBpedia)
DBPEDIA_URL = 'http://dbpedia.org/sparql'
WIKIPEDIA_URL = 'http://en.wikipedia.org/w/api.php'
SPARQL_QUERY = '''PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX dbpprop: <http://dbpedia.org/property/>
PREFIX owl: <http://dbpedia.org/ontology/>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX foaf: <http://xmlns.com/foaf/0.1/>
SELECT DISTINCT ?pageId ?coverFilename WHERE {{
?subject owl:wikiPageID ?pageId .
?subject dbpprop:name ?name .
?subject rdfs:label ?label .
{{ ?subject dbpprop:artist ?artist }}
UNION
{{ ?subject owl:artist ?artist }}
{{ ?artist foaf:name "{artist}"@en }}
UNION
{{ ?artist dbpprop:name "{artist}"@en }}
?subject rdf:type <http://dbpedia.org/ontology/Album> .
?subject dbpprop:cover ?coverFilename .
FILTER ( regex(?name, "{album}", "i") )
}}
Limit 1'''
def get(self, album):
if not (album.albumartist and album.album):
return
# Find the name of the cover art filename on DBpedia
cover_filename, page_id = None, None
dbpedia_response = self.request(
self.DBPEDIA_URL,
params={
'format': 'application/sparql-results+json',
'timeout': 2500,
'query': self.SPARQL_QUERY.format(
artist=album.albumartist.title(), album=album.album)
},
headers={'content-type': 'application/json'},
)
try:
data = dbpedia_response.json()
results = data['results']['bindings']
if results:
cover_filename = 'File:' + results[0]['coverFilename']['value']
page_id = results[0]['pageId']['value']
else:
self._log.debug('wikipedia: album not found on dbpedia')
except (ValueError, KeyError, IndexError):
self._log.debug('wikipedia: error scraping dbpedia response: {}',
dbpedia_response.text)
# Ensure we have a filename before attempting to query wikipedia
if not (cover_filename and page_id):
return
# DBPedia sometimes provides an incomplete cover_filename, indicated
# by the filename having a space before the extension, e.g., 'foo .bar'
# An additional Wikipedia call can help to find the real filename.
# This may be removed once the DBPedia issue is resolved, see:
# https://github.com/dbpedia/extraction-framework/issues/396
if ' .' in cover_filename and \
'.' not in cover_filename.split(' .')[-1]:
self._log.debug(
'wikipedia: dbpedia provided incomplete cover_filename'
)
lpart, rpart = cover_filename.rsplit(' .', 1)
# Query all the images in the page
wikipedia_response = self.request(
self.WIKIPEDIA_URL,
params={
'format': 'json',
'action': 'query',
'continue': '',
'prop': 'images',
'pageids': page_id,
},
headers={'content-type': 'application/json'},
)
# Try to see if one of the images on the pages matches our
            # incomplete cover_filename
try:
data = wikipedia_response.json()
results = data['query']['pages'][page_id]['images']
for result in results:
if re.match(re.escape(lpart) + r'.*?\.' + re.escape(rpart),
result['title']):
cover_filename = result['title']
break
except (ValueError, KeyError):
self._log.debug(
'wikipedia: failed to retrieve a cover_filename'
)
return
# Find the absolute url of the cover art on Wikipedia
wikipedia_response = self.request(
self.WIKIPEDIA_URL,
params={
'format': 'json',
'action': 'query',
'continue': '',
'prop': 'imageinfo',
'iiprop': 'url',
'titles': cover_filename.encode('utf-8'),
},
headers={'content-type': 'application/json'},
)
try:
data = wikipedia_response.json()
results = data['query']['pages']
for _, result in results.iteritems():
image_url = result['imageinfo'][0]['url']
yield image_url
except (ValueError, KeyError, IndexError):
self._log.debug('wikipedia: error scraping imageinfo')
return
class FileSystem(ArtSource):
"""Art from the filesystem"""
@staticmethod
def filename_priority(filename, cover_names):
"""Sort order for image names.
Return indexes of cover names found in the image filename. This
means that images with lower-numbered and more keywords will have
higher priority.
"""
return [idx for (idx, x) in enumerate(cover_names) if x in filename]
def get(self, path, cover_names, cautious):
"""Look for album art files in a specified directory.
"""
if not os.path.isdir(path):
return
# Find all files that look like images in the directory.
images = []
for fn in os.listdir(path):
for ext in IMAGE_EXTENSIONS:
if fn.lower().endswith(b'.' + ext.encode('utf8')) and \
os.path.isfile(os.path.join(path, fn)):
images.append(fn)
# Look for "preferred" filenames.
images = sorted(images,
key=lambda x: self.filename_priority(x, cover_names))
cover_pat = br"(\b|_)({0})(\b|_)".format(b'|'.join(cover_names))
for fn in images:
if re.search(cover_pat, os.path.splitext(fn)[0], re.I):
self._log.debug(u'using well-named art file {0}',
util.displayable_path(fn))
return os.path.join(path, fn)
# Fall back to any image in the folder.
if images and not cautious:
self._log.debug(u'using fallback art file {0}',
util.displayable_path(images[0]))
return os.path.join(path, images[0])
# Try each source in turn.
SOURCES_ALL = [u'coverart', u'itunes', u'amazon', u'albumart',
u'wikipedia', u'google']
ART_SOURCES = {
u'coverart': CoverArtArchive,
u'itunes': ITunesStore,
u'albumart': AlbumArtOrg,
u'amazon': Amazon,
u'wikipedia': Wikipedia,
u'google': GoogleImages,
}
# PLUGIN LOGIC ###############################################################
class FetchArtPlugin(plugins.BeetsPlugin, RequestMixin):
def __init__(self):
super(FetchArtPlugin, self).__init__()
self.config.add({
'auto': True,
'minwidth': 0,
'maxwidth': 0,
'enforce_ratio': False,
'remote_priority': False,
'cautious': False,
'cover_names': ['cover', 'front', 'art', 'album', 'folder'],
'sources': ['coverart', 'itunes', 'amazon', 'albumart'],
'google_key': None,
'google_engine': u'001442825323518660753:hrh5ch1gjzm',
})
self.config['google_key'].redact = True
# Holds paths to downloaded images between fetching them and
# placing them in the filesystem.
self.art_paths = {}
self.minwidth = self.config['minwidth'].get(int)
self.maxwidth = self.config['maxwidth'].get(int)
self.enforce_ratio = self.config['enforce_ratio'].get(bool)
if self.config['auto']:
# Enable two import hooks when fetching is enabled.
self.import_stages = [self.fetch_art]
self.register_listener('import_task_files', self.assign_art)
available_sources = list(SOURCES_ALL)
if not HAVE_ITUNES and u'itunes' in available_sources:
available_sources.remove(u'itunes')
if not self.config['google_key'].get() and \
u'google' in available_sources:
available_sources.remove(u'google')
sources_name = plugins.sanitize_choices(
self.config['sources'].as_str_seq(), available_sources)
self.sources = [ART_SOURCES[s](self._log, self.config)
for s in sources_name]
self.fs_source = FileSystem(self._log, self.config)
# Asynchronous; after music is added to the library.
def fetch_art(self, session, task):
"""Find art for the album being imported."""
if task.is_album: # Only fetch art for full albums.
if task.album.artpath and os.path.isfile(task.album.artpath):
# Album already has art (probably a re-import); skip it.
return
if task.choice_flag == importer.action.ASIS:
# For as-is imports, don't search Web sources for art.
local = True
elif task.choice_flag == importer.action.APPLY:
# Search everywhere for art.
local = False
else:
# For any other choices (e.g., TRACKS), do nothing.
return
path = self.art_for_album(task.album, task.paths, local)
if path:
self.art_paths[task] = path
# Synchronous; after music files are put in place.
def assign_art(self, session, task):
"""Place the discovered art in the filesystem."""
if task in self.art_paths:
path = self.art_paths.pop(task)
album = task.album
src_removed = (config['import']['delete'].get(bool) or
config['import']['move'].get(bool))
album.set_art(path, not src_removed)
album.store()
if src_removed:
task.prune(path)
# Manual album art fetching.
def commands(self):
cmd = ui.Subcommand('fetchart', help='download album art')
cmd.parser.add_option('-f', '--force', dest='force',
action='store_true', default=False,
help='re-download art when already present')
def func(lib, opts, args):
self.batch_fetch_art(lib, lib.albums(ui.decargs(args)), opts.force)
cmd.func = func
return [cmd]
# Utilities converted from functions to methods on logging overhaul
def _fetch_image(self, url):
"""Downloads an image from a URL and checks whether it seems to
actually be an image. If so, returns a path to the downloaded image.
Otherwise, returns None.
"""
try:
with closing(self.request(url, stream=True,
message='downloading image')) as resp:
if 'Content-Type' not in resp.headers \
or resp.headers['Content-Type'] not in CONTENT_TYPES:
self._log.debug(
'not a supported image: {}',
resp.headers.get('Content-Type') or 'no content type',
)
return None
# Generate a temporary file with the correct extension.
with NamedTemporaryFile(suffix=DOWNLOAD_EXTENSION,
delete=False) as fh:
for chunk in resp.iter_content(chunk_size=1024):
fh.write(chunk)
self._log.debug(u'downloaded art to: {0}',
util.displayable_path(fh.name))
return fh.name
except (IOError, requests.RequestException, TypeError) as exc:
# Handling TypeError works around a urllib3 bug:
# https://github.com/shazow/urllib3/issues/556
self._log.debug('error fetching art: {}', exc)
return None
def _is_valid_image_candidate(self, candidate):
"""Determine whether the given candidate artwork is valid based on
its dimensions (width and ratio).
Return `CANDIDATE_BAD` if the file is unusable.
Return `CANDIDATE_EXACT` if the file is usable as-is.
Return `CANDIDATE_DOWNSCALE` if the file must be resized.
"""
if not candidate:
return CANDIDATE_BAD
if not (self.enforce_ratio or self.minwidth or self.maxwidth):
return CANDIDATE_EXACT
# get_size returns None if no local imaging backend is available
size = ArtResizer.shared.get_size(candidate)
self._log.debug('image size: {}', size)
if not size:
self._log.warning(u'Could not get size of image (please see '
u'documentation for dependencies). '
u'The configuration options `minwidth` and '
u'`enforce_ratio` may be violated.')
return CANDIDATE_EXACT
# Check minimum size.
if self.minwidth and size[0] < self.minwidth:
self._log.debug('image too small ({} < {})',
size[0], self.minwidth)
return CANDIDATE_BAD
# Check aspect ratio.
if self.enforce_ratio and size[0] != size[1]:
self._log.debug('image is not square ({} != {})',
size[0], size[1])
return CANDIDATE_BAD
# Check maximum size.
if self.maxwidth and size[0] > self.maxwidth:
self._log.debug('image needs resizing ({} > {})',
size[0], self.maxwidth)
return CANDIDATE_DOWNSCALE
return CANDIDATE_EXACT
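    # Illustrative sketch, not part of the original plugin: the decision rules
    # above restated over an explicit (width, height) tuple so the three
    # outcomes are easy to see. The threshold values in the docstring are
    # arbitrary examples.
    @staticmethod
    def _example_candidate_decision(size, minwidth=0, maxwidth=0,
                                    enforce_ratio=False):
        """E.g. (400, 400) with minwidth=500 is CANDIDATE_BAD, (600, 800) with
        enforce_ratio=True is CANDIDATE_BAD, and (1600, 1600) with
        maxwidth=1000 is CANDIDATE_DOWNSCALE; otherwise CANDIDATE_EXACT.
        """
        if minwidth and size[0] < minwidth:
            return CANDIDATE_BAD
        if enforce_ratio and size[0] != size[1]:
            return CANDIDATE_BAD
        if maxwidth and size[0] > maxwidth:
            return CANDIDATE_DOWNSCALE
        return CANDIDATE_EXACT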
def art_for_album(self, album, paths, local_only=False):
"""Given an Album object, returns a path to downloaded art for the
album (or None if no art is found). If `maxwidth`, then images are
resized to this maximum pixel size. If `local_only`, then only local
image files from the filesystem are returned; no network requests
are made.
"""
out = None
check = None
# Local art.
cover_names = self.config['cover_names'].as_str_seq()
cover_names = map(util.bytestring_path, cover_names)
cautious = self.config['cautious'].get(bool)
if paths:
for path in paths:
candidate = self.fs_source.get(path, cover_names, cautious)
check = self._is_valid_image_candidate(candidate)
if check:
out = candidate
self._log.debug('found local image {}', out)
break
# Web art sources.
remote_priority = self.config['remote_priority'].get(bool)
if not local_only and (remote_priority or not out):
for url in self._source_urls(album):
if self.maxwidth:
url = ArtResizer.shared.proxy_url(self.maxwidth, url)
candidate = self._fetch_image(url)
check = self._is_valid_image_candidate(candidate)
if check:
out = candidate
self._log.debug('using remote image {}', out)
break
if self.maxwidth and out and check == CANDIDATE_DOWNSCALE:
out = ArtResizer.shared.resize(self.maxwidth, out)
return out
def batch_fetch_art(self, lib, albums, force):
"""Fetch album art for each of the albums. This implements the manual
fetchart CLI command.
"""
for album in albums:
if album.artpath and not force and os.path.isfile(album.artpath):
message = ui.colorize('text_highlight_minor', 'has album art')
else:
# In ordinary invocations, look for images on the
# filesystem. When forcing, however, always go to the Web
# sources.
local_paths = None if force else [album.path]
path = self.art_for_album(album, local_paths)
if path:
album.set_art(path, False)
album.store()
message = ui.colorize('text_success', 'found album art')
else:
message = ui.colorize('text_error', 'no art found')
self._log.info(u'{0}: {1}', album, message)
def _source_urls(self, album):
"""Generate possible source URLs for an album's art. The URLs are
not guaranteed to work so they each need to be attempted in turn.
This allows the main `art_for_album` function to abort iteration
through this sequence early to avoid the cost of scraping when not
necessary.
"""
source_names = {v: k for k, v in ART_SOURCES.items()}
for source in self.sources:
self._log.debug(
'trying source {0} for album {1.albumartist} - {1.album}',
source_names[type(source)],
album,
)
urls = source.get(album)
for url in urls:
yield url
| mit | 105,766,198,395,307,200 | 36.773756 | 80 | 0.550711 | false |
mrooney/wxbanker | wxbanker/art/transparent.py | 1 | 1370 | #!/usr/bin/env python
#
# https://launchpad.net/wxbanker
# transparent.py: Copyright 2007-2010 Mike Rooney <[email protected]>
#
# This file is part of wxBanker.
#
# wxBanker is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# wxBanker is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with wxBanker. If not, see <http://www.gnu.org/licenses/>.
"""
This file exists because wxPython wx.EmptyBitmapRGBA does not exist on all platforms.
"""
from wxbanker.art.embeddedimage import PyEmbeddedImage
catalog = {}
index = []
transparent = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAAZiS0dE"
"AP8A/wD/oL2nkwAAAAlwSFlzAAALEwAACxMBAJqcGAAAAAd0SU1FB9oCFgEaIY1FTngAAAAZ"
"dEVYdENvbW1lbnQAQ3JlYXRlZCB3aXRoIEdJTVBXgQ4XAAAAEklEQVQ4y2NgGAWjYBSMAggA"
"AAQQAAGFP6pyAAAAAElFTkSuQmCC")
index.append('transparent')
catalog['transparent'] = transparent | gpl-3.0 | -8,961,762,089,369,331,000 | 38.171429 | 85 | 0.764964 | false |
amudalab/concept-graphs | keyphrase/keyphrase/extractgrams.py | 1 | 6091 | import os
import sys
from stopwords_video import vid_stopwords
from stop_words import stopword
import nltk
import re
from nltk import stem
stemmer = stem.PorterStemmer()
from operator import itemgetter
import math
try:
    folder_name=sys.argv[1]
    folder_name="output/"+folder_name.split(".txt")[0]
    # max_gram is used as an n-gram size, so convert it from the raw
    # command-line string to an int.
    max_gram=int(sys.argv[2])
    #min_freq=sys.argv[3]
    output_folder="output"
    ext="_vgrams.txt"
except (IndexError, ValueError):
    print "Usage: python extractgrams.py <input_text_file> <max_gram>"
    sys.exit()
global start_end_stop
start_end_stop=['of','in','at','for',"a","an","the"]
global end_stop
end_stop=["a","an","the"]
global symbols
symbols=["+","-","*","/","-","@","?",".","(",")","[","]","{","}"]
global video
global numbers
numbers=['1','2','3','4','5','6','7','8','9','0']
video=[]
global max_height
max_height=0
global phrase
phrase=[]
# The unique text slides for each video must first be identified and stored in a folder "X" under the input folder.
#dir_name="input/ds4"
#res_file="ds4.key"
files=os.listdir(folder_name)
text_files=[]
for f in files:
if f.endswith(".txt"):
text_files.append(f)
class video_text:
def __init__(self,doc_name,height,text):
self.doc_name=doc_name
self.height=height
self.text=text
self.cand_phrase={}
self.ext=ext
def extract_grams(self,token,token_size,height):
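        # Enumerate every n-gram of length `token_size`; keep the ones that do
        # not begin or end with a stopword, and for rejected n-grams back off
        # to (token_size - 1)-grams over the same token list.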
grams=[]
grams.extend(nltk.ngrams(token,token_size))
for g in grams:
word=""
len_g=len(g)
if g[0] not in start_end_stop and g[len_g-1] not in end_stop and g[len_g-1] not in start_end_stop and g[0] not in stopword and g[len_g-1] not in stopword:
for t in g:
word+=t+" "
word=word.rstrip(" ")
word=remove_special_case(word)
if word not in (self.cand_phrase).keys():
self.cand_phrase[word]=1
if word not in phrase:
phrase.append(word)
else:
self.cand_phrase[word]+=1
else:
if token_size-1>=1:
self.extract_grams(token,token_size-1,height)
def extract_candidate_phrases(self):
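        # A text line of 1..max_gram words that passes the stopword checks is
        # kept whole as a candidate phrase; lines of max_gram words or more
        # that are not kept are broken down into n-grams via extract_grams().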
for t in range(len(self.text)):
self.text[t]=remove_hyphen(self.text[t])
self.text[t]=remove_special_case(self.text[t])
if ((len(self.text[t].split())>=1 and len(self.text[t].split())<=max_gram)) and self.text[t] not in vid_stopwords and stop_word_check(self.text[t]):
if self.text[t] not in (self.cand_phrase).keys():
self.cand_phrase[self.text[t]]=1
if self.text[t] not in phrase:
phrase.append(self.text[t])
else:
self.cand_phrase[self.text[t]]+=1
else:
if len(self.text[t].split())>=max_gram:
token=(self.text[t]).split()
self.extract_grams(token,max_gram,self.height[t])
#print self.doc_name
#print self.cand_phrase
def remove_hyphen(g):
if g.count("-")>0:
temp=g.split("-")
l=len(temp)
word=""
for i in range(l):
word+=temp[i]+" "
g=word.rstrip(" ")
return g
def remove_special_case(g):
token=g.split()
word=""
for t in token:
t=t.rstrip(" ")
t=t.lstrip(" ")
t=t.rstrip("()")
t=t.lstrip("()")
t=t.lstrip("(")
t=t.rstrip("(")
t=t.rstrip(")")
t=t.lstrip(")")
t=t.lstrip("[")
t=t.rstrip("]")
t=t.lstrip("{")
t=t.rstrip("}")
t=t.rstrip(":")
t=t.lstrip(":")
t=t.lstrip(",")
t=t.rstrip(",")
t=t.rstrip(".")
word+=t+" "
word=word.rstrip(" ")
#print word
return word
def stop_word_check(t):
token=t.split()
token_len=len(token)
if token[0] not in start_end_stop and token[token_len-1] not in start_end_stop and token[token_len-1] not in end_stop and token[0] not in stopword and token[token_len-1] not in stopword:
return 1
else:
return 0
def remove_phrase_with_spl_char():
#print symbols+numbers
remove_phrase=[]
for word in phrase:
for chars in symbols+numbers:
if chars in word:
remove_phrase.append(word)
#print word
break
for word in remove_phrase:
phrase.remove(word)
def write_phrase():
output_file_name=folder_name.split("/")
size=len(output_file_name)
#res_file=output_folder+"/"+output_file_name[size-1]+"_grams.txt"
res_file=output_folder+"/"+output_file_name[size-1]+ext
f=open(res_file,"w")
for p in phrase:
f.write(p+"\n")
f.close()
for f in sorted(text_files):
height=[]
row_num=[]
text=[]
ftxt=open(folder_name+"/"+f,"r")
content=ftxt.readlines()
ftxt.close()
line_number=1
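    # Each input line appears to follow the pattern
    # "<field0>::<label> <height>::<text>"; the second token of the middle
    # field is parsed as the text height and the third field as the text.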
for line in content:
data=line.split("::")
if len(data)==3:
height.append(int(data[1].split(" ")[1]))
if int(data[1].split(" ")[1])>max_height:
max_height=int(data[1].split(" ")[1])
text.append(data[2].rstrip("\n").lower())
video.append(video_text(folder_name+"/"+f,height,text))
for v in video:
v.extract_candidate_phrases()
remove_phrase_with_spl_char()
write_phrase()
| mit | 9,185,321,193,961,970,000 | 32.103261 | 195 | 0.482023 | false |
xiaotaw/chembl | dnn_model/single_vs_chemdiv.py | 1 | 3682 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import time
import datetime
import numpy as np
import pandas as pd
import tensorflow as tf
from matplotlib import pyplot as plt
import dnn_model
sys.path.append("/home/scw4750/Documents/chembl/data_files/")
import chembl_input as ci
vs_batch_size = 1024
def virtual_screening_chemdiv(target, g_step, gpu_num=0):
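  # Score every ChemDiv compound with the trained single-target DNN restored
  # from the checkpoint at `g_step`, writing "<id>\t<softmax score>" lines to
  # a .pred file under pred_files/<target>/.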
t_0 = time.time()
# dataset
d = ci.DatasetChemDiv(target)
# batch size
batch_size = 128
# input vec_len
input_vec_len = d.num_features
# keep prob
keep_prob = 0.8
# weight decay
wd = 0.004
# g_step
#g_step = 2236500
# virtual screen pred file
pred_dir = "pred_files/%s" % target
if not os.path.exists(pred_dir):
os.makedirs(pred_dir)
pred_path = os.path.join(pred_dir, "vs_chemdiv_%s_%d_%4.3f_%4.3e_%d.pred" % (target, batch_size, keep_prob, wd, g_step))
predfile = open(pred_path, 'w')
print("virtual screen ChemDiv starts at: %s\n" % datetime.datetime.now())
# checkpoint file
ckpt_dir = "ckpt_files/%s" % target
ckpt_path = os.path.join(ckpt_dir, '%d_%4.3f_%4.3e.ckpt' % (batch_size, keep_prob, wd))
# screening
with tf.Graph().as_default(), tf.device("/gpu: %d" % gpu_num):
# the input
input_placeholder = tf.placeholder(tf.float32, shape = (None, input_vec_len))
# the term
base = dnn_model.term(input_placeholder, in_units=input_vec_len, wd=wd, keep_prob=1.0)
# the branches
softmax = dnn_model.branch(target, base, wd=wd, keep_prob=1.0)
# create a saver.
saver = tf.train.Saver(tf.trainable_variables())
# Start screen
config=tf.ConfigProto(allow_soft_placement=True)
config.gpu_options.per_process_gpu_memory_fraction = 0.35
with tf.Session(config=config) as sess:
# Restores variables from checkpoint
saver.restore(sess, ckpt_path + "-%d" % g_step)
for ids, features in d.batch_generator_chemdiv(vs_batch_size):
sm = sess.run(softmax, feed_dict = {input_placeholder: features})
for id_, sm_v in zip(ids, sm[:, 1]):
predfile.write("%s\t%f\n" % (id_, sm_v))
"""
try:
while True:
ids, features = d.generate_batch(vs_batch_size)
sm = sess.run(softmax, feed_dict = {input_placeholder: features.toarray()})
for id_, sm_v in zip(ids, sm[:, 1]):
predfile.write("%s\t%f\n" % (id_, sm_v))
except StopIteration:
pass
"""
predfile.close()
print("duration: %.3f" % (time.time() - t_0))
def analyse_sort_chemdiv(target, g_step):
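  # Sort the ChemDiv predictions by score (descending) and save the top 1000
  # compounds to a companion .pred1000 file.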
pred_file = "pred_files/%s/vs_chemdiv_%s_128_0.800_4.000e-03_%d.pred" % (target, target, g_step)
pred = pd.read_csv(pred_file, sep="\t", names=("id", "pred"))
pred.sort_values(by="pred", ascending=False, inplace=True)
pred1000 = pred.iloc[:1000]
pred1000.to_csv(pred_file.replace(".pred", ".pred1000"), header=False, sep="\t")
if __name__ == "__main__":
target_list = ["CHEMBL203", "CHEMBL204", "CHEMBL205",
"CHEMBL206", "CHEMBL217", "CHEMBL235", "CHEMBL240",
"CHEMBL244", "CHEMBL253", "CHEMBL279", "CHEMBL340",
"CHEMBL4005", "CHEMBL4296", "CHEMBL4805", "CHEMBL4822",
]
g_list = [2161371, 2236500, 2235600,
2091321, 2161661, 2086841, 2020411,
2161951, 2012041, 2161661, 2246400,
2235900, 2238000, 2168041, 1936221,
]
#i = int(sys.argv[1])
#target = target_list[i]
#g_step = g_list[i]
virtual_screening_chemdiv(target="CHEMBL4005", g_step=2235900, gpu_num=1)
analyse_sort_chemdiv("CHEMBL4005", g_step=2235900)
| apache-2.0 | -2,227,815,573,639,499,300 | 30.20339 | 122 | 0.626018 | false |