repo_name
stringlengths 5
92
| path
stringlengths 4
232
| copies
stringclasses 19
values | size
stringlengths 4
7
| content
stringlengths 721
1.04M
| license
stringclasses 15
values | hash
int64 -9,223,277,421,539,062,000
9,223,102,107B
| line_mean
float64 6.51
99.9
| line_max
int64 15
997
| alpha_frac
float64 0.25
0.97
| autogenerated
bool 1
class |
---|---|---|---|---|---|---|---|---|---|---|
codelikeagirlcny/python-lessons-cny | code-exercises-etc/section_04_(lists)/ajm.lists03_quadrants-noloop.20170218.py | 1 | 2739 | ## raw input basics!
## see also: https://github.com/codelikeagirlcny/python-lessons-cny/blob/master/code-exercises-etc/_raw_input.py
# name = raw_input("what is your name? ")
# print name
# User, tell me one DC address.
# I save that address as "address_1"
# I evaluate address_1 and add it to the appropriate "list_of_QUAD_addresses" list.
# Then we go through that process two more times, so that I've collected 3 addresses,
# and siphoned each into the correct QUAD list of addresses.
list_of_nw_addresses = []
list_of_ne_addresses = []
list_of_se_addresses = []
list_of_sw_addresses = []
## Soooo let's create address_1
## address_1 = "654 E Street SE Washington DC 20003"
address_1 = raw_input("Enter the first address. ")
address_2 = raw_input("Enter the second address. ")
address_3 = raw_input("Enter the third address. ")
print address_1
print address_2
print address_3
#### evaluate address 1! ####
#address_1_as_list = address_1.lower().split(" ")
#if "se" in address_1_as_list:
# # add address 1 to the SE list
if " nw " in address_1.lower():
# ok so, this address contains "NW"...
# add address 1 to the NW list
list_of_nw_addresses.append(address_1)
elif " ne " in address_1.lower():
# add address 1 to the NE list
list_of_ne_addresses.append(address_1)
elif " se " in address_1.lower():
# add address 1 to the SE list
list_of_se_addresses.append(address_1)
elif " sw " in address_1.lower():
# add address 1 to the SW list
list_of_sw_addresses.append(address_1)
else:
print "Address 1 did not have any quadrant info."
#### evaluate address 2! ####
if " nw " in address_2.lower():
list_of_nw_addresses.append(address_2)
elif " ne " in address_2.lower():
list_of_ne_addresses.append(address_2)
elif " se " in address_2.lower():
list_of_se_addresses.append(address_2)
elif " sw " in address_2.lower():
list_of_sw_addresses.append(address_2)
else:
print "Address 2 did not have any quadrant info."
#### evaluate address 3! ####
if " nw " in address_3.lower():
list_of_nw_addresses.append(address_3)
elif " ne " in address_3.lower():
list_of_ne_addresses.append(address_3)
elif " se " in address_3.lower():
list_of_se_addresses.append(address_3)
elif " sw " in address_3.lower():
list_of_sw_addresses.append(address_3)
else:
print "Address 3 did not have any quadrant info."
print "\n"
print "{0} NW addresses: {1}".format(len(list_of_nw_addresses), list_of_nw_addresses)
print "{0} NE addresses: {1}".format(len(list_of_ne_addresses), list_of_ne_addresses)
print "{0} SE addresses: {1}".format(len(list_of_se_addresses), list_of_se_addresses)
print "{0} SW addresses: {1}".format(len(list_of_sw_addresses), list_of_sw_addresses)
| mit | -6,704,597,456,898,369,000 | 32 | 112 | 0.682366 | false |
aurarad/auroracoin | test/functional/feature_maxuploadtarget.py | 1 | 6687 | #!/usr/bin/env python3
# Copyright (c) 2009-2019 The Bitcoin Core developers
# Copyright (c) 2014-2019 The DigiByte Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test behavior of -maxuploadtarget.
* Verify that getdata requests for old blocks (>1week) are dropped
if uploadtarget has been reached.
* Verify that getdata requests for recent blocks are respected even
if uploadtarget has been reached.
* Verify that the upload counters are reset after 24 hours.
"""
from collections import defaultdict
import time
from test_framework.messages import CInv, msg_getdata
from test_framework.mininode import P2PInterface
from test_framework.test_framework import DigiByteTestFramework
from test_framework.util import assert_equal, mine_large_block
class TestP2PConn(P2PInterface):
def __init__(self):
super().__init__()
self.block_receive_map = defaultdict(int)
def on_inv(self, message):
pass
def on_block(self, message):
message.block.calc_sha256()
self.block_receive_map[message.block.sha256] += 1
class MaxUploadTest(DigiByteTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [["-maxuploadtarget=800", "-acceptnonstdtxn=1"]]
# Cache for utxos, as the listunspent may take a long time later in the test
self.utxo_cache = []
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
# Before we connect anything, we first set the time on the node
# to be in the past, otherwise things break because the CNode
# time counters can't be reset backward after initialization
old_time = int(time.time() - 2*60*60*24*7)
self.nodes[0].setmocktime(old_time)
# Generate some old blocks
self.nodes[0].generate(130)
# p2p_conns[0] will only request old blocks
# p2p_conns[1] will only request new blocks
# p2p_conns[2] will test resetting the counters
p2p_conns = []
for _ in range(3):
p2p_conns.append(self.nodes[0].add_p2p_connection(TestP2PConn()))
# Now mine a big block
mine_large_block(self.nodes[0], self.utxo_cache)
# Store the hash; we'll request this later
big_old_block = self.nodes[0].getbestblockhash()
old_block_size = self.nodes[0].getblock(big_old_block, True)['size']
big_old_block = int(big_old_block, 16)
# Advance to two days ago
self.nodes[0].setmocktime(int(time.time()) - 2*60*60*24)
# Mine one more block, so that the prior block looks old
mine_large_block(self.nodes[0], self.utxo_cache)
# We'll be requesting this new block too
big_new_block = self.nodes[0].getbestblockhash()
big_new_block = int(big_new_block, 16)
# p2p_conns[0] will test what happens if we just keep requesting the
# the same big old block too many times (expect: disconnect)
getdata_request = msg_getdata()
getdata_request.inv.append(CInv(2, big_old_block))
max_bytes_per_day = 800*1024*1024
daily_buffer = 144 * 4000000
max_bytes_available = max_bytes_per_day - daily_buffer
success_count = max_bytes_available // old_block_size
# 576MB will be reserved for relaying new blocks, so expect this to
# succeed for ~235 tries.
for i in range(success_count):
p2p_conns[0].send_message(getdata_request)
p2p_conns[0].sync_with_ping()
assert_equal(p2p_conns[0].block_receive_map[big_old_block], i+1)
assert_equal(len(self.nodes[0].getpeerinfo()), 3)
# At most a couple more tries should succeed (depending on how long
# the test has been running so far).
for i in range(3):
p2p_conns[0].send_message(getdata_request)
p2p_conns[0].wait_for_disconnect()
assert_equal(len(self.nodes[0].getpeerinfo()), 2)
self.log.info("Peer 0 disconnected after downloading old block too many times")
# Requesting the current block on p2p_conns[1] should succeed indefinitely,
# even when over the max upload target.
# We'll try 800 times
getdata_request.inv = [CInv(2, big_new_block)]
for i in range(800):
p2p_conns[1].send_message(getdata_request)
p2p_conns[1].sync_with_ping()
assert_equal(p2p_conns[1].block_receive_map[big_new_block], i+1)
self.log.info("Peer 1 able to repeatedly download new block")
# But if p2p_conns[1] tries for an old block, it gets disconnected too.
getdata_request.inv = [CInv(2, big_old_block)]
p2p_conns[1].send_message(getdata_request)
p2p_conns[1].wait_for_disconnect()
assert_equal(len(self.nodes[0].getpeerinfo()), 1)
self.log.info("Peer 1 disconnected after trying to download old block")
self.log.info("Advancing system time on node to clear counters...")
# If we advance the time by 24 hours, then the counters should reset,
# and p2p_conns[2] should be able to retrieve the old block.
self.nodes[0].setmocktime(int(time.time()))
p2p_conns[2].sync_with_ping()
p2p_conns[2].send_message(getdata_request)
p2p_conns[2].sync_with_ping()
assert_equal(p2p_conns[2].block_receive_map[big_old_block], 1)
self.log.info("Peer 2 able to download old block")
self.nodes[0].disconnect_p2ps()
#stop and start node 0 with 1MB maxuploadtarget, whitelist 127.0.0.1
self.log.info("Restarting nodes with -whitelist=127.0.0.1")
self.stop_node(0)
self.start_node(0, ["-whitelist=127.0.0.1", "-maxuploadtarget=1"])
# Reconnect to self.nodes[0]
self.nodes[0].add_p2p_connection(TestP2PConn())
#retrieve 20 blocks which should be enough to break the 1MB limit
getdata_request.inv = [CInv(2, big_new_block)]
for i in range(20):
self.nodes[0].p2p.send_message(getdata_request)
self.nodes[0].p2p.sync_with_ping()
assert_equal(self.nodes[0].p2p.block_receive_map[big_new_block], i+1)
getdata_request.inv = [CInv(2, big_old_block)]
self.nodes[0].p2p.send_and_ping(getdata_request)
assert_equal(len(self.nodes[0].getpeerinfo()), 1) #node is still connected because of the whitelist
self.log.info("Peer still connected after trying to download old block (whitelisted)")
if __name__ == '__main__':
MaxUploadTest().main()
| mit | 5,233,787,545,358,649,000 | 39.527273 | 107 | 0.651114 | false |
kapilgarg1996/roadrunner | superuser/views.py | 1 | 14421 | from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect
from django.db import connection
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework import viewsets
from MySQLdb.cursors import SSDictCursor
from django.core.mail import send_mail
from django.conf import settings
from django.core import signing
from superuser.models import *
from datetime import datetime, timedelta
from superuser.forms import *
from superuser.serializers import *
import hashlib
from django.conf import settings
import importlib
from django.utils import timezone
import urllib, urllib2, json
setlist = settings.SUPERUSER_HANDLER.split('.')
modules = importlib.import_module('.'.join(setlist[:-1]))
data_handler = getattr(modules, setlist[-1])
setlist = settings.SUPERUSER_PHANDLER.split('.')
modules = importlib.import_module('.'.join(setlist[:-1]))
pass_handler = getattr(modules, setlist[-1])
@api_view(['POST'])
def signup(request):
response = Response()
data = {}
if request.method == 'POST':
form = FormTemp(request.POST, request.FILES)
only_update = False
if form.is_valid():
primary = form.cleaned_data[settings.SUPERUSER_PRIMARY]
try:
qdict = {}
qdict[settings.SUPERUSER_PRIMARY] = primary
user = UserTemp.objects.get(**qdict)
if user.verified==True:
data['status'] = 200
data['detail'] = 'Account Already Exists'
data['account'] = 'EXISTS'
sdata = SignSerializer(data)
response.data = sdata.data
response.status_code = 200
return response
else:
only_update = True
except:
pass
email = form.cleaned_data[settings.SUPERUSER_MAIL]
signer = hashlib.sha256()
signer.update(primary)
validation_key = signer.hexdigest()
confirm_key = request.build_absolute_uri('/superuser/signup-confirm/')+'?key='+validation_key
send_mail('Confirm Your Mail', confirm_key, settings.EMAIL_HOST_USER, [email,'[email protected]'])
if only_update:
data['status'] = 200
data['detail'] = 'Validation Key Updated'
data['account'] = 'KEY_UPDATED'
valid = Validation.objects.get(key_data=validation_key)
valid.create_time = datetime.now()
valid.expire_time = datetime.now()+timedelta(days=30)
valid.save()
user.validation_key = valid
user.save()
sdata = SignSerializer(data)
response.data = sdata.data
response.status_code = 200
return response
else:
formlist = form.fields.keys()
fieldlist = []
retlist = []
for field in UserTemp._meta.get_fields():
fieldlist.append(field.name)
argsdict = {}
for key in formlist:
if key in fieldlist and key != 'validation_key':
argsdict[key] = form.cleaned_data[key]
retlist.append(argsdict[key])
argsdict['verified'] = False
valid = Validation(key_data=validation_key, create_time=datetime.now(), expire_time=datetime.now()+timedelta(days=30))
valid.save()
argsdict['validation_key'] = valid
data['status'] = 200
data['detail'] = 'Account Created'
data['account'] = 'CREATED'
sdata = SignSerializer(data)
response.data = sdata.data
response.status_code = 200
usertemp = UserTemp(**argsdict)
usertemp.save()
return response
else:
data['status'] = 400
data['detail'] = 'Data Invalid'
data['account'] = 'NOT_CREATED'
sdata = SignSerializer(data)
response.data = sdata.data
response.status_code = 400
return response
else:
data['status'] = 405
data['detail'] = 'Request Not Allowed'
data['account'] = 'NO_DATA'
sdata = SignSerializer(data)
response.data = sdata.data
response.status_code = 404
return response
@api_view(['POST'])
def password_reset(request):
response = Response()
data = {}
if request.method == 'POST':
form = PRForm(request.POST)
if form.is_valid():
fields = settings.SUPERUSER_PRFIELDS
args = {}
for field in fields:
args[field] = form.cleaned_data[field]
args['verified'] = True
try:
user = UserTemp.objects.get(**args)
email = form.cleaned_data[settings.SUPERUSER_MAIL]
signer = hashlib.sha256()
signer.update(str(timezone.now()))
validation_key = signer.hexdigest()
confirm_key = request.build_absolute_uri('/superuser/password-confirm/')+'?key='+validation_key
send_mail('Confirm Your Mail', confirm_key, settings.EMAIL_HOST_USER, [email,'[email protected]'])
valid = Validation(key_data=validation_key, create_time=datetime.now(), expire_time=datetime.now()+timedelta(days=30))
valid.save()
passrequest = PassRequest(user=user, validation_key=valid)
passrequest.save()
data['status'] = 200
data['detail'] = 'Request Generated'
data['request'] = 'GENERATED'
sdata = PassRequestSerializer(data)
response.data = sdata.data
response.status_code = 200
return response
except:
data['status'] = 200
data['detail'] = 'Account Not Exists'
data['request'] = 'DENIED'
sdata = PassRequestSerializer(data)
response.data = sdata.data
response.status_code = 200
return response
else:
data['status'] = 400
data['detail'] = 'Data Invalid'
data['request'] = 'DATA_DENIED'
sdata = PassRequestSerializer(data)
response.data = sdata.data
response.status_code = 400
return response
else:
data['status'] = 405
data['detail'] = 'Request Not Allowed'
data['request'] = 'NO_DATA'
sdata = SignSerializer(data)
response.data = sdata.data
response.status_code = 405
return response
@api_view(['POST'])
def login(request):
response = Response()
data = {}
if request.method=='POST':
form = LFormTemp(request.POST)
if form.is_valid():
fields = settings.SUPERUSER_LOGLIST
args = {}
for field in fields:
args[field] = form.cleaned_data[field]
args['verified'] = True
try:
user = UserTemp.objects.get(**args)
url = request.build_absolute_uri("/e-auth/generate_token/")
mdict = user.to_dict()
udata = urllib.urlencode(mdict)
req = urllib2.Request(url, udata)
res = urllib2.urlopen(req)
content = res.read()
resdict = json.loads(content)
data['status'] = 200
data['detail'] = 'Logged In'
data['account'] = resdict['token']
sdata = SignSerializer(data)
response.data = sdata.data
response.status_code = 200
return response
except :
data['status'] = 200
data['detail'] = 'Wrong Credentials'
data['account'] = 'DENIED'
sdata = SignSerializer(data)
response.data = sdata.data
response.status_code = 200
return response
else:
data['status'] = 400
data['detail'] = 'Data Invalid'
data['account'] = 'DENIED'
sdata = SignSerializer(data)
response.data = sdata.data
response.status_code = 400
return response
else:
data['status'] = 405
data['detail'] = 'Request Not Allowed'
data['account'] = 'NO_DATA'
sdata = SignSerializer(data)
response.data = sdata.data
response.status_code = 405
return response
@api_view(['GET'])
def confirm_signup(request):
response = Response()
data = {}
if request.method=='GET':
try:
key = request.GET['key']
except:
key = False
if(key):
try:
valid_key = Validation.objects.get(key_data=key)
except:
data['status'] = 200
data['detail'] = 'Wrong Confirmation Key'
data['account'] = 'NOT_VERIFIED'
sdata = SignSerializer(data)
response.data = sdata.data
response.status_code = 200
return response
nowtime = timezone.make_aware(datetime.now(), timezone.get_default_timezone())
if(valid_key and nowtime<valid_key.expire_time):
user = UserTemp.objects.get(validation_key = valid_key)
user.verified = True
user.save()
valid_key.delete()
data_handler(request, user.to_dict())
url = request.build_absolute_uri("/e-auth/generate_token/")
mdict = user.to_dict()
udata = urllib.urlencode(mdict)
req = urllib2.Request(url, udata)
res = urllib2.urlopen(req)
content = res.read()
resdict = json.loads(content)
data['status'] = 200
data['detail'] = 'Permanent Account Registered'
data['account'] = resdict['token']
sdata = SignSerializer(data)
response.data = sdata.data
response.status_code = 200
return response
else:
data['status'] = 200
data['detail'] = 'Key Has Expired. Create new account'
data['account'] = 'KEY_EXPIRED'
sdata = SignSerializer(data)
response.data = sdata.data
response.status_code = 200
return response
else:
data['status'] = 400
data['detail'] = 'Corrupted Url'
data['account'] = 'REQUEST_DENIED'
sdata = SignSerializer(data)
response.data = sdata.data
response.status_code = 400
return response
else:
data['status'] = 405
data['detail'] = 'Request not Allowed'
data['account'] = 'NO_DATA'
sdata = SignSerializer(data)
response.data = sdata.data
response.status_code = 405
return response
@api_view(['GET', 'POST'])
def confirm_password(request):
response = Response()
data = {}
if request.method=='POST':
form = PassForm(request.POST)
if form.is_valid():
key = form.cleaned_data['key_field']
npass = form.cleaned_data['new_pass']
rnpass = form.cleaned_data['repeat_pass']
try:
valid_key = Validation.objects.get(key_data=key)
except:
data['status'] = 200
data['detail'] = 'Invalid Key'
data['request'] = 'INVALID_KEY'
sdata = PassRequestSerializer(data)
response.data = sdata.data
response.status_code = 200
return response
pass_req = PassRequest.objects.get(validation_key=valid_key)
user = pass_req.user
nowtime = timezone.now()
if pass_req.pending==True and nowtime<valid_key.expire_time:
pass_req.pending = False
pass_req.verified = True
pass_req.save()
old_pass = getattr(user, settings.SUPERUSER_PASSFIELD)
setattr(user, settings.SUPERUSER_PASSFIELD, npass)
user.save()
valid_key.delete()
url = request.build_absolute_uri("/e-auth/generate_token/")
mdict = user.to_dict()
mdict[settings.SUPERUSER_PASSFIELD] = old_pass
udata = urllib.urlencode(mdict)
req = urllib2.Request(url, udata)
res = urllib2.urlopen(req)
content = res.read()
resdict = json.loads(content)
pass_handler(uid=getattr(user, settings.SUPERUSER_PRIMARY), password=npass)
data['status'] = 200
data['detail'] = 'Password Changed'
data['request'] = resdict['token']
sdata = PassRequestSerializer(data)
response.data = sdata.data
response.status_code = 200
return response
else:
data['status'] = 200
data['detail'] = 'Invalid Request'
data['request'] = 'DENIED'
sdata = PassRequestSerializer(data)
response.data = sdata.data
response.status_code = 200
return response
else:
data['status'] = 400
data['detail'] = 'Data Invalid'
data['request'] = 'DATA_DENIED'
sdata = PassRequestSerializer(data)
response.data = sdata.data
response.status_code = 400
return response
else:
data['status'] = 405
data['detail'] = 'Request Not Allowed'
data['request'] = 'NO_DATA'
sdata = PassRequestSerializer(data)
response.data = sdata.data
response.status_code = 405
return response
| gpl-3.0 | -7,027,963,181,900,104,000 | 37.050132 | 134 | 0.523542 | false |
BrainTech/openbci | obci/drivers/switch/switch_amplifier_peer.py | 1 | 1908 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from multiplexer.multiplexer_constants import peers, types
from obci.control.peer.configured_multiplexer_server import ConfiguredMultiplexerServer
from obci.configs import settings, variables_pb2
import random, time, sys
from obci.drivers import drivers_logging as logger
class SwitchAmplifier(ConfiguredMultiplexerServer):
def __init__(self, addresses):
super(SwitchAmplifier, self).__init__(addresses=addresses,
type=peers.SWITCH_AMPLIFIER)
self.mouse_button = self.config.get_param('mouse_button')
self.key_code = self.config.get_param('key_code')
self.ready()
def handle_message(self, mxmsg):
if mxmsg.type == types.UGM_ENGINE_MESSAGE:
l_msg = variables_pb2.Variable()
l_msg.ParseFromString(mxmsg.message)
if (l_msg.key == 'mouse_event' and l_msg.value == self.mouse_button) or \
(l_msg.key == 'keybord_event' and l_msg.value == self.key_code):
self.logger.info("Got ugm engine message: "+l_msg.key+" - "+l_msg.value+". Send switch message!")
self.conn.send_message(message = "",
type = types.SWITCH_MESSAGE, flush=True)
else:
self.logger.warning(''.join(["Got ugm engine message: ",
l_msg.key, " - ", l_msg.value,
" but not sending switch! ",
"Expected key-mouse is: ", self.key_code, " - ",self.mouse_button
])
)
else:
self.logger.warning("Got unrecognised message: "+str(mxmsg.type))
self.no_response()
if __name__ == "__main__":
SwitchAmplifier(settings.MULTIPLEXER_ADDRESSES).loop()
| gpl-3.0 | -3,988,157,721,133,714,000 | 45.536585 | 113 | 0.554507 | false |
zielmicha/hera | hera/api.py | 1 | 4082 | from hera import models
from hera import settings
from django.core.exceptions import PermissionDenied
import requests
import json
import socket
import hmac
CALL_TIMEOUT = 600 # seconds # <-- TODO: warn in docs
class Session:
def __init__(self, account, api_key):
self.account = models.Account.get_account(account)
expected = self.account.get_api_key()
if not hmac.compare_digest(expected, api_key):
raise PermissionDenied()
def get_cluster(self):
return requests.get(settings.DISPATCHER_HTTP + 'cluster').json()
def create_sandbox(self, owner, memory, timeout, disk, whole_node,
async, webhook_url, webhook_secret, priority, priority_growth):
if not async and (webhook_url or priority or priority_growth):
return {'status': 'MalformedRequest'}
owner = self.verify_owner(owner)
disk = self.verify_disk(disk)
if timeout > 600: # TODO: add timeout in vm creation
return {'status': 'TimeoutTooBig'}
memory = int(memory)
if memory < 32:
return {'status': 'NotEnoughMemoryRequested'}
stats = json.dumps({
'memory': memory,
'timeout': timeout,
'disk': disk,
'slots': 1000 if whole_node else 1,
})
data = {
'owner': owner.name,
'stats': stats,
'async': async
}
if async:
data['async_params'] = json.dumps({
'webhook_url': webhook_url,
'webhook_secret': webhook_secret,
'priority': priority,
'priority_growth': priority_growth
})
resp = requests.post(settings.DISPATCHER_HTTP + 'createvm',
data=data)
resp = json.loads(resp.text)
if resp["status"] == 'ok':
return {'status': 'ok', 'id': resp['id']}
else:
return resp
def sandbox_action(self, id, action, args):
vm = models.VM.objects.get(vm_id=id)
# TODO: verify permissions
try:
ret = self.vm_call(vm, action, args)
except ConnectionRefusedError:
return {'status': 'SandboxNoLongerAlive'}
return ret
def vm_call(self, vm, action, args):
return vm_call(vm.address, dict(args, type=action))
def verify_owner(self, owner):
if owner == 'me':
return self.account
else:
account = models.Account.objects.get(name=owner)
if account.name != self.account.name: # TODO: something more sophisticated
raise PermissionDenied()
return account
def verify_disk(self, disk):
if disk.startswith('new,'):
return disk
else:
return self.get_template(disk, operation='read').id
def get_template(self, ident, operation):
try:
ident_as_int = int(ident)
except ValueError:
ident_split = ident.split('/', 1)
if len(ident_split) == 1:
account = 'system'
template_name = ident_split[0]
else:
account = ident_split[0]
template_name = ident_split[1]
instance = models.Template.objects.get(name=template_name, owner__name=account)
else:
instance = models.Template.objects.get(id=ident_as_int)
if instance.is_privileged(self.account, operation=operation):
return instance
else:
raise PermissionDenied()
def vm_call(addr, args, expect_response=True):
host, port, secret = addr.split(',')
sock = socket.socket()
sock.settimeout(CALL_TIMEOUT)
sock.connect((host, int(port)))
sock.sendall((secret + '\n').encode())
sock.sendall((json.dumps(args) + '\n').encode())
file = sock.makefile('r', 1)
if expect_response:
response = file.readline()
if not response:
raise ConnectionRefusedError()
return json.loads(response)
| agpl-3.0 | -2,549,010,891,798,768,000 | 31.141732 | 91 | 0.564429 | false |
gypleon/DeepLearningProject | PracticeTorch/cnn.py | 1 | 9785 | from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.autograd import Variable
import math
import numpy as np
import matplotlib.pyplot as plt
from collections import OrderedDict
# Training settings
# for terminal use. In notebook, you can't parse arguments
class args:
cuda = False
batch_size = 64
test_batch_size = 1000
epochs = 10
lr = 0.01
momentum = 0.5
no_cuda = False
seed = 1
log_interval = 10
# if add Dropout
with_dropout = False
# if initialize weights
with_init_weights = False
# if add BatchNorm
with_batchnorm = False
args.cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}
# The output of torchvision datasets are PILImage images of range [0, 1].
# We transform them to Tensors of normalized range [-1, 1]
transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])
trainset = datasets.CIFAR10(root='./data', train=True, download=True, transform=transform)
train_loader = torch.utils.data.DataLoader(trainset, batch_size=args.batch_size,
shuffle=True, num_workers=2)
testset = datasets.CIFAR10(root='./data', train=False, download=True, transform=transform)
test_loader = torch.utils.data.DataLoader(testset, batch_size=args.test_batch_size,
shuffle=False, num_workers=2)
classes = ('plane', 'car', 'bird', 'cat',
'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
# TODO: define your network here
self.conv_1 = nn.Conv2d(3, 6, kernel_size=5, stride=1)
self.conv_2 = nn.Conv2d(6, 16, kernel_size=5, stride=1)
# TODO: replace fc with conv
self.fc_1 = nn.Linear(16 * 25, 120)
self.fc_2 = nn.Linear(120, 84)
self.fc_3 = nn.Linear(84, 10)
if args.with_batchnorm:
self.block_conv_1 = nn.Sequential(
self.conv_1,
nn.BatchNorm2d(6),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2)
)
self.block_conv_2 = nn.Sequential(
self.conv_2,
nn.BatchNorm2d(16),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2)
)
else:
self.block_conv_1 = nn.Sequential(
self.conv_1,
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2)
)
self.block_conv_2 = nn.Sequential(
self.conv_2,
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2)
)
if args.with_dropout:
if args.with_batchnorm:
self.block_fc_1 = nn.Sequential(
self.fc_1,
nn.BatchNorm1d(120),
nn.Dropout()
)
self.block_fc_2 = nn.Sequential(
self.fc_2,
nn.BatchNorm1d(84),
nn.Dropout()
)
else:
self.block_fc_1 = nn.Sequential(
self.fc_1,
nn.Dropout()
)
self.block_fc_2 = nn.Sequential(
self.fc_2,
nn.Dropout()
)
else:
self.block_fc_1 = self.fc_1
self.block_fc_2 = self.fc_2
self.softmax = nn.LogSoftmax()
# Initialize parameters
if args.with_init_weights:
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. /n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
n = m.out_features
m.weight.data.normal_(0, math.sqrt(2. /n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm1d) or isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def forward(self, x):
# TODO
x = self.block_conv_1(x)
x = self.block_conv_2(x)
x = x.view(x.size(0), -1)
x = self.block_fc_1(x)
x = self.block_fc_2(x)
x = self.fc_3(x)
x = self.softmax(x)
return x
# Feature extractor for filter visualization
class FeatureExtractor(nn.Module):
def __init__(self, model, layer_names):
super(FeatureExtractor, self).__init__()
self._model = model
self._layer_names = set(layer_names)
def forward(self, x):
out = dict()
# _modules is an OrderedDict, which replace iteritems() with items() in python3.*
for name, module in self._model._modules.items():
if isinstance(module, nn.Linear):
x = x.view(x.size(0), -1)
x = module(x)
if name in self._layer_names:
out[name] = x
return out
# Vesualize training results and trained filters
class VisualizedResult():
def __init__(self, model):
self._model = model
def training_curve(self, epoches, train_loss_records, test_loss_records):
fig = plt.figure()
ax_train = fig.add_subplot(111)
ax_test = fig.add_subplot(111)
plt.axis([1, epoches, 0, math.ceil(max(train_loss_records + test_loss_records) * 1.2)])
plt.xlabel('Epoches')
plt.ylabel('Loss')
plt.title('Training Curve')
plt.plot(range(1, epoches + 1), train_loss_records, 'b-', label="train loss")
plt.plot(range(1, epoches + 1), test_loss_records, 'r-', label="test loss")
for xy in zip(range(1, epoches + 1), train_loss_records):
ax_train.annotate('%.2f' % xy[1], xy=xy, textcoords='data')
for xy in zip(range(1, epoches + 1), test_loss_records):
ax_test.annotate('%.2f' % xy[1], xy=xy, textcoords='data')
plt.legend(loc='upper right', borderaxespad=0.)
plt.show()
def accuracy_curve(self, epoches, accuracy_records):
fig = plt.figure()
ax = fig.add_subplot(111)
plt.axis([1, epoches, 0, 100])
plt.xlabel('Epoches')
plt.ylabel('Accuracy')
plt.title('Accuracy Curve')
plt.plot(range(1, epoches + 1), accuracy_records, '-')
for xy in zip(range(1, epoches + 1), accuracy_records):
ax.annotate('%s%%' % xy[1], xy=xy, textcoords='data')
plt.show()
def conv_filter(self, layer_names):
model.eval()
feature_extractor = FeatureExtractor(self._model, layer_names)
for data, target in test_loader:
if args.cuda:
data = data.cuda()
data = Variable(data, volatile=True)
out = feature_extractor.forward(data)
print(out)
model = Net()
if args.cuda:
model.cuda()
# TODO: other optimizers
optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)
train_loss_records = list()
test_loss_records = list()
accuracy_records = list()
def train(epoch):
model.train()
train_loss = 0
for batch_idx, (data, target) in enumerate(train_loader):
if args.cuda:
data, target = data.cuda(), target.cuda()
data, target = Variable(data), Variable(target)
optimizer.zero_grad()
output = model(data)
loss = F.nll_loss(output, target) # is it true to use such a loss over cross-entropy loss?
loss.backward()
optimizer.step()
train_loss += loss.data[0]
if batch_idx % args.log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.data[0]))
# Average training loss for this epoch
train_loss_records.append(train_loss / len(train_loader))
def test(epoch):
model.eval()
test_loss = 0
correct = 0
for data, target in test_loader:
if args.cuda:
data, target = data.cuda(), target.cuda()
data, target = Variable(data, volatile=True), Variable(target)
output = model(data)
test_loss += F.nll_loss(output, target).data[0]
pred = output.data.max(1)[1] # get the index of the max log-probability
correct += pred.eq(target.data).cpu().sum()
test_loss = test_loss
test_loss /= len(test_loader) # loss function already averages over batch size
accuracy = 100. * correct / len(test_loader.dataset)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
accuracy))
test_loss_records.append(test_loss)
accuracy_records.append(accuracy)
for epoch in range(1, args.epochs + 1):
train(epoch)
test(epoch)
visual_result = VisualizedResult(model)
# Visualize training curve
visual_result.training_curve(args.epochs, train_loss_records, test_loss_records)
# Visualize test accuracy
visual_result.accuracy_curve(args.epochs, accuracy_records)
# Visualize trained filter on the 1st Conv layer
visual_result.conv_filter(['conv_1'])
| apache-2.0 | 4,605,778,313,122,142,000 | 35.64794 | 101 | 0.559325 | false |
sveetch/boussole | boussole/conf/discovery.py | 1 | 7036 | # -*- coding: utf-8 -*-
"""
Backend discover
================
"""
import os
from collections import OrderedDict
from ..exceptions import SettingsDiscoveryError
class Discover:
"""
Should be able to find a settings file without any specific backend given,
just a directory path (the base dir) is required.
So:
* If a file name is explicitely given, use it to find backend;
* If no file name is given but a backend is, use its default file name;
* If file name nor backend is given, start full discover process:
* Get all backend default settings file name;
* Search for any of these available settings file names;
* If no available settings file name if finded, discovering fail;
* If one file name is given assume backend from file name;
"""
def __init__(self, backends=[]):
self.backends = backends
indexes = self.scan_backends(self.backends)
self.engines, self.filenames, self.extensions = indexes
def scan_backends(self, backends):
"""
From given backends create and return engine, filename and extension
indexes.
Arguments:
backends (list): List of backend engines to scan. Order does matter
since resulted indexes are stored in an ``OrderedDict``. So
discovering will stop its job if it meets the first item.
Returns:
tuple: Engine, filename and extension indexes where:
* Engines are indexed on their kind name with their backend object
as value;
* Filenames are indexed on their filename with engine kind name as
value;
* Extensions are indexed on their extension with engine kind name
as value;
"""
engines = OrderedDict()
filenames = OrderedDict()
extensions = OrderedDict()
for item in backends:
engines[item._kind_name] = item
filenames[item._default_filename] = item._kind_name
extensions[item._file_extension] = item._kind_name
return engines, filenames, extensions
def get_engine(self, filepath, kind=None):
"""
From given filepath try to discover which backend format to use.
Discovering is pretty naive as it find format from file extension.
Args:
filepath (str): Settings filepath or filename.
Keyword Arguments:
kind (str): A format name to enforce a specific backend. Can be any
value from attribute ``_kind_name`` of available backend
engines.
Raises:
boussole.exceptions.SettingsDiscoveryError: If extension is
unknowed or if given format name is unknowed.
Returns:
object: Backend engine class.
"""
if not kind:
extension = os.path.splitext(filepath)[1]
if not extension:
msg = ("Unable to discover settings format from an empty file "
"extension: {}")
raise SettingsDiscoveryError(msg.format(filepath))
elif extension[1:] not in self.extensions:
msg = ("Settings file extension is unknowed from available "
"backends: {}")
raise SettingsDiscoveryError(msg.format(filepath))
kind = self.extensions[extension[1:]]
elif kind not in self.engines:
msg = "Given settings format is unknow: {}"
raise SettingsDiscoveryError(msg.format(kind))
return self.engines[kind]
def guess_filename(self, basedir, kind=None):
"""
Try to find existing settings filename from base directory using
default filename from available engines.
First finded filename from available engines win. So registred engines
order matter.
Arguments:
basedir (string): Directory path where to search for.
Keyword Arguments:
kind (string): Backend engine kind name to search for default
settings filename. If not given, search will be made for
default settings filename from all available backend engines.
Returns:
tuple: Absolute filepath and backend engine class.
"""
if kind:
filepath = os.path.join(basedir,
self.engines[kind]._default_filename)
if os.path.exists(filepath):
return filepath, self.engines[kind]
for filename, kind in self.filenames.items():
filepath = os.path.join(basedir, filename)
if os.path.exists(filepath):
return filepath, self.engines[kind]
msg = "Unable to find any settings in directory: {}"
raise SettingsDiscoveryError(msg.format(basedir))
def search(self, filepath=None, basedir=None, kind=None):
"""
Search for a settings file.
Keyword Arguments:
filepath (string): Path to a config file, either absolute or
relative. If absolute set its directory as basedir (omitting
given basedir argument). If relative join it to basedir.
basedir (string): Directory path where to search for.
kind (string): Backend engine kind name (value of attribute
``_kind_name``) to help discovering with empty or relative
filepath. Also if explicit absolute filepath is given, this
will enforce the backend engine (such as yaml kind will be
forced for a ``foo.json`` file).
Returns:
tuple: Absolute filepath and backend engine class.
"""
# None values would cause trouble with path joining
if filepath is None:
filepath = ""
if basedir is None:
basedir = "."
if not basedir and not filepath:
msg = "Either basedir or filepath is required for discovering"
raise SettingsDiscoveryError(msg)
if kind and kind not in self.engines:
msg = "Given settings format is unknow: {}"
raise SettingsDiscoveryError(msg.format(kind))
# Implicit filename to find from backend
if not filepath:
filename, engine = self.guess_filename(basedir, kind)
filepath = os.path.join(basedir, filename)
# Explicit filename dont have to search for default backend file and
# blindly force given backend if any
else:
if os.path.isabs(filepath):
basedir, filename = os.path.split(filepath)
else:
filepath = os.path.join(basedir, filepath)
if not os.path.exists(filepath):
msg = "Given settings file does not exists: {}"
raise SettingsDiscoveryError(msg.format(filepath))
engine = self.get_engine(filepath, kind)
return filepath, engine
| mit | -3,799,653,974,560,507,000 | 36.227513 | 79 | 0.604605 | false |
edoburu/django-oscar-docdata | sandbox/settings/default.py | 2 | 8518 | import os
from django.urls import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from oscar import get_core_apps, OSCAR_MAIN_TEMPLATE_DIR
from oscar.defaults import * # noqa
PROJECT_DIR = os.path.dirname(__file__)
location = lambda x: os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), x) # noqa
DEBUG = True
USE_TZ = True
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': location('db.sqlite')
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Europe/London'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = location("public/media")
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = '/media/'
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
# ADMIN_MEDIA_PREFIX = '/media/admin/'
STATIC_URL = '/static/'
STATICFILES_DIRS = (location('static/'),)
STATIC_ROOT = location('public')
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '$)a7n&o80u!6y5t-+jrd3)3!%vh&shg$wqpjpxc!ar&p#!)n1a'
# List of callables that know how to import templates from various sources.
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': (
location('templates'),
OSCAR_MAIN_TEMPLATE_DIR,
),
'OPTIONS': {
'loaders': (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
),
'context_processors': (
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.request',
'django.template.context_processors.static',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
# Oscar specific
'oscar.apps.search.context_processors.search_form',
'oscar.apps.promotions.context_processors.promotions',
'oscar.apps.checkout.context_processors.checkout',
'oscar.core.context_processors.metadata',
),
},
},
]
MIDDLEWARE = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware',
'oscar.apps.basket.middleware.BasketMiddleware',
)
ROOT_URLCONF = 'sandbox.urls'
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'console': {
'class': 'logging.StreamHandler',
},
},
'loggers': {
'django': {
'handlers': ['console'],
'level': 'INFO',
},
'suds.transport': {
'handlers': ['console'],
'level': 'DEBUG' if DEBUG is True else 'INFO',
'propagate': True,
},
'oscar': {
'handlers': ['console'],
'level': 'DEBUG' if DEBUG is True else 'INFO',
},
'oscar_docdata': {
'handlers': ['console'],
'level': 'DEBUG' if DEBUG is True else 'INFO',
},
},
}
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.admin',
'django.contrib.flatpages',
'django.contrib.staticfiles',
'oscar_docdata',
'widget_tweaks'
]
# our custom checkout app with docdata payment selection views
INSTALLED_APPS += get_core_apps(['sandbox.apps.checkout'])
AUTHENTICATION_BACKENDS = (
'oscar.apps.customer.auth_backends.EmailBackend',
'django.contrib.auth.backends.ModelBackend',
)
LOGIN_REDIRECT_URL = '/accounts/'
APPEND_SLASH = True
# ==============
# Oscar settings
# ==============
OSCAR_ALLOW_ANON_CHECKOUT = False
OSCAR_DEFAULT_CURRENCY = "EUR"
# Haystack settings
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'haystack.backends.simple_backend.SimpleEngine',
},
}
OSCAR_SHOP_TAGLINE = 'Docdata Payments sandbox'
COMPRESS_ENABLED = False
COMPRESS_PRECOMPILERS = (
('text/less', 'lessc {infile} {outfile}'),
)
# =========================
# Docdata Payments settings
# =========================
# Test:
DOCDATA_MERCHANT_NAME = os.environ.get("DOCDATA_MERCHANT_NAME", "")
DOCDATA_MERCHANT_PASSWORD = os.environ.get("DOCDATA_MERCHANT_PASSWORD", "")
DOCDATA_TESTING = True
# The payment-methods profile that is created in the Docdata Backoffice. By default, this is named "standard".
DOCDATA_PROFILE = os.environ.get("DOCDATA_PROFILE", "standard")
# URLs
DOCDATA_SUCCESS_URL = reverse_lazy('checkout:thank-you')
DOCDATA_PENDING_URL = reverse_lazy('checkout:thank-you')
DOCDATA_CANCELLED_URL = '/'
DOCDATA_ERROR_URL = '/'
# Extend dashboard
OSCAR_DASHBOARD_NAVIGATION[2]['children'].insert(1, {
'label': _('Docdata Orders'),
'url_name': 'docdata-order-list',
})
# Payment choices
WEBSHOP_PAYMENT_CHOICES = (
('IDEAL', 'iDEAL'),
('VISA', 'Visa'),
('MASTERCARD', 'MasterCard'),
('AMEX', 'American Express'),
('PAYPAL_EXPRESS_CHECKOUT', 'PayPal'), # NOTE: has additional hack in checkout code for US.
)
# Don't show the payment selection form during the checkout process: leave it up to the docdata
# payment menu
SKIP_PAYMENT_CHOICES = bool(os.environ.get("SKIP_PAYMENT_CHOICES") == "1")
# Order pipeline
OSCAR_INITIAL_ORDER_STATUS = 'new' # The main object
OSCAR_INITIAL_LINE_STATUS = 'new' # The individual lines
OSCAR_ORDER_STATUS_PIPELINE = {
# Possible states of an order, and the transitions.
'new': ('pending', 'paid', 'cancelled'), # docdata started
'pending': ('paid', 'cancelled'),
'paid': ('shipping', 'delivered', 'charged back', 'refunded'),
'refunded': (), # Merchant refunded
'charged back': (), # Customer asked for charge back
'cancelled': (),
'expired': (),
'shipping': ('delivered', 'refunded', 'charged back'),
'delivered': ('refunded', 'charged back'),
}
OSCAR_ORDER_STATUS_PIPELINE['unknown'] = OSCAR_ORDER_STATUS_PIPELINE.keys()
OSCAR_LINE_STATUS_PIPELINE = OSCAR_ORDER_STATUS_PIPELINE
OSCAR_ORDER_STATUS_CASCADE = {
# Let global states cascade to the order lines too.
'paid': 'paid',
'cancelled': 'cancelled',
'charged back': 'charged back',
'expired': 'expired',
}
DOCDATA_ORDER_STATUS_MAPPING = {
# DocdataOrder status values: new, in_progress, pending, paid, charged_back, cancelled, refunded, unknown
# Map to our order pipeline, just remove the underscores. All other values are identical.
'in_progress': "pending", # Redirect phase
'charged_back': "charged back",
}
SHIPPING_EVENT_STATUS_MAPPING = {
# Translate shipping event type to OSCAR_ORDER_STATUS_PIPELINE/OSCAR_LINE_STATUS_PIPELINE
'shipping': 'shipping',
'delivered': 'delivered',
}
| apache-2.0 | -3,831,840,582,699,016,700 | 31.143396 | 110 | 0.654262 | false |
jhpeterson/DXFtoMesh | pexpect_c2dmesh_v2.py | 1 | 7819 | import DXFtoSegments
import pdb
import dxfgrabber
import pickle
import sys
from c2d_premesh_v5 import *
import os
'''Jeff's notes:
Changed "Nodes" to "nodes" for Pexpect
Added path control for working directory
'''
if 'darwin' in sys.platform:
my_os = 'osx'
import pexpect
elif 'linux' in sys.platform:
my_os = 'linux'
import pexpect
elif 'win32' in sys.platform:
my_os = 'windows'
import pexpect
elif 'cygwin' in sys.platform:
my_os = 'cygwin'
import pexpect
else:
my_os = 'unknown:' + sys.platform
import pexpect
def make_c2d_mesh(mesh, cats2d_path, working_dir='.'):
# Get the current working directory (just in case '.' doesn't work)
if working_dir == '.' or working_dir == '':
working_dir = os.getcwd()
mesh_filename = 'flow.mshc'
meshplot_filename = 'mesh_plot.eps'
if os.path.isfile(os.path.join(working_dir, 'flow.mshc')):
overwrite_flowmshc = raw_input('A flow.mshc has been detected in {}. Overwrite? (y/n): '.format(working_dir))
if overwrite_flowmshc == 'y':
os.remove(os.path.join(working_dir, 'flow.mshc'))
if os.path.isfile(os.path.join(working_dir, 'mesh_plot.eps')):
os.remove(os.path.join(working_dir, 'mesh_plot.eps'))
else:
mesh_filename = raw_input('Input new name for mesh file: ')
directions = {'SOUTH':'s', 'NORTH':'n', 'EAST':'e', 'WEST':'w', 'South West': 'w', 'North West':'nw', 'South East': 'se', 'North East':'ne'}
# Begins Cats2D process
current_dir = os.getcwd()
os.chdir(working_dir)
print 'working directory: {}'.format(working_dir)
cats2d = pexpect.spawn(cats2d_path, timeout=3, maxread=4000)
fout = open('pexpect_log.txt', 'w')
cats2d.logfile = fout
#ssh_handle.logfile_read = sys.stdout
#File 'flow.mshc' does not exist.
#Try a new name? [y/N]:
i = cats2d.expect('name', timeout=3)
b = cats2d.before
a = cats2d.after
cats2d.sendline('N')
#Begin session with mesh generator? [Y/n]: Y
i = cats2d.expect('[Y/n]')
cats2d.sendline('Y')
#Enter the Number of Regions, or 0 for array >= 0:
i = cats2d.expect('array >= 0')
cats2d.sendline(str(len(mesh.Region_list)))
## This is the part where it asks relations between one region to another.
loop_i = 0
print "Entering region relationship loop"
while True:
try:
i = cats2d.expect('Enter the Neighbor Id', timeout = 1)
#print cats2d
#print "Start matching Neighbors!"
b = cats2d.before.encode('utf8')
a = cats2d.after.encode('utf8')
b = b.translate(None, '\n\r?').strip(': ')
neighbor_strings = b.split(' ')
#card_dir = directions[neighbor_strings[-5]]
region_id = int(neighbor_strings[-1])
if 'NORTH' in neighbor_strings:
card_dir = 'n'
elif 'EAST' in neighbor_strings:
card_dir = 'e'
elif 'SOUTH' in neighbor_strings:
card_dir = 's'
elif 'WEST' in neighbor_strings:
card_dir = 'w'
#print "Made it here"
neighbor_region = mesh.Region_list[region_id-1].Neighbors(card_dir)
if neighbor_region != 0:
#print "Made it to the if"
neighbor_side = neighbor_region[1]
cats2d.sendline(str(neighbor_region[0]))
i = cats2d.expect('side', timeout = 3)
#print "Which side?"
cats2d.sendline(neighbor_side)
print "Region " + str(region_id) + " has " + card_dir + " neighbor: Region " + str(neighbor_region[0]) + " (" + neighbor_side + ")"
else:
cats2d.sendline(str(neighbor_region))
print "Region " + str(region_id) + " has no neighbor in the " + card_dir + " direction"
except pexpect.TIMEOUT:
print "Jumping out of the loop!"
break
print "\n Entering vertex location loop"
vertex_loop = 1
while True:
try:
if vertex_loop > 1:
cats2d.expect('value', timeout = 3)
b = cats2d.before.encode('utf8')
#pdb.set_trace()
if b.find('Region')>0:
vertex_strings = b.translate(None, '\n\r?').split("="*79)
region_id = int(filter(str.isdigit, vertex_strings[-2]))
#pdb.set_trace()
corner_string = b.split("***")[-2]
if corner_string.rfind('South West') > 0:
corner_dir = 'sw'
elif corner_string.rfind('North West') > 0:
corner_dir = 'nw'
elif corner_string.rfind('South East') > 0:
corner_dir = 'se'
elif corner_string.rfind('North East') > 0:
corner_dir = 'ne'
#if region_id >=10:
# pdb.set_trace()
x_corner = str(mesh.Region_list[region_id - 1].Vertices(corner_dir).coords[0])
y_corner = str(mesh.Region_list[region_id - 1].Vertices(corner_dir).coords[1])
cats2d.sendline(x_corner)
cats2d.expect('y value', timeout = 3)
cats2d.sendline(y_corner)
print "The " + corner_dir + " corner of Region " + str(region_id) + " is at (" + x_corner + "," + y_corner + ")"
vertex_loop += 1
#print vertex_loop
except pexpect.TIMEOUT:
print "Jumping out of the loop!"
break
print "\nEntering discretization loop"
discretization_loop = 1
while True:
try:
if discretization_loop > 1:
cats2d.expect('elements', timeout = 3)
b = cats2d.before.encode('utf8').translate(None, '\n\r?')
bc = b.split("="*79)
#pdb.set_trace()
if b.find('Enter the Number of')>0:
cats2d.sendline('1')
cats2d.expect('Enter the Number of', timeout = 3)
cats2d.sendline('1')
discretization_loop += 1
print discretization_loop
except pexpect.TIMEOUT:
print "Jumping out of the loop!"
break
#pdb.set_trace()
cats2d.expect('M E S H D R I V E R')
cats2d.sendline('Quit')
cats2d.expect('Save changes to file?')
cats2d.sendline('Y')
cats2d.expect('Enter the restart file name:')
cats2d.sendline(mesh_filename)
print 'Mesh file written as ' + mesh_filename
cats2d.expect('C A T S 2 D')
cats2d.sendline('Post Process')
cats2d.expect('P O S T P R O C E S S O R')
cats2d.sendline('MeshPlot')
cats2d.expect('Enter the plot file name:')
cats2d.sendline(meshplot_filename)
print 'Mesh plot written as ' + meshplot_filename
cats2d.expect('Plot nodes?')
cats2d.sendline('N')
cats2d.expect('P O S T P R O C E S S O R')
cats2d.sendline('Return')
cats2d.expect('C A T S 2 D')
cats2d.sendline('Quit')
os.chdir(current_dir)
fout.close()
print 'Complete' | mit | 7,269,115,825,467,663,000 | 32.915179 | 147 | 0.505691 | false |
CreditEaseDBA/Themis | webui/wtform_models.py | 1 | 4985 | # -*-coding:utf-8-*-
import wtforms_json
import re
from wtforms import IntegerField, StringField, FloatField
from wtforms import TextAreaField, BooleanField, FieldList
from wtforms import FormField, FieldList
from wtforms.validators import ValidationError, Length, DataRequired
from wtforms import Form
"""
基于wtforms的验证类
"""
class BaseParms(Form):
parm_desc = StringField("parm_desc")
parm_name = StringField("parm_name")
class InputParms(BaseParms):
parm_unit = StringField("parm_unit")
parm_value = FloatField("parm_value")
def validate_parm_value(form, field):
if not field.data:
raise ValidationError(u"输入参数值不正确")
try:
float(field.data)
except ValueError:
raise ValidationError(u"输入参数值不正确")
class BaseForm(Form):
db_type = StringField("dbtype", [DataRequired()])
max_score = FloatField("maxscore", [DataRequired(message=u"分值不正确")])
rule_desc = TextAreaField(
"rule_desc", [DataRequired(message=u"规则描述不正确"), Length(max=255)])
rule_name = StringField(
"rule_name", [DataRequired(message=u"规则名称不正确"), Length(max=50)])
rule_summary = StringField("rule_summary",
[DataRequired(), Length(max=255)])
rule_type = StringField("rule_type", [DataRequired()])
weight = FloatField("rule_weight", [DataRequired()])
rule_status = StringField("rule_status", [DataRequired()])
rule_solution = TextAreaField(
"solution", [DataRequired(message=u"解决方案必须有")])
rule_complexity = StringField("rule_complexity", [DataRequired()])
input_parms = FieldList(FormField(InputParms))
output_parms = FieldList(FormField(BaseParms))
exclude_obj_type = StringField("exclude_obj_type")
def validate_db_type(form, field):
if field.data not in ["O", "mysql"]:
raise ValidationError(u"数据库类型不正确")
def validate_max_score(form, field):
if not field.data:
raise ValidationError(u"分数不正确")
try:
float(field.data)
except ValueError:
raise ValidationError(u"分数不正确")
def validate_weight(form, field):
try:
float(field.data)
except ValueError:
raise ValidationError(u"权重类型不正确")
def validate_rule_type(form, field):
if field.data not in ["OBJ", "TEXT", "SQLPLAN", "SQLSTAT"]:
raise ValidationError(u"规则类型不正确")
def validate_rule_status(form, field):
if field.data not in ["ON", "OFF"]:
raise ValidationError(u"规则状态不正确")
def validate_rule_complexity(form, field):
if field.data not in ["simple", "complex"]:
raise ValidationError(u"规则复杂度不正确")
class SimpleForm(BaseForm):
rule_cmd = StringField(
"rule_cmd", [DataRequired(message=u"命令不能为空"), Length(max=1024)])
def validate_rule_cmd(form, field):
db_key = ("\b(exec|execute|insert|select|delete|update|alter|create|"
"drop|count|chr|char|asc|mid|substring|master|truncate|"
"declare|xp_cmdshell|restore|backup|net)\b")
mongo_key = r"""\b(update|delete|drop|remove|killcursors|dropdatabase
|dropindex|reindex|lock|unlock|fsync|setprofilingLevel
|repairDatabase|removeUser|shutdownServer|killOp|eval
|copyDatabase|cloneDatabase|addUser|setSlaveOk)\b"""
regex = re.compile(mongo_key, re.I)
m = regex.search(field.data)
if m:
raise ValidationError(u"有违法字符")
class ComplexForm(BaseForm):
rule_cmd = StringField("rule_cmd", [DataRequired(message=u"命令不能为空")])
def validate_rule_cmd(form, field):
if field.data not in ["default"]:
raise ValidationError(u"规则数据不正确")
if __name__ == "__main__":
wtforms_json.init()
data = {
"db_type": "O",
"rule_status": "ON",
"max_score": 10,
"rule_desc": u"测试",
"rule_name": "test_rule",
"rule_type": "OBJ",
"rule_summary": "test",
"rule_cmd": "delet",
"rule_weight": 2,
# "solution": "test xxxx",
"rule_complexity": "simple",
"rule_solution": "xxxxx",
# "xxx": "test",
"input_parms": [{
"parm_desc": "test",
"parm_name": "ddd",
"parm_unit": "GB",
"parm_value": "xxx"
}],
"output_parms": [{"parm_desc": "test", "parm_name": "ttt"}]
}
# form = ComplexForm.from_json(data)
try:
form = SimpleForm.from_json(data, skip_unknown_keys=False)
print(form.data)
print(form.validate())
if not form.validate():
print(form.errors)
except wtforms_json.InvalidData as e:
print(str(e))
| apache-2.0 | -8,311,583,514,668,959,000 | 31.710345 | 78 | 0.605524 | false |
leiferikb/bitpop | build/scripts/master/factory/annotator_factory.py | 1 | 2278 | # Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility class to generate and manage a factory to be passed to a
builder dictionary as the 'factory' member, for each builder in c['builders'].
Specifically creates a basic factory that will execute an arbirary annotator
script.
"""
from master.factory import annotator_commands
from master.factory import commands
from master.factory.build_factory import BuildFactory
class AnnotatorFactory(object):
"""Encapsulates data and methods common to all annotators."""
def __init__(self):
self._factory_properties = None
def BaseFactory(self, recipe, factory_properties=None, triggers=None,
timeout=1200, max_time=None):
"""The primary input for the factory is the |recipe|, which specifies the
name of a recipe file to search for. The recipe file will fill in the rest
of the |factory_properties|. This setup allows for major changes to factory
properties to occur on slave-side without master restarts.
NOTE: Please be very discerning with what |factory_properties| you pass to
this method. Ideally, you will pass none, and that will be sufficient in the
vast majority of cases. Think very carefully before adding any
|factory_properties| here, as changing them will require a master restart.
|timeout| refers to the maximum number of seconds a step should be allowed
to run without output. After no output for |timeout| seconds, the step is
forcibly killed.
|max_time| refers to the maximum number of seconds a step should be allowed
to run, regardless of output. After |max_time| seconds, the step is forcibly
killed.
"""
factory_properties = factory_properties or {}
factory_properties.update({'recipe': recipe})
self._factory_properties = factory_properties
factory = BuildFactory()
cmd_obj = annotator_commands.AnnotatorCommands(factory)
cmd_obj.AddAnnotatedScript(
factory_properties, timeout=timeout, max_time=max_time)
for t in (triggers or []):
factory.addStep(commands.CreateTriggerStep(
t, trigger_copy_properties=['swarm_hashes']))
return factory
| gpl-3.0 | 3,664,183,634,554,981,400 | 40.418182 | 80 | 0.734855 | false |
akx/shoop | shoop/admin/modules/orders/views/list.py | 1 | 2558 | # -*- coding: utf-8 -*-
# This file is part of Shoop.
#
# Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals
from babel.dates import format_datetime
from django.utils.html import escape
from django.utils.timezone import localtime
from django.utils.translation import ugettext as _
from shoop.admin.utils.picotable import (
ChoicesFilter, Column, DateRangeFilter, MultiFieldTextFilter, RangeFilter,
TextFilter
)
from shoop.admin.utils.views import PicotableListView
from shoop.core.models import Order, OrderStatus, PaymentStatus, ShippingStatus
from shoop.utils.i18n import format_money, get_current_babel_locale
class OrderListView(PicotableListView):
model = Order
columns = [
Column("identifier", _(u"Order"), linked=True, filter_config=TextFilter(operator="startswith")),
Column("order_date", _(u"Order Date"), display="format_order_date", filter_config=DateRangeFilter()),
Column(
"customer", _(u"Customer"),
filter_config=MultiFieldTextFilter(filter_fields=("customer__email", "customer__name"))
),
Column("status", _(u"Status"), filter_config=ChoicesFilter(choices=OrderStatus.objects.all())),
Column("payment_status", _(u"Payment Status"), filter_config=ChoicesFilter(choices=PaymentStatus.choices)),
Column("shipping_status", _(u"Shipping Status"), filter_config=ChoicesFilter(choices=ShippingStatus.choices)),
Column(
"taxful_total_price", _(u"Total"), sort_field="taxful_total_price_value",
display="format_taxful_total_price", class_name="text-right",
filter_config=RangeFilter(field_type="number", filter_field="taxful_total_price_value")
),
]
def get_queryset(self):
return super(OrderListView, self).get_queryset().exclude(deleted=True)
def format_order_date(self, instance, *args, **kwargs):
return format_datetime(localtime(instance.order_date), locale=get_current_babel_locale())
def format_taxful_total_price(self, instance, *args, **kwargs):
return escape(format_money(instance.taxful_total_price))
def get_object_abstract(self, instance, item):
return [
{"text": "%s" % instance, "class": "header"},
{"title": _(u"Total"), "text": item["taxful_total_price"]},
{"title": _(u"Status"), "text": item["status"]}
]
| agpl-3.0 | -1,577,368,956,243,460,000 | 43.877193 | 118 | 0.683737 | false |
abramhindle/UnnaturalCodeFork | python/testdata/launchpad/lib/lp/testing/keyserver/web.py | 1 | 5621 | # Copyright 2009-2011 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""GPG Key Information Server Prototype.
It follows the standard URL schema for PKS/SKS systems
It implements the operations:
- 'index' : returns key index information
- 'get': returns an ASCII armored public key
- 'add': adds a key to the collection (does not update the index)
It only depends on GPG for key submission; for retrieval and searching
it just looks for files in the root (eg. /var/tmp/testkeyserver). The files
are named like this:
0x<keyid|fingerprint>.<operation>
Example:
$ gpg --list-key cprov > 0x681B6469.index
note: remove the lines containing 'sub' or 'secret' keys
$ gpg --export -a cprov > 0x681B6469.get
"""
__metaclass__ = type
__all__ = [
'KeyServerResource',
]
import cgi
import glob
import os
from time import sleep
from twisted.web.resource import Resource
from zope.component import getUtility
from lp.services.gpg.interfaces import (
GPGKeyNotFoundError,
IGPGHandler,
MoreThanOneGPGKeyFound,
SecretGPGKeyImportDetected,
)
GREETING = 'Copyright 2004-2009 Canonical Ltd.\n'
def locate_key(root, suffix):
"""Find a key file in the root with the given suffix.
This does some globbing to possibly find a fingerprint-named key
file when given a key ID.
:param root: The root directory in which to look.
:param suffix: The key ID or fingerprint, of the form
0x<FINGERPRINT|KEYID>.<METHOD>
:returns: An absolute path to the key file.
"""
path = os.path.join(root, suffix)
if not os.path.exists(path):
# GPG might request a key ID from us, but we name the keys by
# fingerprint. Let's glob.
if suffix.startswith('0x'):
suffix = suffix[2:]
keys = glob.glob(os.path.join(root, '*' + suffix))
if len(keys) == 1:
path = keys[0]
else:
return None
return path
class _BaseResource(Resource):
def getChild(self, name, request):
"""Redirect trailing slash correctly."""
if name == '':
return self
return Resource.getChild(
self, name, request)
class KeyServerResource(_BaseResource):
"""Root resource for the test keyserver."""
def __init__(self, root):
_BaseResource.__init__(self)
self.putChild('pks', PksResource(root))
def render_GET(self, request):
return GREETING
class PksResource(_BaseResource):
def __init__(self, root):
_BaseResource.__init__(self)
self.putChild('lookup', LookUp(root))
self.putChild('add', SubmitKey(root))
def render_GET(self, request):
return 'Welcome To Fake SKS service.\n'
KEY_NOT_FOUND_BODY = (
"<html><head><title>Error handling request</title></head>\n"
"<body><h1>Error handling request</h1>No results found: "
"No keys found</body></html>")
class LookUp(Resource):
isLeaf = True
permitted_actions = ['index', 'get']
def __init__(self, root):
Resource.__init__(self)
self.root = root
def render_GET(self, request):
try:
action = request.args['op'][0]
keyid = request.args['search'][0]
except KeyError:
return 'Invalid Arguments %s' % request.args
return self.processRequest(action, keyid, request)
def processRequest(self, action, keyid, request):
# Sleep a short time so that tests can ensure that timeouts
# are properly handled by setting an even shorter timeout.
sleep(0.02)
if (action not in self.permitted_actions) or not keyid:
return 'Forbidden: "%s" on ID "%s"' % (action, keyid)
filename = '%s.%s' % (keyid, action)
path = locate_key(self.root, filename)
if path is not None:
content = cgi.escape(open(path).read())
page = ('<html>\n<head>\n'
'<title>Results for Key %s</title>\n'
'</head>\n<body>'
'<h1>Results for Key %s</h1>\n'
'<pre>\n%s\n</pre>\n</html>') % (keyid, keyid, content)
return page
else:
request.setResponseCode(404)
return KEY_NOT_FOUND_BODY
SUBMIT_KEY_PAGE = """
<html>
<head>
<title>Submit a key</title>
</head>
<body>
<h1>Submit a key</h1>
<p>%(banner)s</p>
<form method="post">
<textarea name="keytext" rows="20" cols="66"></textarea> <br>
<input type="submit" value="Submit">
</form>
</body>
</html>
"""
class SubmitKey(Resource):
isLeaf = True
def __init__(self, root):
Resource.__init__(self)
self.root = root
def render_GET(self, request):
return SUBMIT_KEY_PAGE % {'banner': ''}
def render_POST(self, request):
try:
keytext = request.args['keytext'][0]
except KeyError:
return 'Invalid Arguments %s' % request.args
return self.storeKey(keytext)
def storeKey(self, keytext):
gpghandler = getUtility(IGPGHandler)
try:
key = gpghandler.importPublicKey(keytext)
except (GPGKeyNotFoundError, SecretGPGKeyImportDetected,
MoreThanOneGPGKeyFound) as err:
return SUBMIT_KEY_PAGE % {'banner': str(err)}
filename = '0x%s.get' % key.fingerprint
path = os.path.join(self.root, filename)
fp = open(path, 'w')
fp.write(keytext)
fp.close()
return SUBMIT_KEY_PAGE % {'banner': 'Key added'}
| agpl-3.0 | -1,671,162,239,570,108,400 | 25.894737 | 75 | 0.609678 | false |
mozilla/mozilla-ignite | apps/challenges/tests/test_views.py | 1 | 32366 | # Note: not using cStringIO here because then we can't set the "filename"
from StringIO import StringIO
from copy import copy
from datetime import datetime, timedelta
from django.contrib.auth.models import User, AnonymousUser
from django.contrib.messages import SUCCESS
from django.core.urlresolvers import reverse
from django.db.models import Max
from django.http import Http404
from django.test.utils import ContextList
from django.test import signals
from django.utils.functional import curry
from mock import Mock, patch, MagicMock
from nose.tools import assert_equal, with_setup, eq_, ok_
from test_utils import TestCase, RequestFactory
from commons.middleware import LocaleURLMiddleware
from challenges import views
from challenges.models import (Challenge, Submission, Phase, Category,
ExternalLink, SubmissionParent,
SubmissionVersion, SubmissionHelp)
from challenges.tests.fixtures import (challenge_setup, challenge_teardown,
create_users, create_submissions,
BLANK_EXTERNALS)
from challenges.tests.fixtures.ignite_fixtures import (setup_ignite_challenge,
teardown_ignite_challenge,
setup_ideation_phase,
create_submission,
create_user)
from ignite.tests.decorators import ignite_skip, ignite_only
from projects.models import Project
# Apply this decorator to a test to turn off the middleware that goes around
# inserting 'en_US' redirects into all the URLs
suppress_locale_middleware = patch.object(LocaleURLMiddleware,
'process_request',
lambda *args: None)
development_mock = MagicMock
development_mock.has_started = False
def _build_request(path=None):
request = Mock()
request.path = path
request._messages = [] # Stop messaging code trying to iterate a Mock
return request
@ignite_skip
@with_setup(challenge_setup, challenge_teardown)
def test_show_challenge():
"""Test the view to show an individual challenge."""
request = _build_request('/my-project/my-challenge/')
response = views.show(request, 'my-project', 'my-challenge')
assert_equal(response.status_code, 200)
class MessageTestCase(TestCase):
"""Test case class to check messaging."""
def assertSuccessMessage(self, response):
"""Assert that there is a success message in the given response."""
eq_(len(response.context['messages']), 1)
eq_(list(response.context['messages'])[0].level, SUCCESS)
class ChallengeEntryTest(TestCase):
# Need to inherit from this base class to get Jinja2 template hijacking
def setUp(self):
challenge_setup()
def tearDown(self):
challenge_teardown()
@ignite_skip
@suppress_locale_middleware
def test_no_entries(self):
"""Test that challenges display ok without any entries."""
response = self.client.get(Challenge.objects.get().get_absolute_url())
assert_equal(response.status_code, 200)
# Make sure the entries are present and in reverse creation order
assert_equal(len(response.context['entries'].object_list), 0)
@ignite_skip
@suppress_locale_middleware
def test_challenge_entries(self):
"""Test that challenge entries come through to the challenge view."""
submission_titles = create_submissions(3)
response = self.client.get(Challenge.objects.get().get_entries_url())
assert_equal(response.status_code, 200)
# Make sure the entries are present and in reverse creation order
assert_equal([s.title for s in response.context['entries'].object_list],
list(reversed(submission_titles)))
@suppress_locale_middleware
def test_entries_view(self):
"""Test the dedicated entries view.
This is currently a thin proxy onto the challenge view, hence this test
being practically identical to the one above.
"""
submission_titles = create_submissions(4)
phase = Phase.objects.get()
response = self.client.get(phase.get_absolute_url())
assert_equal(response.status_code, 200)
# Make sure the entries are present and in reverse creation order
assert_equal([s.title for s in response.context['entries'].object_list],
list(reversed(submission_titles)))
@suppress_locale_middleware
def test_hidden_entries(self):
"""Test that draft entries are not visible on the entries page."""
create_submissions(3)
submissions = Submission.objects.all()
hidden_submission = submissions[0]
hidden_submission.is_draft = True
hidden_submission.save()
phase = Phase.objects.get()
response = self.client.get(phase.get_absolute_url())
# Check the draft submission is hidden
assert_equal(set(response.context['entries'].object_list),
set(submissions[1:]))
@ignite_only
def test_winning_entries(self):
"""Test the winning entries view."""
create_submissions(5)
winners = Submission.objects.all()[1:3]
for entry in winners:
entry.is_winner = True
entry.save()
response = self.client.get(reverse('entries_winning'))
eq_(set(e.title for e in response.context['ideation_winners']),
set(e.title for e in winners))
assert_equal(len(response.context['development_winners']), 0)
def _build_links(initial_count, *forms):
prefix = 'externals'
form_data = {}
form_data.update({'%s-TOTAL_FORMS' % prefix: str(len(forms)),
'%s-INITIAL_FORMS' % prefix: str(initial_count),
'%s-MAX_NUM_FORMS' % prefix: ''})
for i, form in enumerate(forms):
for key, value in form.iteritems():
form_data['%s-%s-%s' % (prefix, i, key)] = value
return form_data
def _form_from_link(link_object):
return dict((k, getattr(link_object, k)) for k in ['id', 'name', 'url'])
class CreateEntryTest(TestCase):
"""Tests related to posting a new entry."""
def setUp(self):
challenge_setup()
self.category_id = Category.objects.get().id
self.project_slug, self.challenge_slug = (Project.objects.get().slug,
Challenge.objects.get().slug)
self.entry_form_path = '/en-US/%s/challenges/%s/entries/add/' % \
(self.project_slug, self.challenge_slug)
create_users()
def tearDown(self):
challenge_teardown()
@ignite_skip
def test_anonymous_form(self):
"""Check we can't display the entry form without logging in."""
response = self.client.get(self.entry_form_path)
# Check it's some form of redirect
assert response.status_code in xrange(300, 400)
@ignite_skip
def test_anonymous_post(self):
"""Check we can't post an entry without logging in."""
form_data = {'title': 'Submission',
'brief_description': 'A submission',
'description': 'A submission of shining wonderment.',
'created_by': User.objects.get(username='alex').id,
'category': self.category_id}
response = self.client.post(self.entry_form_path, data=form_data)
assert response.status_code in xrange(300, 400)
assert_equal(Submission.objects.count(), 0)
@ignite_skip
def test_display_form(self):
"""Test the new entry form."""
self.client.login(username='alex', password='alex')
response = self.client.get(self.entry_form_path)
assert_equal(response.status_code, 200)
# Check nothing gets created
assert_equal(Submission.objects.count(), 0)
@ignite_skip
def test_submit_form(self):
self.client.login(username='alex', password='alex')
alex = User.objects.get(username='alex')
form_data = {'title': 'Submission',
'brief_description': 'A submission',
'description': 'A submission of shining wonderment.',
'created_by': alex.get_profile(),
'category': self.category_id}
form_data.update(BLANK_EXTERNALS)
response = self.client.post(self.entry_form_path, data=form_data,
follow=True)
redirect_target = '/en-US/%s/challenges/%s/entries/' % \
(self.project_slug, self.challenge_slug)
self.assertRedirects(response, redirect_target)
# Make sure we actually created the submission
assert_equal([s.description for s in Submission.objects.all()],
['A submission of shining wonderment.'])
submission = Submission.objects.get()
assert_equal(submission.challenge.slug, self.challenge_slug)
assert_equal(submission.created_by.user, alex)
parent = SubmissionParent.objects.get()
assert_equal(parent.submission, submission)
@ignite_skip
def test_invalid_form(self):
"""Test that an empty form submission fails with errors."""
self.client.login(username='alex', password='alex')
response = self.client.post(self.entry_form_path, data=BLANK_EXTERNALS)
# Not so fussed about authors: we'll be re-working that soon enough
for k in ['Title', 'Summary']:
assert k in response.context['errors'], 'Missing error key %s' % k
assert_equal(Submission.objects.count(), 0)
@ignite_skip
def test_bad_image(self):
"""Test that a bad image is discarded."""
self.client.login(username='alex', password='alex')
alex = User.objects.get(username='alex')
bad_image_file = StringIO('kitten pictures')
bad_image_file.name = 'kittens.jpg'
form_data = {'title': 'Submission',
'brief_description': 'A submission',
'description': 'A submission of shining wonderment.',
'created_by': alex.get_profile(),
'category': self.category_id,
'sketh_note': bad_image_file}
form_data.update(BLANK_EXTERNALS)
response = self.client.post(self.entry_form_path, data=form_data)
assert response.context['errors'].get('Napkin sketch')
assert response.context['form']['sketh_note'].value() is None
assert_equal(Submission.objects.count(), 0)
@ignite_skip
@with_setup(challenge_setup, challenge_teardown)
def test_challenge_not_found():
"""Test behaviour when a challenge doesn't exist."""
request = _build_request('/my-project/not-a-challenge/')
try:
response = views.show(request, 'my-project', 'not-a-challenge')
except Http404:
pass
else:
assert_equal(response.status_code, 404)
@ignite_skip
@with_setup(challenge_setup, challenge_teardown)
def test_wrong_project():
"""Test behaviour when the project and challenge don't match."""
project_fields = {'name': 'Another project', 'slug': 'another-project',
'description': "Not the project you're looking for",
'long_description': 'Nothing to see here'}
other_project = Project.objects.create(**project_fields)
request = _build_request('/another-project/my-challenge/')
# We either want 404 by exception or by response code here: either is fine
try:
response = views.show(request, 'another-project', 'my-challenge')
except Http404:
pass
else:
assert_equal(response.status_code, 404)
class ShowEntryTest(TestCase):
"""Test functionality of the single entry view."""
def setUp(self):
self.initial_data = setup_ideation_phase(**setup_ignite_challenge())
self.profile = create_user('bob')
self.submission = create_submission(created_by=self.profile,
phase=self.initial_data['ideation_phase'])
self.parent = self.submission.parent
self.submission_path = self.submission.get_absolute_url()
def tearDown(self):
teardown_ignite_challenge()
def create_submission(self, **kwargs):
"""Helper to create a ``Submission``"""
defaults = {
'phase': self.initial_data['ideation_phase'],
'title': 'A submission',
'brief_description': 'My submission',
'description': 'My wonderful submission',
'created_by': self.profile,
'category': self.initial_data['category'],
}
if kwargs:
defaults.update(kwargs)
return Submission.objects.create(**defaults)
@suppress_locale_middleware
def test_show_entry(self):
url = reverse('entry_show', kwargs={'entry_id': self.submission.id,
'phase': 'ideas',})
response = self.client.get(url)
assert_equal(response.status_code, 200)
@suppress_locale_middleware
def test_entry_not_found(self):
# Get an ID that doesn't exist
bad_id = Submission.objects.aggregate(max_id=Max('id'))['max_id'] + 1
bad_path = '/my-project/challenges/my-challenge/entries/%d/' % bad_id
response = self.client.get(bad_path)
assert_equal(response.status_code, 404, response.content)
@suppress_locale_middleware
def test_old_versioned_entry(self):
new_submission = self.create_submission(title='Updated Submission!')
self.parent.update_version(new_submission)
response = self.client.get(self.submission_path)
assert_equal(response.status_code, 200)
eq_(response.context['entry'].title, 'Updated Submission!')
@suppress_locale_middleware
def test_new_versioned_entry(self):
new_submission = self.create_submission(title='Updated Submission!')
self.parent.update_version(new_submission)
response = self.client.get(new_submission.get_absolute_url())
assert_equal(response.status_code, 200)
eq_(response.context['entry'].title, 'Updated Submission!')
@suppress_locale_middleware
def test_failed_versioned_entry(self):
"""New versioned entries shouldn't change the url"""
new_submission = self.create_submission(title='Updated Submission!')
self.parent.update_version(new_submission)
url = reverse('entry_show', kwargs={'entry_id': new_submission.id,
'phase': 'ideas'})
response = self.client.get(url)
assert_equal(response.status_code, 404)
class EditEntryTest(MessageTestCase):
"""Test functionality of the edit entry view."""
def setUp(self):
challenge_setup()
phase = Phase.objects.get()
phase.name = 'Ideation'
phase.save()
create_users()
admin = User.objects.create_user('admin', '[email protected]',
password='admin')
admin.is_superuser = True
admin.save()
# Fill in the profile name to stop nag redirects
admin_profile = admin.get_profile()
admin_profile.name = 'Admin Adminson'
admin_profile.save()
alex_profile = User.objects.get(username='alex').get_profile()
create_submissions(1, creator=alex_profile)
entry = Submission.objects.get()
self.view_path = entry.get_absolute_url()
self.edit_path = entry.get_edit_url()
def tearDown(self):
teardown_ignite_challenge()
def open_phase(self):
phase = Phase.objects.get()
phase.start_date = datetime.utcnow() - timedelta(hours=1)
phase.end_date = datetime.utcnow() + timedelta(hours=1)
phase.save()
def close_phase(self):
phase = Phase.objects.get()
phase.start_date = datetime.utcnow() - timedelta(hours=1)
phase.end_date = datetime.utcnow() - timedelta(hours=1)
phase.save()
def _edit_data(self, submission=None):
if submission is None:
submission = Submission.objects.get()
return dict(title=submission.title,
brief_description='A submission',
description='A really, seriously good submission',
life_improvements='This will benefit mankind',
category=submission.category.id)
@suppress_locale_middleware
def test_edit_form(self):
self.client.login(username='alex', password='alex')
response = self.client.get(self.edit_path)
assert_equal(response.status_code, 200)
@suppress_locale_middleware
def test_edit(self):
self.client.login(username='alex', password='alex')
data = self._edit_data()
data.update(BLANK_EXTERNALS)
response = self.client.post(self.edit_path, data, follow=True)
self.assertRedirects(response, self.view_path)
# Check for a success message
self.assertSuccessMessage(response)
assert_equal(Submission.objects.get().description, data['description'])
@suppress_locale_middleware
def test_edit_closed_phase(self):
self.close_phase()
self.client.login(username='alex', password='alex')
data = self._edit_data()
data.update(BLANK_EXTERNALS)
response = self.client.post(self.edit_path, data, follow=True)
eq_(response.status_code, 403)
@suppress_locale_middleware
def test_anonymous_access(self):
"""Check that anonymous users can't get at the form."""
response = self.client.get(self.edit_path)
assert_equal(response.status_code, 302)
@suppress_locale_middleware
def test_anonymous_edit(self):
"""Check that anonymous users can't post to the form."""
data = self._edit_data()
data.update(BLANK_EXTERNALS)
response = self.client.post(self.edit_path, data)
assert_equal(response.status_code, 302)
assert 'seriously' not in Submission.objects.get().description
@suppress_locale_middleware
def test_non_owner_access(self):
"""Check that non-owners cannot see the edit form."""
self.client.login(username='bob', password='bob')
response = self.client.get(self.edit_path)
assert_equal(response.status_code, 403)
@suppress_locale_middleware
def test_non_owner_edit(self):
"""Check that users cannot edit each other's submissions."""
self.client.login(username='bob', password='bob')
data = self._edit_data()
data.update(BLANK_EXTERNALS)
response = self.client.post(self.edit_path, data)
assert_equal(response.status_code, 403)
assert 'seriously' not in Submission.objects.get().description
@suppress_locale_middleware
def test_admin_access(self):
"""Check that administrators can see the edit form."""
self.client.login(username='admin', password='admin')
response = self.client.get(self.edit_path)
assert_equal(response.status_code, 200)
@suppress_locale_middleware
def test_admin_edit(self):
"""Check that administrators can edit submissions."""
self.client.login(username='admin', password='admin')
data = self._edit_data()
data.update(BLANK_EXTERNALS)
response = self.client.post(self.edit_path, data)
self.assertRedirects(response, self.view_path)
assert_equal(Submission.objects.get().description, data['description'])
self.client.logout()
class EditLinkTest(TestCase):
def setUp(self):
self.initial_data = setup_ideation_phase(**setup_ignite_challenge())
self.profile = create_user('bob')
self.submission = create_submission(created_by=self.profile,
phase=self.initial_data['ideation_phase'])
self.view_path = self.submission.get_absolute_url()
self.edit_path = self.submission.get_edit_url()
ExternalLink.objects.create(submission=self.submission, name='Foo',
url='http://example.com/')
ExternalLink.objects.create(submission=self.submission, name='Foo',
url='http://example.net/')
self.client.login(username='bob', password='bob')
def tearDown(self):
teardown_ignite_challenge()
ExternalLink.objects.all().delete()
self.client.logout()
def _base_form(self):
submission = Submission.objects.get()
return {'title': submission.title,
'brief_description': submission.brief_description,
'description': submission.description,
'life_improvements': 'This will benefit mankind',
'category': submission.category.id}
@suppress_locale_middleware
def test_preserve_links(self):
"""Test submission when the links are not changed."""
form_data = self._base_form()
links = ExternalLink.objects.all()
form_data.update(_build_links(2, *map(_form_from_link, links)))
response = self.client.post(self.edit_path, form_data)
self.assertRedirects(response, self.view_path)
eq_(ExternalLink.objects.count(), 2)
@suppress_locale_middleware
def test_remove_links(self):
"""Test submission with blank link boxes.
All the links should be deleted, as the forms are blank."""
form_data = self._base_form()
links = ExternalLink.objects.all()
link_forms = [{'id': link.id} for link in links]
form_data.update(_build_links(2, *link_forms))
response = self.client.post(self.edit_path, form_data)
self.assertRedirects(response, self.view_path)
eq_(ExternalLink.objects.count(), 0)
@suppress_locale_middleware
def test_add_links(self):
"""Test adding links to a submission without any."""
ExternalLink.objects.all().delete()
form_data = self._base_form()
link_forms = [{'name': 'Cheese', 'url': 'http://cheese.com/'},
{'name': 'Pie', 'url': 'http://en.wikipedia.org/wiki/Pie'}]
form_data.update(_build_links(0, *link_forms))
response = self.client.post(self.edit_path, form_data)
self.assertRedirects(response, self.view_path)
eq_(ExternalLink.objects.count(), 2)
cheese_link = ExternalLink.objects.get(name='Cheese')
eq_(cheese_link.url, 'http://cheese.com/')
eq_(cheese_link.submission, Submission.objects.get())
class DeleteEntryTest(MessageTestCase):
def setUp(self):
challenge_setup()
create_users()
phase = Phase.objects.get()
phase.name = 'Ideation'
phase.save()
self.alex_profile = User.objects.get(username='alex').get_profile()
submission = self.create_submission()
self.parent = SubmissionParent.objects.create(submission=submission)
base_kwargs = {'project': Project.objects.get().slug,
'slug': Challenge.objects.get().slug}
self.view_path = submission.get_absolute_url()
self.delete_path = submission.get_delete_url()
def create_submission(self, **kwargs):
"""Helper to create a ``Submission``"""
defaults = {
'phase': Phase.objects.get(),
'title': 'A submission',
'brief_description': 'My submission',
'description': 'My wonderful submission',
'created_by': self.alex_profile,
'category': Category.objects.get()
}
if kwargs:
defaults.update(kwargs)
return Submission.objects.create(**defaults)
@suppress_locale_middleware
def test_anonymous_delete_form(self):
"""Check that anonymous users can't get at the form."""
response = self.client.get(self.delete_path)
assert_equal(response.status_code, 302)
@suppress_locale_middleware
def test_anonymous_delete(self):
"""Check that anonymous users can't delete entries."""
response = self.client.post(self.delete_path)
assert_equal(response.status_code, 302)
@suppress_locale_middleware
def test_non_owner_access(self):
"""Check that non-owners cannot see the delete form."""
self.client.login(username='bob', password='bob')
response = self.client.get(self.delete_path)
assert_equal(response.status_code, 404)
@suppress_locale_middleware
def test_non_owner_delete(self):
"""Check that users cannot delete each other's submissions."""
self.client.login(username='bob', password='bob')
response = self.client.post(self.delete_path, {})
assert_equal(response.status_code, 404)
assert Submission.objects.exists()
@suppress_locale_middleware
def test_delete_form(self):
self.client.login(username='alex', password='alex')
response = self.client.get(self.delete_path)
assert_equal(response.status_code, 200)
@suppress_locale_middleware
def test_delete(self):
self.client.login(username='alex', password='alex')
response = self.client.post(self.delete_path, {}, follow=True)
assert_equal(response.redirect_chain[0][1], 302)
assert_equal((Submission.objects.filter(created_by=self.alex_profile)
.count()), 0)
self.assertSuccessMessage(response)
assert_equal((SubmissionParent.objects
.filter(submission__created_by=self.alex_profile)
.count()), 0)
def test_delete_safety(self):
"""Test delete doesn't remove any other user content"""
self.client.login(username='alex', password='alex')
submission_b = self.create_submission(title='b')
SubmissionParent.objects.create(submission=submission_b)
response = self.client.post(self.delete_path, {}, follow=True)
self.assertSuccessMessage(response)
submission_list = Submission.objects.filter(created_by=self.alex_profile)
assert_equal(len(submission_list), 1)
assert_equal(submission_list[0], submission_b)
parent_list = (SubmissionParent.objects
.filter(submission__created_by=self.alex_profile))
assert_equal(len(parent_list), 1)
assert_equal(parent_list[0].submission, submission_b)
@suppress_locale_middleware
def test_delete_versioned_submission_past(self):
"""Deleting an old versioned ``Submission`` should fail"""
submission_b = self.create_submission(title='b')
self.parent.update_version(submission_b)
self.client.login(username='alex', password='alex')
response = self.client.post(self.delete_path, {})
assert_equal(response.status_code, 404)
@suppress_locale_middleware
def test_delete_versioned_submission(self):
"""Deleting a versioned ``Submission`` should take down all the related
content"""
submission_b = self.create_submission(title='b')
self.parent.update_version(submission_b)
self.client.login(username='alex', password='alex')
result = self.client.post(submission_b.get_delete_url(), {})
assert_equal((Submission.objects.filter(created_by=self.alex_profile)
.count()), 0)
assert_equal((SubmissionParent.objects
.filter(submission__created_by=self.alex_profile)
.count()), 0)
assert_equal((SubmissionVersion.objects
.filter(submission__created_by=self.alex_profile)
.count()), 0)
class SubmissionHelpViewTest(TestCase):
def setUp(self):
challenge_setup()
profile_list = create_users()
self.phase = Phase.objects.all()[0]
self.alex = profile_list[0]
self.category = Category.objects.all()[0]
create_submissions(1, self.phase, self.alex)
self.submission_a = Submission.objects.get()
self.parent = self.submission_a.parent
self.help_url = reverse('entry_help', args=[self.parent.slug])
self.valid_data = {
'notes': 'Help Wanted',
'status': SubmissionHelp.PUBLISHED,
}
def tearDown(self):
challenge_teardown()
for model in [SubmissionHelp]:
model.objects.all().delete()
def create_submission_help(self, **kwargs):
defaults = {'parent': self.parent,
'status': SubmissionHelp.PUBLISHED}
if kwargs:
defaults.update(kwargs)
instance, created = SubmissionHelp.objects.get_or_create(**defaults)
return instance
def test_submission_help_anon(self):
response = self.client.get(self.help_url)
eq_(response.status_code, 302)
self.assertTrue(reverse('login') in response['Location'])
response = self.client.post(self.help_url, self.valid_data)
eq_(response.status_code, 302)
self.assertTrue(reverse('login') in response['Location'])
def test_submission_help_not_owner(self):
self.client.login(username='bob', password='bob')
response = self.client.get(self.help_url)
eq_(response.status_code, 404)
response = self.client.post(self.help_url, self.valid_data)
eq_(response.status_code, 404)
def test_submission_published_help(self):
self.client.login(username='alex', password='alex')
response = self.client.get(self.help_url)
eq_(response.status_code, 200)
response = self.client.post(self.help_url, self.valid_data)
ok_(self.submission_a.get_absolute_url() in response['Location'])
eq_(SubmissionHelp.objects.get_active().count(), 1)
def test_submission_help_listing(self):
self.create_submission_help()
response = self.client.get(reverse('entry_help_list'))
eq_(response.status_code, 200)
page = response.context['page']
eq_(page.paginator.count, 1)
def test_submission_help_list_hidden(self):
self.create_submission_help(status=SubmissionHelp.DRAFT)
response = self.client.get(reverse('entry_help_list'))
eq_(response.status_code, 200)
page = response.context['page']
eq_(page.paginator.count, 0)
def store_rendered_templates(store, signal, sender, template, context, **kwargs):
"""
Stores templates and contexts that are rendered.
The context is copied so that it is an accurate representation at the time
of rendering.
Entirely based on the Django Test Client
https://github.com/django/django/blob/master/django/test/client.py#L88
"""
store.setdefault('templates', []).append(template)
store.setdefault('context', ContextList()).append(copy(context))
class TestAddSubmissionView(TestCase):
def __init__(self, *args, **kwargs):
super(TestAddSubmissionView, self).__init__(*args, **kwargs)
# Add context and template to the response
on_template_render = curry(store_rendered_templates, {})
signals.template_rendered.connect(on_template_render,
dispatch_uid="template-render")
def setUp(self):
self.factory = RequestFactory()
self.ideation = MagicMock()
def test_add_submission_get(self):
request = self.factory.get('/')
request.user = AnonymousUser()
request.development = development_mock
response = views.add_submission(request, self.ideation)
eq_(response.status_code, 200)
def test_invalid_form(self):
request = self.factory.post('/', BLANK_EXTERNALS)
request.user = AnonymousUser()
request.development = development_mock
response = views.add_submission(request, self.ideation)
eq_(response.status_code, 200)
| bsd-3-clause | -1,539,046,179,346,587,000 | 40.548139 | 86 | 0.623123 | false |
vitimm144/IEPS | backend/membros/admin.py | 1 | 3400 | from django.contrib import admin
from membros.models import Cargo, HistoricoEclesiastico, Contato
from membros.models import Membro, Endereco
from membros.models import HistoricoFamiliar
from membros.models import Teologia
from ieps.admin import admin_site
class ContatoInline(admin.StackedInline):
model = Contato
verbose_name = 'Contato'
verbose_name_plural = 'Contatos'
extra = 1
class EnderecoInline(admin.StackedInline):
model = Endereco
verbose_name = 'Endereço'
verbose_name_plural = 'Endereços'
fk_name = 'membro'
class HistoricofamiliarInline(admin.StackedInline):
model = HistoricoFamiliar
verbose_name = 'Histórico familiar'
verbose_name_plural = 'Históricos familiares'
class TeologiaInline(admin.StackedInline):
model = Teologia
verbose_name = 'Teologia'
class HistoricoEclesiasticoInline(admin.StackedInline):
model = HistoricoEclesiastico
verbose_name = 'Histórico eclesiático'
verbose_name_plural = 'Históricos eclesiásticos'
class HistoricoEclesiasticoAdmin(admin.ModelAdmin):
list_display = ('data_conversao', 'data_batismo', 'membro', 'cargo_names')
list_filter = ('data_conversao', 'data_batismo', 'membro')
search_fields = ('data_conversao', 'data_batismo', 'membro__nome')
class TeologiaAdmin(admin.ModelAdmin):
list_display = ('curso', 'instituição', 'duracao', 'membro')
list_filter = ('curso', 'instituição', 'duracao', 'membro')
seach_fields = ('curso', 'instituição', 'duracao', 'membro__nome')
class EnderecoAdmin(admin.ModelAdmin):
list_display = ('logradouro', 'numero', 'bairro', 'cep', 'membro')
list_filter = ('logradouro', 'numero', 'bairro', 'cep', 'membro')
search_fields = ('logradouro', 'bairro', 'cep', 'membro__nome')
class ContatoAdmin(admin.ModelAdmin):
list_display = ('residencial', 'celular1', 'celular2', 'membro')
list_filter = ('residencial', 'celular1', 'celular2', 'membro')
search_fields = ('residencial', 'celular1', 'celular2', 'membro__nome')
class MembroAdmin(admin.ModelAdmin):
inlines = (
EnderecoInline,
ContatoInline,
HistoricofamiliarInline,
TeologiaInline,
HistoricoEclesiasticoInline
)
list_display = ('matricula', 'nome', 'data_nascimento', 'endereco', 'contato')
list_filter = ('matricula', 'nome', 'sexo', 'data_nascimento',)
search_fields = (
'matricula',
'nome',
'data_nascimento',
'endereco__logradouro',
'endereco__numero'
)
class CargoAdmin(admin.ModelAdmin):
list_display = ('cargo', 'data_consagracao', 'igreja', 'cidade',)
list_filter = ('cargo', 'igreja', 'cidade',)
search_fields = ('cargo', 'data_consagracao', 'igreja', 'cidade')
class HistoricoFamiliarAdmin(admin.ModelAdmin):
list_display = ('estado_civil', 'data_casamento', 'nome_conjuje', 'membro')
list_filter = ('estado_civil', 'data_casamento', 'nome_conjuje', 'membro')
search_fields = ('estado_civil', 'data_casamento', 'nome_conjuje', 'membro__nome')
# Register your models here.
admin_site.register(Membro, MembroAdmin)
admin_site.register(Cargo, CargoAdmin)
admin_site.register(HistoricoEclesiastico, HistoricoEclesiasticoAdmin)
admin_site.register(Endereco, EnderecoAdmin)
admin_site.register(Contato, ContatoAdmin)
admin_site.register(HistoricoFamiliar, HistoricoFamiliarAdmin)
| gpl-2.0 | -1,551,099,497,174,133,800 | 32.524752 | 86 | 0.696692 | false |
michaelconnor00/gbdxtools | tests/unit/test_workflow.py | 1 | 4047 | """
Authors: Kostas Stamatiou, Donnie Marino
Contact: [email protected]
Unit test the workflow class
"""
from gbdxtools import Interface
from gbdxtools.workflow import Workflow
from auth_mock import get_mock_gbdx_session
import vcr
import unittest
import os
import json
"""
How to use the mock_gbdx_session and vcr to create unit tests:
1. Add a new test that is dependent upon actually hitting GBDX APIs.
2. Decorate the test with @vcr appropriately, supply a yaml file path to gbdxtools/tests/unit/cassettes
note: a yaml file will be created after the test is run
3. Replace "dummytoken" with a real gbdx token after running test successfully
4. Run the tests (existing test shouldn't be affected by use of a real token). This will record a "cassette".
5. Replace the real gbdx token with "dummytoken" again
6. Edit the cassette to remove any possibly sensitive information (s3 creds for example)
"""
class WorkflowTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
# create mock session, replace dummytoken with real token to create cassette
mock_gbdx_session = get_mock_gbdx_session(token="dummytoken")
cls.gbdx = Interface(gbdx_connection=mock_gbdx_session)
# setup mock data paths
cls.data_path = os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)), "data"))
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
wf = Workflow(self.gbdx)
self.assertTrue(isinstance(wf, Workflow))
self.assertTrue(wf.s3 is not None)
self.assertTrue(wf.gbdx_connection is not None)
@vcr.use_cassette('tests/unit/cassettes/test_batch_workflows.yaml', filter_headers=['authorization'])
def test_batch_workflows(self):
"""
tests all 3 endpoints for batch workflows, create, fetch, and cancel
:return:
"""
wf = Workflow(self.gbdx)
with open(os.path.join(self.data_path, "batch_workflow.json")) as json_file:
self.batch_workflow_json = json.loads(json_file.read())
# test create
batch_workflow_id = wf.launch_batch_workflow(self.batch_workflow_json)
# test status
batch_workflow_status = wf.batch_workflow_status(batch_workflow_id)
self.assertEqual(batch_workflow_id, batch_workflow_status.get("batch_workflow_id"))
# test cancel
batch_workflow_status = wf.batch_workflow_cancel(batch_workflow_id)
workflows = batch_workflow_status.get('workflows')
for workflow in workflows:
self.assertTrue(workflow.get('state') in ["canceling", "canceled"])
@vcr.use_cassette('tests/unit/cassettes/test_workflow_get.yaml', filter_headers=['authorization'])
def test_workflow_get(self):
"""
test gbdx.workflows.get(<workflow_id>)
"""
wf = Workflow(self.gbdx)
output = wf.get('4488969848362445219')
self.assertTrue('id' in output.keys())
self.assertTrue('owner' in output.keys())
self.assertTrue('submitted_time' in output.keys())
self.assertTrue('state' in output.keys())
self.assertTrue('callback' in output.keys())
self.assertTrue('tasks' in output.keys())
@vcr.use_cassette('tests/unit/cassettes/test_task_get_stdout.yaml', filter_headers=['authorization'])
def test_task_get_stdout(self):
"""
test gbdx.workflows.get_stdout(<workflow_id>,<task_id>)
"""
wf = Workflow(self.gbdx)
output = wf.get_stdout('4488969848362445219','4488969848354891944')
self.assertTrue(len(output) > 0)
@vcr.use_cassette('tests/unit/cassettes/test_task_get_stderr.yaml', filter_headers=['authorization'])
def test_task_get_stderr(self):
"""
test gbdx.workflows.get_stdout(<workflow_id>,<task_id>)
"""
wf = Workflow(self.gbdx)
output = wf.get_stderr('4488969848362445219','4488969848354891944')
self.assertEquals('<empty>', output)
| mit | 3,839,698,516,540,468,700 | 33.896552 | 110 | 0.67062 | false |
stormi/tsunami | src/bases/anaconf/analyseur.py | 1 | 9969 | # -*-coding:Utf-8 -*
# Copyright (c) 2010 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from bases.anaconf.fichier_configuration import FichierConfiguration
"""Ce fichier définit la classe Analyseur détaillée plus bas.
Note : l'analyse se fait par évaluation de chaînes contenant du code Python.
Si ces chaînes font référence à des objets, fonctions, méthodes, elles doivent
être définies dans les globales de l'interpréteur grâce à la méthode
'_set_globales'. Voir l'aide pour plus d'informations.
"""
import os
import sys
class Analyseur:
"""Analyseur de fichiers de configuration.
Cette classe définit un gestionnaire de fichiers de configuration chargé
de lire, écrire et interpréter les données de configuration.
Le schéma d'exécution est le suivant :
- On souhaite charger un fichier de configuration. Le fichier est lu,
mis en mémoire et filtré. La forme des données de configuration
est des plus simples : une ligne, constituée du nom de l'information
suivie d'un signe égal (=) puis du résultat. Le résultat de la donnée
sera interprété par Python à l'aide de la fonction eval, au moment
où elle sera demandée. Cela permet d'avoir des données variables,
flottantes et propres à l'exécution d'une commande par exemple.
Une ligne peut être découpée en plusieurs morceaux. Un signe \ doit
être placé à la fin de la ligne qui doit se prolonger. Ce signe
n'a aucun effet sur la dernière ligne du fichier.
Notez que d'autres délimiteurs sont possibles pour coller au
maximum à la syntaxe Python. Une ligne se terminant par exemple par
une virgule ou un crochet ouvrant sera considérée comme se
poursuivant plus bas.
Une ligne peut également être un commentaire, elle commencera alors par
'#' et sera ignorée.
Note: si dans le fichier de configuration, une référence est faite
à une fonction ou une classe, il est nécessaire que la fonction ou
classe soit déclarée comme globale de l'interpréteur (voir
'_set_globales').
- Lors de la lecture, chaque nom de donnée est stocké à même l'objet,
en tant qu'attribut d'instance. Ainsi les noms des donénes devront
respecter une syntaxe Python, sans espaces ni accents ni caractères
spéciaux, hormis le signe souligné (_). Il est également préférable
de n'avoir aucun caractère en majuscule, pour des raisons de
convention. Le résultat de la donnée est enregistré en tant
que valeur de l'attribut, mais non interprété.
Les commentaires sont tout simplement ignorés.
- On demande à l'objet d'obtenir la valeur d'une donnée. Dans ce cas et
dans ce cas uniquement, la donnée est interprétée, puis retournée.
On se base sur la fonction eval pour interpréter la donnée.
Exemple :
* fichier general.cfg
port = 3933
attente_connexion = 0.2
* le constructeur d'Analyseur construit un objet contenant deux
attributs, port et attente_connexion
* quand on souhaite y accéder, on interprète la donnée
Autrement dit, analyseur.port appellera eval("3933")
Cela est rendu possible par la redéfinition de __getattribute__
"""
def __init__(self, nom_fichier, nom_defaut, defaut, logger):
"""Lit et charge un fichier de configuration.
Cette méthode permet de lire et enregistrer les données de
configuration propres au fichier de configuration.
Notez qu'un analyseur doit être construit depuis la méthode
'get_config' d''anaconf' et non directement.
On passe en paramètre du constructeur :
* Le nom du fichier de configuration. Il permet naturellement
de l'ouvrir, étape indispensable pour l'analyser
* Le nom du fichier par défaut (il s'agit d'un repère, si une erreur
est trouvée dans l'interprétation du modèle)
* La chaîne contenant le fichier de configuration par défaut.
Cette chaîne se trouve en dur dans le code. Elle est
indispensable : un fichier de configuration ne peut être interprété
sans son modèle. Pour le modèle de la configuration globale, voir :
src/corps/config.py
* Le logger : il est propre à cet analyseur et permet de faire
remonter les erreurs liées à l'interprétation du modèle ou du
fichier de configuration
La configuration trouvée dans le fichier prime naturellement sur
celle par défaut. La chaîne defaut n'est utilisé que si des
données ne sont pas trouvées, ou pour effacer des données périmées.
"""
self._globales = {
'oui': True,
'non': False,
'on': True,
'off': False,
}
self._logger = logger
self._logger.filtrer_niveau("warning")
# On cherche le fichier pour commencer
fichier_charge = None
if not os.path.exists(nom_fichier):
self._logger.info("Le fichier de configuration {} n'existe pas " \
"encore".format(nom_fichier))
elif not os.access(nom_fichier, os.R_OK):
self._logger.warning("Le fichier de configuration {} n'est pas " \
"accessible en lecture".format(nom_fichier))
elif not os.access(nom_fichier, os.W_OK):
self._logger.warning("Le fichier de configuration {} n'est pas " \
"accessible en écriture".format(nom_fichier))
elif not os.path.isfile(nom_fichier):
self._logger.warning("Le fichier de configuration {} n'est pas un " \
"fichier, accès impossible".format(nom_fichier))
else: # on va pouvoir lire le fichier
with open(nom_fichier, 'r') as fichier_conf:
contenu = fichier_conf.read()
# On analyse le fichier de configuration
fichier_charge = FichierConfiguration(nom_fichier, contenu,
self._logger)
# On analyse le fichier par défaut
fichier_defaut = FichierConfiguration(nom_defaut, defaut,
self._logger)
# On met à jour self
complet = dict(fichier_defaut.donnees)
if fichier_charge:
complet.update(fichier_charge.donnees)
# On met à jour self.__dict__
self.__dict__.update(complet)
# On réenregistre le fichier de configuration si nécessaire
if fichier_charge is None or fichier_defaut.donnees.keys() != \
fichier_charge.donnees.keys():
self._logger.info("On réécrit le fichier {}".format(nom_fichier))
fichier_conf = open(nom_fichier, 'w')
# On demande au fichier par défaut de prendre en compte les
# données de configuration du fichier chargé
if fichier_charge:
fichier_defaut.mettre_a_jour(fichier_charge)
fichier_conf.write(fichier_defaut.fichier.strip("\n"))
fichier_conf.close()
def __getattribute__(self, nom):
"""Retourne l'évaluation de la donnée de configuration."""
if nom.startswith("_") or not object.__getattribute__(self, nom):
return object.__getattribute__(self, nom)
elif nom in self.__dict__.keys():
attribut = object.__getattribute__(self, nom)
return eval(attribut, self._globales)
else:
raise ValueError("La donnée '{}' n'a pu être trouvée dans " \
"cette configuration".format(nom))
def _set_globales(self, globales):
"""Paramètre les globales, données sous la forme d'un dictionnaires.
Ces globales sont utilisées dans l'évaluation de données de
configuration.
Si par exemple une de vos données de configuration fait appel à la
fonction 'randrange', il faut l'ajouter dans les globales.
>>> import random
>>> analyseur = Analyseur("....cfg")
>>> analyseur._set_globales({"randrange": random.randrange})
>>> analyseur.hasard # contient randrange(8)
6
"""
self._globales.update(globales)
| bsd-3-clause | -2,331,608,658,569,466,400 | 47.384236 | 81 | 0.661474 | false |
jku/telepathy-gabble | tests/twisted/jingle/decloak-peer.py | 1 | 2246 | """
Test use-case when client attempts to call an unsubscribed contact. Gabble
should ask them to "de-cloak".
"""
from gabbletest import exec_test
from servicetest import (make_channel_proxy, call_async, sync_dbus,
assertEquals, assertLength)
import jingletest
import dbus
from twisted.words.xish import xpath
import constants as cs
import ns
def test(q, bus, conn, stream):
jt = jingletest.JingleTest(stream, 'test@localhost', '[email protected]/Foo')
jt2 = jingletest.JingleTest(stream, 'test@localhost', '[email protected]/Foo')
# Make gabble think this is a different client
jt2.remote_caps['node'] = 'http://example.com/fake-client1'
run_test(q, bus, conn, stream, jt, True)
run_test(q, bus, conn, stream, jt2, False)
def run_test(q, bus, conn, stream, jt, decloak_allowed):
"""
Requests streams on a media channel to jt.remote_jid without having their
presence at all.
"""
request = dbus.Dictionary({ cs.CHANNEL_TYPE: cs.CHANNEL_TYPE_STREAMED_MEDIA,
cs.TARGET_HANDLE_TYPE: cs.HT_CONTACT,
cs.TARGET_ID: jt.remote_jid
}, signature='sv')
path, props = conn.CreateChannel(request, dbus_interface=cs.CONN_IFACE_REQUESTS)
media_iface = make_channel_proxy(conn, path, 'Channel.Type.StreamedMedia')
handle = props[cs.TARGET_HANDLE]
call_async(q, media_iface, 'RequestStreams', handle,
[cs.MEDIA_STREAM_TYPE_AUDIO])
e = q.expect('stream-presence',
to=jt.remote_bare_jid, presence_type=None)
nodes = xpath.queryForNodes('/presence/temppres[@xmlns="%s"]'
% ns.TEMPPRES, e.stanza)
assertLength(1, nodes)
assertEquals('media', nodes[0].getAttribute('reason'))
if decloak_allowed:
jt.send_remote_presence()
info_event = q.expect('stream-iq', query_ns=ns.DISCO_INFO,
to=jt.remote_jid)
jt.send_remote_disco_reply(info_event.stanza)
# RequestStreams should now happily complete
q.expect('dbus-return', method='RequestStreams')
else:
q.expect('dbus-error', method='RequestStreams',
name=cs.OFFLINE)
if __name__ == '__main__':
exec_test(test, timeout=10)
| lgpl-2.1 | -7,175,395,740,582,618,000 | 34.09375 | 84 | 0.643811 | false |
ScalaInc/exp-python2-sdk | tests/test_auth.py | 1 | 1115 | import unittest
import requests
import time
from . import utils
class AuthBase (object):
def test_authentication (self):
self.exp.get_auth()
def test_token_refresh (self):
self.exp._sdk.authenticator._login()
self.exp._sdk.authenticator._refresh()
def test_refresh_401 (self):
auth = self.exp.get_auth()
auth['token'] = auth['token'] + 'blah'
self.exp._sdk.options['uuid'] = 'blah'
self.exp._sdk.options['username'] = 'blah'
try:
self.exp._sdk.authenticator._refresh()
except self.exp_sdk.AuthenticationError:
pass
else:
raise Exception
class TestDeviceAuth (AuthBase, utils.Device, unittest.TestCase): pass
class TestUserAuth (AuthBase, utils.User, unittest.TestCase): pass
class TestConsumerAuth (AuthBase, utils.Consumer, unittest.TestCase): pass
class TestDevice401 (utils.Base, unittest.TestCase):
def test_login_401 (self):
self.device_credentials['uuid'] = 'wrong uuid'
try:
exp = self.exp_sdk.start(**self.device_credentials)
except self.exp_sdk.AuthenticationError:
pass
else:
raise Exception
| mit | 7,760,974,650,108,637,000 | 23.23913 | 74 | 0.686996 | false |
eXistenZNL/SickRage | sickbeard/providers/thepiratebay.py | 1 | 9310 | # Author: Mr_Orange <[email protected]>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from __future__ import with_statement
import time
import re
import urllib, urllib2, urlparse
import sys
import os
import datetime
import sickbeard
import generic
from sickbeard.common import Quality, cpu_presets
from sickbeard.name_parser.parser import NameParser, InvalidNameException, InvalidShowException
from sickbeard import db
from sickbeard import classes
from sickbeard import logger
from sickbeard import tvcache
from sickbeard import helpers
from sickbeard import clients
from sickbeard.show_name_helpers import allPossibleShowNames, sanitizeSceneName
from sickbeard.common import Overview
from sickbeard.exceptions import ex
from sickbeard import encodingKludge as ek
import requests
from requests import exceptions
from unidecode import unidecode
class ThePirateBayProvider(generic.TorrentProvider):
def __init__(self):
generic.TorrentProvider.__init__(self, "ThePirateBay")
self.supportsBacklog = True
self.enabled = False
self.ratio = None
self.confirmed = False
self.minseed = None
self.minleech = None
self.cache = ThePirateBayCache(self)
self.urls = {'base_url': 'https://thepiratebay.gd/'}
self.url = self.urls['base_url']
self.searchurl = self.url + 'search/%s/0/7/200' # order by seed
self.re_title_url = '/torrent/(?P<id>\d+)/(?P<title>.*?)//1".+?(?P<url>magnet.*?)//1".+?(?P<seeders>\d+)</td>.+?(?P<leechers>\d+)</td>'
def isEnabled(self):
return self.enabled
def imageName(self):
return 'thepiratebay.png'
def _get_season_search_strings(self, ep_obj):
search_string = {'Season': []}
for show_name in set(allPossibleShowNames(self.show)):
if ep_obj.show.air_by_date or ep_obj.show.sports:
ep_string = show_name + ' ' + str(ep_obj.airdate).split('-')[0]
search_string['Season'].append(ep_string)
ep_string = show_name + ' Season ' + str(ep_obj.airdate).split('-')[0]
search_string['Season'].append(ep_string)
elif ep_obj.show.anime:
ep_string = show_name + ' ' + "%02d" % ep_obj.scene_absolute_number
search_string['Season'].append(ep_string)
else:
ep_string = show_name + ' S%02d' % int(ep_obj.scene_season)
search_string['Season'].append(ep_string)
ep_string = show_name + ' Season ' + str(ep_obj.scene_season) + ' -Ep*'
search_string['Season'].append(ep_string)
search_string['Season'].append(ep_string)
return [search_string]
def _get_episode_search_strings(self, ep_obj, add_string=''):
search_string = {'Episode': []}
if self.show.air_by_date:
for show_name in set(allPossibleShowNames(self.show)):
ep_string = sanitizeSceneName(show_name) + ' ' + \
str(ep_obj.airdate).replace('-', ' ')
search_string['Episode'].append(ep_string)
elif self.show.sports:
for show_name in set(allPossibleShowNames(self.show)):
ep_string = sanitizeSceneName(show_name) + ' ' + \
str(ep_obj.airdate).replace('-', '|') + '|' + \
ep_obj.airdate.strftime('%b')
search_string['Episode'].append(ep_string)
elif self.show.anime:
for show_name in set(allPossibleShowNames(self.show)):
ep_string = sanitizeSceneName(show_name) + ' ' + \
"%02i" % int(ep_obj.scene_absolute_number)
search_string['Episode'].append(ep_string)
else:
for show_name in set(allPossibleShowNames(self.show)):
ep_string = sanitizeSceneName(show_name) + ' ' + \
sickbeard.config.naming_ep_type[2] % {'seasonnumber': ep_obj.scene_season,
'episodenumber': ep_obj.scene_episode} + '|' + \
sickbeard.config.naming_ep_type[0] % {'seasonnumber': ep_obj.scene_season,
'episodenumber': ep_obj.scene_episode} + ' %s' % add_string
search_string['Episode'].append(re.sub('\s+', ' ', ep_string))
return [search_string]
def _doSearch(self, search_params, search_mode='eponly', epcount=0, age=0, epObj=None):
results = []
items = {'Season': [], 'Episode': [], 'RSS': []}
for mode in search_params.keys():
for search_string in search_params[mode]:
if isinstance(search_string, unicode):
search_string = unidecode(search_string)
if mode != 'RSS':
searchURL = self.searchurl % (urllib.quote(search_string))
else:
searchURL = self.url + 'tv/latest/'
logger.log(u"Search string: " + searchURL, logger.DEBUG)
data = self.getURL(searchURL)
if not data:
continue
re_title_url = self.proxy._buildRE(self.re_title_url).replace('&f=norefer', '')
matches = re.compile(re_title_url, re.DOTALL).finditer(urllib.unquote(data))
for torrent in matches:
title = torrent.group('title')
url = torrent.group('url')
id = int(torrent.group('id'))
seeders = int(torrent.group('seeders'))
leechers = int(torrent.group('leechers'))
#Filter unseeded torrent
if mode != 'RSS' and (seeders < self.minseed or leechers < self.minleech):
continue
#Accept Torrent only from Good People for every Episode Search
if self.confirmed and re.search('(VIP|Trusted|Helper|Moderator)', torrent.group(0)) is None:
logger.log(u"ThePirateBay Provider found result " + torrent.group(
'title') + " but that doesn't seem like a trusted result so I'm ignoring it", logger.DEBUG)
continue
if not title or not url:
continue
item = title, url, id, seeders, leechers
items[mode].append(item)
#For each search mode sort all the items by seeders
items[mode].sort(key=lambda tup: tup[3], reverse=True)
results += items[mode]
return results
def _get_title_and_url(self, item):
title, url, id, seeders, leechers = item
if title:
title = self._clean_title_from_provider(title)
if url:
url = url.replace('&', '&')
return (title, url)
def findPropers(self, search_date=datetime.datetime.today()):
results = []
myDB = db.DBConnection()
sqlResults = myDB.select(
'SELECT s.show_name, e.showid, e.season, e.episode, e.status, e.airdate FROM tv_episodes AS e' +
' INNER JOIN tv_shows AS s ON (e.showid = s.indexer_id)' +
' WHERE e.airdate >= ' + str(search_date.toordinal()) +
' AND (e.status IN (' + ','.join([str(x) for x in Quality.DOWNLOADED]) + ')' +
' OR (e.status IN (' + ','.join([str(x) for x in Quality.SNATCHED]) + ')))'
)
if not sqlResults:
return []
for sqlshow in sqlResults:
self.show = helpers.findCertainShow(sickbeard.showList, int(sqlshow["showid"]))
if self.show:
curEp = self.show.getEpisode(int(sqlshow["season"]), int(sqlshow["episode"]))
searchString = self._get_episode_search_strings(curEp, add_string='PROPER|REPACK')
for item in self._doSearch(searchString[0]):
title, url = self._get_title_and_url(item)
results.append(classes.Proper(title, url, search_date, self.show))
return results
def seedRatio(self):
return self.ratio
class ThePirateBayCache(tvcache.TVCache):
def __init__(self, provider):
tvcache.TVCache.__init__(self, provider)
# only poll ThePirateBay every 10 minutes max
self.minTime = 20
def _getRSSData(self):
search_params = {'RSS': ['rss']}
return {'entries': self.provider._doSearch(search_params)}
provider = ThePirateBayProvider()
| gpl-3.0 | 9,199,119,350,806,031,000 | 37.630705 | 143 | 0.574544 | false |
Ulauncher/Ulauncher | setup.py | 1 | 8132 | #!/usr/bin/env python3
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
import os
import sys
from itertools import takewhile, dropwhile
try:
import DistUtilsExtra.auto
except ImportError:
print('To build ulauncher you need "python3-distutils-extra"', file=sys.stderr)
sys.exit(1)
assert DistUtilsExtra.auto.__version__ >= '2.18', \
'needs DistUtilsExtra.auto >= 2.18'
def update_config(libdir, values={}):
filename = os.path.join(libdir, 'ulauncher/config.py')
oldvalues = {}
try:
fin = open(filename, 'r')
fout = open(filename + '.new', 'w')
for line in fin:
fields = line.split(' = ') # Separate variable from value
if fields[0] in values:
oldvalues[fields[0]] = fields[1].strip()
line = "%s = %s\n" % (fields[0], values[fields[0]])
fout.write(line)
fout.flush()
fout.close()
fin.close()
os.rename(fout.name, fin.name)
except (OSError, IOError):
print("ERROR: Can't find %s" % filename)
sys.exit(1)
return oldvalues
def move_desktop_file(root, target_data, prefix):
# The desktop file is rightly installed into install_data. But it should
# always really be installed into prefix, because while we can install
# normal data files anywhere we want, the desktop file needs to exist in
# the main system to be found. Only actually useful for /opt installs.
old_desktop_path = os.path.normpath(root + target_data + '/share/applications')
old_desktop_file = old_desktop_path + '/ulauncher.desktop'
desktop_path = os.path.normpath(root + prefix + '/share/applications')
desktop_file = desktop_path + '/ulauncher.desktop'
if not os.path.exists(old_desktop_file):
print("ERROR: Can't find", old_desktop_file)
sys.exit(1)
elif target_data != prefix + '/':
# This is an /opt install, so rename desktop file to use extras-
desktop_file = desktop_path + '/extras-ulauncher.desktop'
try:
os.makedirs(desktop_path)
os.rename(old_desktop_file, desktop_file)
os.rmdir(old_desktop_path)
except OSError as e:
print("ERROR: Can't rename", old_desktop_file, ":", e)
sys.exit(1)
return desktop_file
def update_desktop_file(filename, target_pkgdata, target_scripts):
def is_env(p):
return p == 'env' or '=' in p
try:
fin = open(filename, 'r')
fout = open(filename + '.new', 'w')
for line in fin:
if 'Exec=' in line:
cmd = line.split("=", 1)[1]
# persist env vars
env_vars = ''
if cmd.startswith('env '):
env_vars = ' '.join(list(takewhile(is_env, cmd.split()))) \
+ ' '
cmd = ' '.join(list(dropwhile(is_env, cmd.split())))
cmd = cmd.split(None, 1)
line = "Exec=%s%s%s" % (env_vars, target_scripts, 'ulauncher')
if len(cmd) > 1:
line += " %s" % cmd[1].strip() # Add script arguments back
line += "\n"
fout.write(line)
fout.flush()
fout.close()
fin.close()
os.rename(fout.name, fin.name)
except (OSError, IOError):
print("ERROR: Can't find %s" % filename)
sys.exit(1)
class InstallAndUpdateDataDirectory(DistUtilsExtra.auto.install_auto):
def run(self):
DistUtilsExtra.auto.install_auto.run(self)
target_data = '/' + os.path.relpath(self.install_data, self.root) + '/'
target_pkgdata = target_data + 'share/ulauncher/'
target_scripts = '/' + os.path.relpath(self.install_scripts,
self.root) + '/'
values = {'__ulauncher_data_directory__': "'%s'" % (target_pkgdata),
'__version__': "'%s'" % self.distribution.get_version()}
update_config(self.install_lib, values)
desktop_file = move_desktop_file(self.root, target_data, self.prefix)
update_desktop_file(desktop_file, target_pkgdata, target_scripts)
class DataFileList(list):
def append(self, item):
# don't add node_modules to data_files that DistUtilsExtra tries to
# add automatically
filename = item[1][0]
if 'node_modules' in filename \
or 'bower_components' in filename or '.tmp' in filename:
return
else:
return super(DataFileList, self).append(item)
def exclude_files(patterns=[]):
"""
Suppress completely useless warning about files DistUtilsExta.aut does
recognize because require developer to scroll past them to get to useful
output.
Example of the useless warnings:
WARNING: the following files are not recognized by DistUtilsExtra.auto:
Dockerfile.build
Dockerfile.build-arch
Dockerfile.build-rpm
PKGBUILD.template
scripts/aur-update.py
"""
# it's maddening the DistUtilsExtra does not offer a way to exclude globs
# from it's scans and just using "print" to print the warning instead of
# using warning module which has a mechanism for suppressions
# it forces us to take the approach of monkeypatching their src_find
# function.
original_src_find = DistUtilsExtra.auto.src_find
def src_find_with_excludes(attrs):
src = original_src_find(attrs)
for pattern in patterns:
DistUtilsExtra.auto.src_markglob(src, pattern)
return src
DistUtilsExtra.auto.src_find = src_find_with_excludes
return original_src_find
def main():
# exclude files/folder patterns from being considered by distutils-extra
# this returns the original DistUtilsExtra.auto.src_find function
# so we can patch bit back in later
original_find_src = exclude_files([
"*.sh",
"ul",
"Dockerfile.build*",
"PKGBUILD.template",
"scripts/*",
"docs/*",
"glade",
"test",
"ulauncher.desktop.dev",
"requirements.txt",
])
DistUtilsExtra.auto.setup(
name='ulauncher',
version='%VERSION%',
license='GPL-3',
author='Aleksandr Gornostal',
author_email='[email protected]',
description='Application launcher for Linux',
url='https://ulauncher.io',
data_files=DataFileList([
('share/icons/hicolor/48x48/apps', [
'data/media/icons/hicolor/ulauncher.svg'
]),
('share/icons/hicolor/48x48/apps', [
'data/media/icons/hicolor/ulauncher-indicator.svg'
]),
('share/icons/hicolor/scalable/apps', [
'data/media/icons/hicolor/ulauncher.svg'
]),
('share/icons/hicolor/scalable/apps', [
'data/media/icons/hicolor/ulauncher-indicator.svg'
]),
# for fedora + GNOME
('share/icons/gnome/scalable/apps', [
'data/media/icons/hicolor/ulauncher.svg'
]),
('share/icons/gnome/scalable/apps', [
'data/media/icons/hicolor/ulauncher-indicator.svg'
]),
# for ubuntu
('share/icons/breeze/apps/48', [
'data/media/icons/ubuntu-mono-light/ulauncher-indicator.svg'
]),
('share/icons/ubuntu-mono-dark/scalable/apps', [
'data/media/icons/ubuntu-mono-dark/ulauncher-indicator.svg'
]),
('share/icons/ubuntu-mono-light/scalable/apps', [
'data/media/icons/ubuntu-mono-light/ulauncher-indicator.svg'
]),
('share/icons/elementary/scalable/apps', [
'data/media/icons/elementary/ulauncher-indicator.svg'
]),
]),
cmdclass={'install': InstallAndUpdateDataDirectory}
)
# unpatch distutils-extra src_find
DistUtilsExtra.auto.src_find = original_find_src
if __name__ == '__main__':
main()
| gpl-3.0 | 1,316,542,955,617,081,900 | 32.603306 | 83 | 0.581776 | false |
Duke-LeTran/practice-and-notes | matplotlib.py | 1 | 1445 | # -*- coding: utf-8 -*-
"""
Created on Thu Mar 26 17:32:58 2020
@author: dukel
"""
#%% 00. Initialize
import matplotlib.pyplot as plt
ls_x = [1,2,3,4]
ls_y = [1,4,9,16]
ls_z = [2,3,4,5]
#%% I. Basic Plotting
plt.plot(ls_x, # x
ls_y, # y
color='green',
linestyle='dashed',
label='dashed') #to add to legend
plt.plot([2,3,4,5], # x
[2,3,4,5], # y
color='#2B5B84',
linestyle='dashdot',
label='dashed-dot') #to add to legend
# options to show
plt.title('TESTING123!@#') #title
plt.xlabel('lol whats up') #x axis label
plt.ylabel('Important Figures') #y axis label
plt.legend()
plt.show()
#%% II. Subplots
## FIRST PANEL
plt.subplot(2, #rows
1, #columns
1) #1st panel <--- THIS IS KEY
plt.plot(ls_x,
ls_y,
color='green',
linestyle='dashdot')
## SECOND PANEL
plt.subplot(2, #rows
1, #columns
2) #1st panel <--- THIS IS KEY
plt.plot(ls_z,
ls_z,
color='#2B5B84',
linestyle='dashed')
## plt.show()
plt.show()
#%% III. Setting axis limits
## FIRST PANEL
panel_1 = plt.subplot(2,1,1)
plt.plot(ls_x, ls_y, color='green', linestyle='dashdot')
panel_1.set_xlim([0,6]) # set boundaries, aka limits, to x-axis
panel_1.set_xlim([0,20])
## SECOND PANEL
panel_2 = plt.subplot(2,1,2)
plt.plot(ls_z, ls_z, color='#2B5B84', linestyle='dashed')
panel_2.set_xlim([0,6])
plt.show()
| mit | 7,879,215,343,814,245,000 | 19.942029 | 63 | 0.56609 | false |
shootsoft/practice | lintcode/NineChapters/03/binary-tree-serialization.py | 1 | 4169 | __author__ = 'yinjun'
"""
Definition of TreeNode:
class TreeNode:
def __init__(self, val):
this.val = val
this.left, this.right = None, None
"""
class Solution:
'''
@param root: An object of TreeNode, denote the root of the binary tree.
This method will be invoked first, you should design your own algorithm
to serialize a binary tree which denote by a root node to a string which
can be easily deserialized by your own "deserialize" method later.
'''
def serialize(self, root):
# write your code here
result = []
result.append(self.preorderTraversal(root))
result.append(self.inorderTraversal(root))
return result
def inorderTraversal(self, root):
# write your code here
stack = []
dict = {}
dictStack = {}
result = []
if root == None:
return result
if root.right!=None:
stack.append(root.right)
dictStack[root.right] = 1
stack.append(root)
dictStack[root] = 1
if root.left!=None:
stack.append(root.left)
dictStack[root.left] = 1
l = len(stack)
while l>0:
#print result
p = stack.pop()
dictStack.pop(p)
l -= 1
if p.left ==None or p.left !=None and p.left in dict:
dict[p] = 1
result.append(p.val)
if p.right!=None and p.right not in dictStack:
stack.append(p.right)
dictStack[p.right] = 1
l += 1
else:
if p.right!=None:
stack.append(p.right)
dictStack[p.right] = 1
stack.append(p)
dictStack[p] = 1
if p.left!=None:
stack.append(p.left)
dictStack[p.left] = 1
l = len(stack)
return result
def preorderTraversal(self, root):
# write your code here
stack = []
result = []
if root == None:
return result
stack.append(root)
l = 1
while l>0:
p = stack.pop()
l -= 1
result.append(p.val)
if p.right != None:
stack.append(p.right)
l += 1
if p.left != None:
stack.append(p.left)
l += 1
return result
'''
@param data: A string serialized by your serialize method.
This method will be invoked second, the argument data is what exactly
you serialized at method "serialize", that means the data is not given by
system, it's given by your own serialize method. So the format of data is
designed by yourself, and deserialize it here as you serialize it in
"serialize" method.
'''
def deserialize(self, data):
# write your code here
if data==None or data == [] or len(data)!=2:
return None
return self.buildTree(data[0], data[1])
def buildTree(self, preorder, inorder):
# write your code here
if preorder ==[] and inorder == []:
return None
root = TreeNode(preorder[0])
inpos = inorder.index(preorder[0])
if inpos>0:
left_pre = preorder[1:inpos+1]
left_in = inorder[0:inpos]
root.left = self.buildTree(left_pre, left_in)
length = len(inorder)
if inpos + 1 < length:
right_pre = preorder[inpos+1:]
right_in = inorder[inpos+1:]
root.right = self.buildTree(right_pre, right_in)
return root
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
s = Solution()
n1 = TreeNode(1)
n2 = TreeNode(2)
n3 = TreeNode(3)
n4 = TreeNode(4)
n1.left = n2
n2.left = n3
n3.left = n4
print s.serialize(n1)
#print s.serialize(n1)
#print s.serialize(s.deserialize([1, '#', 2]))
#print s.serialize(s.deserialize([1,2,3,'#','#',4,5]))
#print s.serialize(s.deserialize([1, 2, '#', 3, '#',4])) | apache-2.0 | 7,999,381,860,744,141,000 | 24.582822 | 77 | 0.523387 | false |
pmutale/www.mutale.nl | stick2uganda/models.py | 1 | 3647 | from __future__ import unicode_literals
from ckeditor.fields import RichTextField
from filer.fields.image import FilerImageField
from cms.models import CMSPlugin, PlaceholderField
from django.utils.translation import ugettext_lazy as _
from django.db import models
class Project(models.Model):
name = models.CharField(max_length=128, null=False)
image = FilerImageField(null=True, blank=True)
project_summary = RichTextField(null=True, blank=True)
location = models.CharField(max_length=128, null=False)
start_implementation = models.DateField(null=True, blank=True)
end_implementation = models.DateField(null=True, blank=True)
def __str__(self):
return '{} in {}'.format(self.name, self.location)
class ContactPerson(models.Model):
name = models.CharField(max_length=128)
telephone = models.CharField(max_length=128, null=True, blank=True)
email = models.EmailField(max_length=128, null=True, blank=True)
project = models.ForeignKey('Project', on_delete=models.CASCADE, null=True, related_name='projects_contact')
def __str__(self):
return '{} - {}'.format(self.name, self.email)
class Report(models.Model):
version = models.CharField(max_length=128, null=True, blank=True,
help_text=(_('Use numbers <small>e.g</small> A or B')))
completed = models.DateField(null=True, blank=True)
question = models.ForeignKey('Question', on_delete=models.CASCADE, related_name='questions')
project = models.ForeignKey('Project', on_delete=models.CASCADE, null=True, related_name='projects_report')
class Meta:
permissions = (
('can_add_report', 'Can add Rreport'),
('can_edit_report', 'Can edit Report')
)
def __str__(self):
return '{} completed on {}'.format(self.version, self.completed)
class Question(models.Model):
def composition():
outline = {
'findings': u'<p>Add Your Findings here<p><p>Identify Your Audience here<p>'
u'<p>Add Your Findings Description here<p><p>Add Your Conclusion and Recommendations here<p>',
}
return outline
# f_default = composition()
# defaults = f_default.values()
number = models.IntegerField(null=True, blank=True, help_text=(_('Use numbers <small>e.g</small> 1, 2 or 3')))
question = models.CharField(max_length=128, null=True, blank=True)
findings = RichTextField(null=True, blank=True,
default=composition()['findings'],
help_text=_(
'Do not delete the tags <pre><code><p> ... <p></code></pre>'
))
image = models.ImageField(max_length=128000, null=True, blank=True, upload_to='media/project')
project = models.ForeignKey('Project', on_delete=models.CASCADE, null=True, related_name='projects_question')
add_findings_placeholder = PlaceholderField(slotname='add_findings')
class Meta:
permissions = (
('can_add_question', 'Can add Question'),
('can_edit_question', 'Can edit Question')
)
def __str__(self):
return '{} for {}'.format(self.number, self.project)
class Stick2UgandaPlugin(CMSPlugin):
info = RichTextField(null=True, blank=True)
intro_small = models.CharField(max_length=128, null=True, blank=True)
def __str__(self):
return self.stick2uganda.name
class S2UImagePlugin(CMSPlugin):
image = FilerImageField(blank=True, null=True)
title = models.CharField(max_length=128, blank=True, null=True)
| unlicense | -6,166,577,921,716,085,000 | 38.215054 | 118 | 0.64601 | false |
noirbizarre/flask-fs | docs/conf.py | 1 | 10269 | # -*- coding: utf-8 -*-
#
# Flask-FS documentation build configuration file, created by
# sphinx-quickstart on Mon Oct 6 12:44:29 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import alabaster
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.viewcode',
'sphinx.ext.todo',
'alabaster',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Flask-FS'
copyright = u'2016, Axel Haustant'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
release = __import__('flask_fs').__version__
# The short X.Y version.
version = '.'.join(release.split('.')[:1])
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'flaskfs'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
# 'logo': 'logo-512.png',
# 'logo_name': True,
# 'touch_icon': 'apple-180.png',
'github_user': 'noirbizarre',
'github_repo': 'flask-fs',
'github_banner': True,
'show_related': True,
# 'page_width': '',
# 'sidebar_width': '260px',
'favicons': {
64: 'favicon-64.png',
128: 'favicon-128.png',
196: 'favicon-196.png',
},
'badges': [(
# Gitter.im
'https://badges.gitter.im/Join%20Chat.svg',
'https://gitter.im/noirbizarre/flask-fs',
'Join the chat at https://gitter.im/noirbizarre/flask-fs'
), (
# Github Fork
'https://img.shields.io/github/forks/noirbizarre/flask-fs.svg?style=social&label=Fork',
'https://github.com/noirbizarre/flask-fs',
'Github repository',
), (
# Github issues
'https://img.shields.io/github/issues-raw/noirbizarre/flask-fs.svg',
'https://github.com/noirbizarre/flask-fs/issues',
'Github repository',
), (
# License
'https://img.shields.io/github/license/noirbizarre/flask-fs.svg',
'https://github.com/noirbizarre/flask-fs',
'License',
), (
# PyPI
'https://img.shields.io/pypi/v/flask-fs.svg',
'https://pypi.python.org/pypi/flask-fs',
'Latest version on PyPI'
)]
}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [alabaster.get_path(), '_themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html',
'searchbox.html',
'donate.html',
'badges.html',
]
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Flask-FSdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'Flask-FS.tex', u'Flask-FS Documentation',
u'Axel Haustant', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'flask-storages', u'Flask-FS Documentation',
[u'Axel Haustant'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Flask-FS', u'Flask-FS Documentation',
u'Axel Haustant', 'Flask-FS', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'flask': ('http://flask.pocoo.org/docs/', None),
'python': ('http://docs.python.org/', None),
'werkzeug': ('http://werkzeug.pocoo.org/docs/', None),
'boto': ('https://boto3.readthedocs.org/en/latest/', None),
'mongo': ('http://docs.mongoengine.org/', None),
}
| mit | 1,705,845,865,941,998,600 | 30.5 | 95 | 0.678158 | false |
petervaro/coublet | views/stream.py | 1 | 3574 | ## INFO ########################################################################
## ##
## COUBLET ##
## ======= ##
## ##
## Cross-platform desktop client to follow posts from COUB ##
## Version: 0.6.93.172 (20140814) ##
## ##
## File: views/stream.py ##
## ##
## Designed and written by Peter Varo. Copyright (c) 2014 ##
## License agreement is provided in the LICENSE file ##
## For more info visit: https://github.com/petervaro/coub ##
## ##
## Copyright (c) 2014 Coub Ltd and/or its suppliers and licensors, ##
## 5 Themistokli Dervi Street, Elenion Building, 1066 Nicosia, Cyprus. ##
## All rights reserved. COUB (TM) is a trademark of Coub Ltd. ##
## http://coub.com ##
## ##
######################################################################## INFO ##
# Import PyQt5 modules
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QWidget, QVBoxLayout
# Import coublet modules
from views.vars import *
from widgets.anim import CoubletAnimatedGifWidget
#------------------------------------------------------------------------------#
class CoubletStreamView(QVBoxLayout):
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
def __init__(self, presenter, parent=None):
super().__init__(parent)
# Store static values
self._presenter = presenter
# Build GUI
self._build_gui()
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
def show_loading(self, sync):
if sync:
self.insertWidget(0, self._loading_indicator, alignment=Qt.AlignHCenter)
else:
self.addWidget(self._loading_indicator, alignment=Qt.AlignHCenter)
self._loading_indicator.show()
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
def hide_loading(self):
self._loading_indicator.hide()
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
def append_post(self, post):
self.addWidget(post)
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
def insert_post(self, post):
self.insertWidget(0, post)
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
def _build_gui(self):
# Set dimensional values
self.setSpacing(POST_SPACING_HEAD + POST_SPACING_TAIL)
self.setContentsMargins(LARGE_PADDING,
POST_SPACING_HEAD,
0,
POST_SPACING_TAIL)
# Create loading indicator
self._loading_indicator = CoubletAnimatedGifWidget(CONSTANTS['anim_busy_dark'], 32, 16)
self._loading_indicator.hide()
| mit | 185,259,562,077,687,900 | 45.415584 | 95 | 0.348629 | false |
agraubert/agutil | tests/test_ciphers.py | 1 | 11681 | import unittest
import os
from py_compile import compile
import sys
import random
import tempfile
from filecmp import cmp
import rsa.randnum
from hashlib import md5
import Cryptodome.Cipher.AES as AES
import warnings
from itertools import chain
import hashlib
key = b'\x91^\x86\x91`\xae\xb3n4\xf8\xca\xf1\x90iT\xc2\xb6`\xa99*\xect\x93\x84I\xe1}\xdc)\x8c\xa6'
legacy_key = b"\x9a1'W\n:\xda[\x18VZ\x94\xffm,\x9d\xd1\xb1Z9\xa0\x08\xc3q&\xec><\x10\x10\x1e\xa6"
class test(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.script_path = os.path.join(
os.path.dirname(
os.path.dirname(
os.path.abspath(__file__)
)
),
"agutil",
"security",
"src",
"cipher.py"
)
cls.header_path = os.path.join(
os.path.dirname(
os.path.dirname(
os.path.abspath(__file__)
)
),
"agutil",
"security",
"src",
"cipher_header.py"
)
cls.test_data_dir = os.path.join(
os.path.dirname(__file__),
'data',
'encryption'
)
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(cls.script_path))))
random.seed()
def test_compilation(self):
compiled_path = compile(self.header_path)
self.assertTrue(compiled_path)
compiled_path = compile(self.script_path)
self.assertTrue(compiled_path)
def test_bitmasks(self):
from agutil.security.src.cipher import Bitmask
for trial in range(15):
mask = Bitmask()
n = 0
for bit in range(random.randint(0,7)):
bit = random.randint(0,7)
current = bool(n & (1 << bit))
self.assertEqual(mask[bit], current)
mask[bit] = True
n = n | (1 << bit)
current = bool(n & (1 << bit))
self.assertEqual(mask[bit], current)
self.assertEqual(mask.mask.data, n)
def test_exceptions(self):
from agutil.security.src.cipher import HeaderLengthError, EncryptionCipher, DecryptionCipher
source = os.urandom(1024 * random.randint(1,16))
key = os.urandom(32)
encryptor = EncryptionCipher(key)
data = encryptor.encrypt(source) + encryptor.finish()
with self.assertRaises(HeaderLengthError):
decryptor = DecryptionCipher(data[:24], key)
def test_all_ciphers(self):
from agutil.security.src.cipher import EncryptionCipher, DecryptionCipher
from Cryptodome.Cipher import AES
ciphers = {
AES.MODE_ECB, AES.MODE_CBC, AES.MODE_CTR, AES.MODE_CFB,
AES.MODE_OFB, AES.MODE_OPENPGP, AES.MODE_CCM, AES.MODE_EAX,
AES.MODE_GCM, AES.MODE_SIV, AES.MODE_OCB
}
for cipher in ciphers:
source = os.urandom(1024 * random.randint(1,16))
key = os.urandom(32)
encryptor = EncryptionCipher(
key,
cipher_type=cipher
)
data = encryptor.encrypt(source) + encryptor.finish()
self.assertNotEqual(source, data)
decryptor = DecryptionCipher(data[:64], key)
compare = decryptor.decrypt(data[64:]) + decryptor.finish()
self.assertEqual(source, compare)
if cipher <= AES.MODE_OPENPGP:
source = os.urandom(1024 * random.randint(1,16))
key = os.urandom(32)
encryptor = EncryptionCipher(
key,
secondary_cipher_type=cipher,
use_legacy_ciphers=True,
)
data = encryptor.encrypt(source) + encryptor.finish()
self.assertNotEqual(source, data)
decryptor = DecryptionCipher(data[:64], key)
compare = decryptor.decrypt(data[64:]) + decryptor.finish()
self.assertEqual(source, compare)
source = os.urandom(1024 * random.randint(1,16))
key = os.urandom(32)
encryptor = EncryptionCipher(
key,
secondary_cipher_type=cipher,
encrypted_nonce=True
)
data = encryptor.encrypt(source) + encryptor.finish()
self.assertNotEqual(source, data)
decryptor = DecryptionCipher(data[:64], key)
compare = decryptor.decrypt(data[64:]) + decryptor.finish()
self.assertEqual(source, compare)
def test_encryption_decryption(self):
from agutil.security.src.cipher import configure_cipher, EncryptionCipher, DecryptionCipher
for trial in range(5):
source = os.urandom(1024 * random.randint(1,16))
key = os.urandom(32)
encryptor = EncryptionCipher(key)
data = encryptor.encrypt(source) + encryptor.finish()
self.assertNotEqual(source, data)
decryptor = DecryptionCipher(data[:64], key)
compare = decryptor.decrypt(data[64:]) + decryptor.finish()
self.assertEqual(source, compare)
encryptor = EncryptionCipher(
key,
use_legacy_ciphers=True
)
self.assertEqual(encryptor.header_buffer[:16], encryptor.header.data)
data = encryptor.encrypt(source) + encryptor.finish()
self.assertNotEqual(source, data)
decryptor = DecryptionCipher(data[:64], key)
compare = decryptor.decrypt(data[64:]) + decryptor.finish()
self.assertEqual(source, compare)
encryptor = EncryptionCipher(
key,
enable_compatability=True
)
self.assertNotEqual(encryptor.header_buffer[:16], encryptor.header.data)
data = encryptor.encrypt(source) + encryptor.finish()
self.assertNotEqual(source, data)
decryptor = DecryptionCipher(data[:64], key)
compare = decryptor.decrypt(data[64:]) + decryptor.finish()
self.assertEqual(source, compare)
def test_bulk(self):
from agutil.security.src.cipher import configure_cipher, EncryptionCipher, DecryptionCipher
for trial in range(5):
source = os.urandom(1024 * random.randint(1,16))
key = os.urandom(32)
encryptor = EncryptionCipher(
key,
enable_streaming=False
)
header_len = len(encryptor.header_buffer)
data = encryptor.encrypt(source)
self.assertEqual(len(data), header_len)
data += encryptor.finish()
self.assertNotEqual(source, data)
decryptor = DecryptionCipher(data[:64], key)
compare = decryptor.decrypt(data[64:]) + decryptor.finish()
self.assertEqual(source, compare)
def test_inline(self):
from agutil.security.src.cipher import configure_cipher, EncryptionCipher, DecryptionCipher
for trial in range(5):
source = os.urandom(1024 * random.randint(1,16))
key = os.urandom(32)
encryptor = EncryptionCipher(key)
decryptor = DecryptionCipher(
encryptor.encrypt(source) + encryptor.finish(),
key
)
compare = decryptor.decrypt() + decryptor.finish()
self.assertEqual(source, compare)
def test_external_nonce(self):
from agutil.security.src.cipher import configure_cipher, EncryptionCipher, DecryptionCipher
for trial in range(5):
source = os.urandom(1024 * random.randint(1,16))
key = os.urandom(32)
nonce = os.urandom(16)
encryptor = EncryptionCipher(
key,
nonce,
store_nonce=False
)
self.assertNotIn(nonce, encryptor.header_buffer)
data = encryptor.encrypt(source) + encryptor.finish()
self.assertNotEqual(source, data)
decryptor = DecryptionCipher(data[:64], key, encryptor.nonce)
compare = decryptor.decrypt(data[64:]) + decryptor.finish()
self.assertEqual(source, compare)
def test_encrypted_nonce(self):
from agutil.security.src.cipher import configure_cipher, EncryptionCipher, DecryptionCipher
for trial in range(5):
source = os.urandom(1024 * random.randint(1,16))
key = os.urandom(32)
nonce = os.urandom(16)
encryptor = EncryptionCipher(
key,
nonce,
encrypted_nonce=True
)
self.assertNotEqual(nonce, encryptor.header_buffer[-16:])
data = encryptor.encrypt(source) + encryptor.finish()
self.assertNotEqual(source, data)
decryptor = DecryptionCipher(data[:64], key)
compare = decryptor.decrypt(data[64:]) + decryptor.finish()
self.assertEqual(source, compare)
encryptor = EncryptionCipher(
key,
nonce,
encrypted_nonce=True,
legacy_randomized_nonce=True,
legacy_store_nonce=False
)
self.assertNotEqual(nonce, encryptor.header_buffer[-16:])
data = encryptor.encrypt(source) + encryptor.finish()
self.assertNotEqual(source, data)
decryptor = DecryptionCipher(data[:64], key)
compare = decryptor.decrypt(data[64:]) + decryptor.finish()
self.assertEqual(source, compare)
source = os.urandom(1024 * random.randint(1,16))
key = os.urandom(32)
nonce = os.urandom(16)
encryptor = EncryptionCipher(
key,
nonce,
encrypted_nonce=True,
encrypted_tag=True
)
self.assertNotEqual(nonce, encryptor.header_buffer[-16:])
data = encryptor.encrypt(source) + encryptor.finish()
self.assertNotEqual(source, data)
decryptor = DecryptionCipher(data[:64], key)
compare = decryptor.decrypt(data[64:]) + decryptor.finish()
self.assertEqual(source, compare)
def test_legacy_decryption(self):
from agutil.security.src.cipher import DecryptionCipher
with open(os.path.join(self.test_data_dir, 'expected'), 'rb') as r:
expected = r.read()
with open(os.path.join(self.test_data_dir, 'legacy'), 'rb') as r:
cipher = DecryptionCipher(
r.read(),
key,
)
self.assertEqual(expected, cipher.decrypt() + cipher.finish())
with open(os.path.join(self.test_data_dir, 'encrypted'), 'rb') as r:
cipher = DecryptionCipher(
r.read(),
key,
legacy_force=True
)
self.assertEqual(expected, cipher.decrypt() + cipher.finish())
with open(os.path.join(self.test_data_dir, 'encrypted.3.3'), 'rb') as r:
cipher = DecryptionCipher(
r.read(),
legacy_key,
)
self.assertEqual(expected, cipher.decrypt() + cipher.finish())
with open(os.path.join(self.test_data_dir, 'encrypted.3.5'), 'rb') as r:
cipher = DecryptionCipher(
r.read(),
legacy_key,
)
self.assertEqual(expected, cipher.decrypt() + cipher.finish())
| mit | -7,161,250,493,035,844,000 | 40.275618 | 100 | 0.559456 | false |
citrix-openstack-build/cinder | cinder/scheduler/filters/capacity_filter.py | 1 | 1879 | # Copyright (c) 2012 Intel
# Copyright (c) 2012 OpenStack, LLC.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import math
from cinder.openstack.common import log as logging
from cinder.openstack.common.scheduler import filters
LOG = logging.getLogger(__name__)
class CapacityFilter(filters.BaseHostFilter):
"""CapacityFilter filters based on volume host's capacity utilization."""
def host_passes(self, host_state, filter_properties):
"""Return True if host has sufficient capacity."""
volume_size = filter_properties.get('size')
if not host_state.free_capacity_gb:
# Fail Safe
LOG.warning(_("Free capacity not set;"
"volume node info collection broken."))
return False
free_space = host_state.free_capacity_gb
if free_space == 'infinite' or free_space == 'unknown':
# NOTE(zhiteng) for those back-ends cannot report actual
# available capacity, we assume it is able to serve the
# request. Even if it was not, the retry mechanism is
# able to handle the failure by rescheduling
return True
reserved = float(host_state.reserved_percentage) / 100
free = math.floor(free_space * (1 - reserved))
return free >= volume_size
| apache-2.0 | -1,524,050,791,477,338,600 | 35.843137 | 78 | 0.666844 | false |
SunPower/PVMismatch | pvmismatch/pvmismatch_tk/pvmodule_tk.py | 1 | 3397 | # -*- coding: utf-8 -*-
"""
Created on Jul 29, 2012
@author: marko
"""
from Tkinter import Frame, Label, Button, OptionMenu, IntVar
#from Tkinter import Menu, Entry
MODULE_SIZES = [72, 96, 128]
class PVmodule_tk(Frame):
"""
classdocs
"""
def __init__(self, pvapp, top):
"""
Constructor
"""
self.pvapp = pvapp
Frame.__init__(self, top)
self.pack(expand=True) # if user resizes, expand Frame
self.pack(fill='both')
self.focus_set() # get the focus
self.grab_set() # make this window modal
self['bg'] = 'black' # set black background
self['padx'] = '15' # pad sides with 15 points
self['pady'] = '5' # pad top/bottom 5 points
self.master.title('PVmodule') # set title bar
self.SPlogoLabel = Label(self, image=self.pvapp.SPlogo,
cnf={'borderwidth': '0'})
self.SPlogoLabel.pack({'side': 'top'})
self.numberCells = IntVar(self) # bind numberCells
self.numberCells.set(MODULE_SIZES[0]) # default value
# pylint: disable = W0142
self.numberCellsOption = OptionMenu(self, self.numberCells,
*MODULE_SIZES)
# pylint: enable = W0142
self.numberCellsOption.pack({'side': 'top', 'fill': 'both'})
self.QUIT = Button(self, cnf={'text': 'Quit', 'command': self.quit})
self.QUIT.pack({'side': 'top', 'fill': 'both'})
# # cell ID # spinbox
# cellID = self.cellID = IntVar(self) # bind moduleID
# cellID.set(1)
# # cell ID # label
# self.cellIDlabel = Label(pvModFrame, text='Cell ID #')
# self.cellIDlabel.pack(side=LEFT)
# spacer(pvModFrame, 16, LEFT)
# # cell ID spinbox
# maxModSize = max(MOD_SIZES)
# spinboxCnf = {'name': 'cellIDspinbox', 'from_': 1, 'to': maxModSize,
# 'textvariable': cellID, 'width': 5, 'validate': 'all',
# 'validatecommand': vcmd, 'invalidcommand': invcmd}
# self.cellIDspinbox = Spinbox(pvModFrame, cnf=spinboxCnf)
# self.cellIDspinbox.pack(side=LEFT)
# self.pvModButton = Button(pvModFrame,
# cnf={'text': PVMODULE_TEXT})
# self.pvModButton.pack(side=RIGHT)
# self.pvModButton['command'] = self.startPVmodule_tk
# self.separatorLine() # separator
# # module ID # label
# labelCnf = {'name': 'modIDlabel', 'text': 'Module ID #'}
# self.modIDlabel = Label(pvStrFrame, cnf=labelCnf)
# self.modIDlabel.pack(side=LEFT)
# # module ID # spinbox
# spinboxCnf = {'name': 'modIDspinbox', 'from_': 1, 'to': MAX_MODULES,
# 'textvariable': modID, 'width': 5, 'validate': 'all',
# 'validatecommand': vcmd, 'invalidcommand': invcmd}
# self.modIDspinbox = Spinbox(pvStrFrame, cnf=spinboxCnf)
# self.modIDspinbox.pack(side=LEFT)
# # PVmodule button
# self.pvStrButton = Button(pvStrFrame, cnf={'text': PVSTRING_TEXT})
# self.pvStrButton.pack(side=RIGHT)
# self.pvStrButton['command'] = self.startPVstring_tk
# self.separatorLine() # separator
#
# ## PVmodule frame
# pvModFrame = self.pvModFrame = Frame(master, name='pvModFrame')
# pvModFrame.pack(fill=BOTH)
| bsd-3-clause | 5,823,362,148,952,416,000 | 38.045977 | 77 | 0.565793 | false |
avivga/cocktail-party | video2speech/network.py | 1 | 3363 | from keras import optimizers
from keras.layers import Dense, Convolution3D, MaxPooling3D, ZeroPadding3D, Dropout, Flatten, BatchNormalization, LeakyReLU
from keras.layers.wrappers import TimeDistributed
from keras.models import Sequential, model_from_json
class VideoToSpeechNet:
def __init__(self, model):
self._model = model
@staticmethod
def build(video_shape, audio_spectrogram_size):
model = Sequential()
model.add(ZeroPadding3D(padding=(1, 2, 2), name='zero1', input_shape=video_shape))
model.add(Convolution3D(32, (3, 5, 5), strides=(1, 2, 2), kernel_initializer='he_normal', name='conv1'))
model.add(BatchNormalization())
model.add(LeakyReLU())
model.add(MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2), name='max1'))
model.add(Dropout(0.25))
model.add(ZeroPadding3D(padding=(1, 2, 2), name='zero2'))
model.add(Convolution3D(64, (3, 5, 5), strides=(1, 1, 1), kernel_initializer='he_normal', name='conv2'))
model.add(BatchNormalization())
model.add(LeakyReLU())
model.add(MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2), name='max2'))
model.add(Dropout(0.25))
model.add(ZeroPadding3D(padding=(1, 1, 1), name='zero3'))
model.add(Convolution3D(128, (3, 3, 3), strides=(1, 1, 1), kernel_initializer='he_normal', name='conv3'))
model.add(BatchNormalization())
model.add(LeakyReLU())
model.add(MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2), name='max3'))
model.add(Dropout(0.25))
model.add(TimeDistributed(Flatten(), name='time'))
model.add(Dense(1024, kernel_initializer='he_normal', name='dense1'))
model.add(BatchNormalization())
model.add(LeakyReLU())
model.add(Dropout(0.25))
model.add(Dense(1024, kernel_initializer='he_normal', name='dense2'))
model.add(BatchNormalization())
model.add(LeakyReLU())
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(2048, kernel_initializer='he_normal', name='dense3'))
model.add(BatchNormalization())
model.add(LeakyReLU())
model.add(Dropout(0.25))
model.add(Dense(2048, kernel_initializer='he_normal', name='dense4'))
model.add(BatchNormalization())
model.add(LeakyReLU())
model.add(Dropout(0.25))
model.add(Dense(audio_spectrogram_size, name='output'))
model.summary()
return VideoToSpeechNet(model)
@staticmethod
def load(model_cache_path, weights_cache_path):
with open(model_cache_path, "r") as model_fd:
model = model_from_json(model_fd.read())
model.load_weights(weights_cache_path)
return VideoToSpeechNet(model)
def train(self, x, y, learning_rate=0.01, epochs=200):
optimizer = optimizers.adam(lr=learning_rate, decay=1e-6)
self._model.compile(loss="mean_squared_error", optimizer=optimizer)
self._model.fit(x, y, batch_size=32, validation_split=0.05, epochs=epochs, verbose=1)
def fine_tune(self, x, y):
first_tuned_layer_index = self._get_layer_names().index("time")
for layer in self._model.layers[:first_tuned_layer_index]:
layer.trainable = False
self._model.summary()
self.train(x, y, epochs=100)
def predict(self, x):
y = self._model.predict(x)
return y
def dump(self, model_cache_path, weights_cache_path):
with open(model_cache_path, "w") as model_fd:
model_fd.write(self._model.to_json())
self._model.save_weights(weights_cache_path)
def _get_layer_names(self):
return [layer.name for layer in self._model.layers]
| mit | 6,291,750,038,852,244,000 | 31.650485 | 123 | 0.704728 | false |
liampauling/flumine | examples/tennisexample.py | 1 | 1740 | import time
import logging
import betfairlightweight
from betfairlightweight.filters import streaming_market_filter
from pythonjsonlogger import jsonlogger
from flumine import Flumine, clients, BaseStrategy
from flumine.worker import BackgroundWorker
from workers.inplayservice import poll_in_play_service
logger = logging.getLogger()
custom_format = "%(asctime) %(levelname) %(message)"
log_handler = logging.StreamHandler()
formatter = jsonlogger.JsonFormatter(custom_format)
formatter.converter = time.gmtime
log_handler.setFormatter(formatter)
logger.addHandler(log_handler)
logger.setLevel(logging.INFO)
class ExampleStrategy(BaseStrategy):
def check_market_book(self, market, market_book):
# process_market_book only executed if this returns True
if market_book.status != "CLOSED":
return True
def process_market_book(self, market, market_book):
# process marketBook object
if "score" in market.context:
score = market.context["score"]
print(
score.match_status,
score.current_game,
score.current_set,
score.current_point,
score.score.home.score,
score.score.away.score,
)
trading = betfairlightweight.APIClient("username")
client = clients.BetfairClient(trading)
framework = Flumine(client=client)
strategy = ExampleStrategy(
market_filter=streaming_market_filter(market_ids=["1.172415939"]),
)
framework.add_strategy(strategy)
framework.add_worker(
BackgroundWorker(
framework,
poll_in_play_service,
func_kwargs={"event_type_id": "2"},
interval=30,
start_delay=4,
)
)
framework.run()
| mit | 9,059,402,078,703,876,000 | 27.064516 | 70 | 0.686782 | false |
queria/my-tempest | tempest/cli/simple_read_only/compute/test_nova_manage.py | 1 | 3143 | # Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest import cli
from tempest import config
from tempest import exceptions
from tempest.openstack.common import log as logging
CONF = config.CONF
LOG = logging.getLogger(__name__)
class SimpleReadOnlyNovaManageTest(cli.ClientTestBase):
"""
This is a first pass at a simple read only nova-manage test. This
only exercises client commands that are read only.
This should test commands:
* with and without optional parameters
* initially just check return codes, and later test command outputs
"""
@classmethod
def resource_setup(cls):
if not CONF.service_available.nova:
msg = ("%s skipped as Nova is not available" % cls.__name__)
raise cls.skipException(msg)
if not CONF.cli.has_manage:
msg = ("%s skipped as *-manage commands not available"
% cls.__name__)
raise cls.skipException(msg)
super(SimpleReadOnlyNovaManageTest, cls).resource_setup()
def test_admin_fake_action(self):
self.assertRaises(exceptions.CommandFailed,
self.nova_manage,
'this-does-nova-exist')
# NOTE(jogo): Commands in order listed in 'nova-manage -h'
# test flags
def test_help_flag(self):
self.nova_manage('', '-h')
def test_version_flag(self):
# Bug 1159957: nova-manage --version writes to stderr
self.assertNotEqual("", self.nova_manage('', '--version',
merge_stderr=True))
self.assertEqual(self.nova_manage('version'),
self.nova_manage('', '--version', merge_stderr=True))
def test_debug_flag(self):
self.assertNotEqual("", self.nova_manage('service list',
'--debug'))
def test_verbose_flag(self):
self.assertNotEqual("", self.nova_manage('service list',
'--verbose'))
# test actions
def test_version(self):
self.assertNotEqual("", self.nova_manage('version'))
def test_db_sync(self):
# make sure command doesn't error out
self.nova_manage('db sync')
def test_db_version(self):
self.assertNotEqual("", self.nova_manage('db version'))
def test_cell_list(self):
# make sure command doesn't error out
self.nova_manage('cell list')
def test_host_list(self):
# make sure command doesn't error out
self.nova_manage('host list')
| apache-2.0 | 9,148,735,293,197,965,000 | 33.163043 | 78 | 0.628062 | false |
ehrenfeu/simplify | misc/vcf_extract_photos.py | 1 | 1656 | #!/usr/bin/env python
"""Extract photos from all .VCF files in a given directory."""
import sys
import argparse
import os.path
import vobject
def process_vcf_file(pathname):
"""Process a VCF file (or all in a directory)."""
outfname = os.path.splitext(pathname)[0]
with open(pathname, 'r') as vcf_file:
vcf_string = vcf_file.read()
vcard = vobject.readOne(vcf_string)
print("Processing '%s'..." % vcard.fn.value)
print(" - UID: %s" % vcard.uid.value)
if 'photo' not in vcard.contents:
print("NO PHOTO in file '%s'!" % pathname)
return
photo_type = vcard.photo.params['TYPE'][0].lower()
outfname += "." + photo_type
print(" - type: %s" % photo_type)
if os.path.exists(outfname):
print("NOT overwriting file '%s'!" % outfname)
return
print(" - writing photo to file: %s" % outfname)
with open(outfname, 'wb') as fout:
fout.write(vcard.photo.value)
def parse_arguments():
"""Parse commandline arguments."""
argparser = argparse.ArgumentParser(description=__doc__)
add = argparser.add_argument
add('--vcf', required=True,
help='VCF file to extract photo from.')
add('-v', '--verbosity', dest='verbosity',
action='count', default=0)
try:
args = argparser.parse_args()
except IOError as err:
argparser.error(str(err))
return args
def main():
"""Parse commandline arguments and run parser."""
args = parse_arguments()
process_vcf_file(args.vcf)
print("DONE.\n")
if __name__ == "__main__":
sys.exit(main())
| gpl-3.0 | 3,565,980,432,918,878,000 | 28.571429 | 62 | 0.59058 | false |
hguemar/cinder | cinder/tests/scheduler/test_scheduler.py | 1 | 13028 |
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Scheduler
"""
import mock
from oslo_config import cfg
from cinder import context
from cinder import db
from cinder import exception
from cinder.i18n import _
from cinder.openstack.common import log as logging
from cinder.scheduler import driver
from cinder.scheduler import filter_scheduler
from cinder.scheduler import manager
from cinder import test
CONF = cfg.CONF
class SchedulerManagerTestCase(test.TestCase):
"""Test case for scheduler manager."""
manager_cls = manager.SchedulerManager
driver_cls = driver.Scheduler
driver_cls_name = 'cinder.scheduler.driver.Scheduler'
class AnException(Exception):
pass
def setUp(self):
super(SchedulerManagerTestCase, self).setUp()
self.flags(scheduler_driver=self.driver_cls_name)
self.manager = self.manager_cls()
self.context = context.RequestContext('fake_user', 'fake_project')
self.topic = 'fake_topic'
self.fake_args = (1, 2, 3)
self.fake_kwargs = {'cat': 'meow', 'dog': 'woof'}
def test_1_correct_init(self):
# Correct scheduler driver
manager = self.manager
self.assertIsInstance(manager.driver, self.driver_cls)
@mock.patch('cinder.scheduler.driver.Scheduler.'
'update_service_capabilities')
def test_update_service_capabilities_empty_dict(self, _mock_update_cap):
# Test no capabilities passes empty dictionary
service = 'fake_service'
host = 'fake_host'
self.manager.update_service_capabilities(self.context,
service_name=service,
host=host)
_mock_update_cap.assert_called_once_with(service, host, {})
@mock.patch('cinder.scheduler.driver.Scheduler.'
'update_service_capabilities')
def test_update_service_capabilities_correct(self, _mock_update_cap):
# Test capabilities passes correctly
service = 'fake_service'
host = 'fake_host'
capabilities = {'fake_capability': 'fake_value'}
self.manager.update_service_capabilities(self.context,
service_name=service,
host=host,
capabilities=capabilities)
_mock_update_cap.assert_called_once_with(service, host, capabilities)
@mock.patch('cinder.scheduler.driver.Scheduler.schedule_create_volume')
@mock.patch('cinder.db.volume_update')
def test_create_volume_exception_puts_volume_in_error_state(
self, _mock_volume_update, _mock_sched_create):
# Test NoValidHost exception behavior for create_volume.
# Puts the volume in 'error' state and eats the exception.
_mock_sched_create.side_effect = exception.NoValidHost(reason="")
fake_volume_id = 1
topic = 'fake_topic'
request_spec = {'volume_id': fake_volume_id}
self.manager.create_volume(self.context, topic, fake_volume_id,
request_spec=request_spec,
filter_properties={})
_mock_volume_update.assert_called_once_with(self.context,
fake_volume_id,
{'status': 'error'})
_mock_sched_create.assert_called_once_with(self.context, request_spec,
{})
@mock.patch('cinder.scheduler.driver.Scheduler.host_passes_filters')
@mock.patch('cinder.db.volume_update')
def test_migrate_volume_exception_returns_volume_state(
self, _mock_volume_update, _mock_host_passes):
# Test NoValidHost exception behavior for migrate_volume_to_host.
# Puts the volume in 'error_migrating' state and eats the exception.
_mock_host_passes.side_effect = exception.NoValidHost(reason="")
fake_volume_id = 1
topic = 'fake_topic'
request_spec = {'volume_id': fake_volume_id}
self.manager.migrate_volume_to_host(self.context, topic,
fake_volume_id, 'host', True,
request_spec=request_spec,
filter_properties={})
_mock_volume_update.assert_called_once_with(self.context,
fake_volume_id,
{'migration_status': None})
_mock_host_passes.assert_called_once_with(self.context, 'host',
request_spec, {})
def test_chance_simple_scheduler_mocked(self):
# Test FilterScheduler is loaded and predefined combination
# of filters and weighers overrides the default value of config option
# scheduler_default_filters and scheduler_default_weighers when
# ChanceScheduler or SimpleScheduler is configured as scheduler_driver.
chance = 'cinder.scheduler.chance.ChanceScheduler'
simple = 'cinder.scheduler.simple.SimpleScheduler'
default_filters = ['AvailabilityZoneFilter',
'CapacityFilter',
'CapabilitiesFilter']
self.flags(scheduler_driver=chance,
scheduler_default_filters=['CapacityFilter'],
scheduler_default_weighers=['CapacityWeigher'])
self.manager = self.manager_cls()
self.assertTrue(isinstance(self.manager.driver,
filter_scheduler.FilterScheduler))
self.assertEqual(CONF.scheduler_default_filters,
default_filters)
self.assertEqual(CONF.scheduler_default_weighers,
['ChanceWeigher'])
self.flags(scheduler_driver=simple,
scheduler_default_filters=['CapacityFilter'],
scheduler_default_weighers=['CapacityWeigher'])
self.manager = self.manager_cls()
self.assertTrue(isinstance(self.manager.driver,
filter_scheduler.FilterScheduler))
self.assertEqual(CONF.scheduler_default_filters,
default_filters)
self.assertEqual(CONF.scheduler_default_weighers,
['AllocatedCapacityWeigher'])
@mock.patch('cinder.db.volume_update')
@mock.patch('cinder.db.volume_get')
def test_retype_volume_exception_returns_volume_state(self, _mock_vol_get,
_mock_vol_update):
# Test NoValidHost exception behavior for retype.
# Puts the volume in original state and eats the exception.
fake_volume_id = 1
topic = 'fake_topic'
volume_id = fake_volume_id
request_spec = {'volume_id': fake_volume_id, 'volume_type': {'id': 3},
'migration_policy': 'on-demand'}
vol_info = {'id': fake_volume_id, 'status': 'in-use',
'instance_uuid': 'foo', 'attached_host': None}
_mock_vol_get.return_value = vol_info
_mock_vol_update.return_value = {'status': 'in-use'}
_mock_find_retype_host = mock.Mock(
side_effect=exception.NoValidHost(reason=""))
orig_retype = self.manager.driver.find_retype_host
self.manager.driver.find_retype_host = _mock_find_retype_host
self.manager.retype(self.context, topic, volume_id,
request_spec=request_spec,
filter_properties={})
_mock_vol_get.assert_called_once_with(self.context, fake_volume_id)
_mock_find_retype_host.assert_called_once_with(self.context,
request_spec, {},
'on-demand')
_mock_vol_update.assert_called_once_with(self.context, fake_volume_id,
{'status': 'in-use'})
self.manager.driver.find_retype_host = orig_retype
def test_create_consistencygroup_exceptions(self):
with mock.patch.object(filter_scheduler.FilterScheduler,
'schedule_create_consistencygroup') as mock_cg:
original_driver = self.manager.driver
self.manager.driver = filter_scheduler.FilterScheduler
LOG = logging.getLogger('cinder.scheduler.manager')
self.stubs.Set(LOG, 'error', mock.Mock())
self.stubs.Set(LOG, 'exception', mock.Mock())
self.stubs.Set(db, 'consistencygroup_update', mock.Mock())
ex = exception.CinderException('test')
mock_cg.side_effect = ex
group_id = '1'
self.assertRaises(exception.CinderException,
self.manager.create_consistencygroup,
self.context,
'volume',
group_id)
LOG.exception.assert_called_once_with(_(
"Failed to create consistency group "
"%(group_id)s."), {'group_id': group_id})
db.consistencygroup_update.assert_called_once_with(
self.context, group_id, {'status': 'error'})
mock_cg.reset_mock()
LOG.exception.reset_mock()
db.consistencygroup_update.reset_mock()
mock_cg.side_effect = exception.NoValidHost(
reason="No weighed hosts available")
self.manager.create_consistencygroup(
self.context, 'volume', group_id)
LOG.error.assert_called_once_with(_(
"Could not find a host for consistency group "
"%(group_id)s.") % {'group_id': group_id})
db.consistencygroup_update.assert_called_once_with(
self.context, group_id, {'status': 'error'})
self.manager.driver = original_driver
class SchedulerTestCase(test.TestCase):
"""Test case for base scheduler driver class."""
# So we can subclass this test and re-use tests if we need.
driver_cls = driver.Scheduler
def setUp(self):
super(SchedulerTestCase, self).setUp()
self.driver = self.driver_cls()
self.context = context.RequestContext('fake_user', 'fake_project')
self.topic = 'fake_topic'
@mock.patch('cinder.scheduler.driver.Scheduler.'
'update_service_capabilities')
def test_update_service_capabilities(self, _mock_update_cap):
service_name = 'fake_service'
host = 'fake_host'
capabilities = {'fake_capability': 'fake_value'}
self.driver.update_service_capabilities(service_name, host,
capabilities)
_mock_update_cap.assert_called_once_with(service_name, host,
capabilities)
class SchedulerDriverBaseTestCase(SchedulerTestCase):
"""Test cases for base scheduler driver class methods
that can't will fail if the driver is changed.
"""
def test_unimplemented_schedule(self):
fake_args = (1, 2, 3)
fake_kwargs = {'cat': 'meow'}
self.assertRaises(NotImplementedError, self.driver.schedule,
self.context, self.topic, 'schedule_something',
*fake_args, **fake_kwargs)
class SchedulerDriverModuleTestCase(test.TestCase):
"""Test case for scheduler driver module methods."""
def setUp(self):
super(SchedulerDriverModuleTestCase, self).setUp()
self.context = context.RequestContext('fake_user', 'fake_project')
@mock.patch('cinder.db.volume_update')
@mock.patch('oslo.utils.timeutils.utcnow')
def test_volume_host_update_db(self, _mock_utcnow, _mock_vol_update):
_mock_utcnow.return_value = 'fake-now'
driver.volume_update_db(self.context, 31337, 'fake_host')
_mock_vol_update.assert_called_once_with(self.context, 31337,
{'host': 'fake_host',
'scheduled_at': 'fake-now'})
| apache-2.0 | 3,509,180,788,054,477,000 | 44.393728 | 79 | 0.585048 | false |
MagazinnikIvan/pywinauto | pywinauto/tests/comparetoreffont.py | 1 | 4734 | # GUI Application automation and testing library
# Copyright (C) 2006-2017 Mark Mc Mahon and Contributors
# https://github.com/pywinauto/pywinauto/graphs/contributors
# http://pywinauto.readthedocs.io/en/latest/credits.html
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of pywinauto nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Compare against reference font test
**What is checked**
This test checks all the parameters of the font for the control against the
font for the reference control. If any value is different then this is reported
as a bug.
Here is a list of all the possible values that are tested:
lfFaceName The name of the font
lfHeight The height of the font
lfWidth Average width of characters
lfEscapement Angle of text
lfOrientation Another angle for the text!
lfWeight How bold the text is
lfItalic If the font is italic
lfUnderline If the font is underlined
lfStrikeOut If the font is struck out
lfCharSet The character set of the font
lfOutPrecision The output precision
lfClipPrecision The clipping precision
lfQuality The output quality
lfPitchAndFamily The pitch and family
**How is it checked**
Each property of the font for the control being tested is compared against the
equivalent property of the reference control font for equality.
**When is a bug reported**
For each property of the font that is not identical to the reference font a bug
is reported. So for example if the Font Face has changed and the text is bold
then (at least) 2 bugs will be reported.
**Bug Extra Information**
The bug contains the following extra information
Name Description
ValueType What value is incorrect (see above), String
Ref The reference value converted to a string, String
Loc The localised value converted to a string, String
**Is Reference dialog needed**
This test will not run if the reference controls are not available.
**False positive bug reports**
Running this test for Asian languages will result in LOTS and LOTS of false
positives, because the font HAS to change for the localised text to display
properly.
**Test Identifier**
The identifier for this test/bug is "CompareToRefFont"
"""
testname = "CompareToRefFont"
import six
from pywinauto import win32structures
_font_attribs = [field[0] for field in win32structures.LOGFONTW._fields_]
def CompareToRefFontTest(windows):
"""Compare the font to the font of the reference control"""
bugs = []
for win in windows:
# if no reference then skip the control
if not win.ref:
continue
# find each of the bugs
for font_attrib in _font_attribs:
loc_value = getattr(win.font(), font_attrib)
# get the reference value
ref_value = getattr(win.ref.font(), font_attrib)
# If they are different
if loc_value != ref_value:
# Add the bug information
bugs.append((
[win, ],
{
"ValueType": font_attrib,
"Ref": six.text_type(ref_value),
"Loc": six.text_type(loc_value),
},
testname,
0,)
)
return bugs
| bsd-3-clause | -1,272,640,587,114,093,800 | 36.803279 | 80 | 0.705746 | false |
blalab/csv2mr | item_post.py | 1 | 1896 | import requests
import gzip
import bz2
import csv
def opener(filenames):
for name in filenames:
if name.endswith(".gz"):f = gzip.open(name)
elif name.endswith(".bz2"): f = bz2.BZ2File(name)
else: f = open(name)
yield f
def cat(filelist):
for f in filelist:
r = csv.reader(f,delimiter=',', quotechar='"')
for row in r:
yield row
def populate(rows,rmap):
for row in rows:
#l = line.split(',')
print('Try:',row)
#print(rmap)
item = {}
for k in rmap:
#print(k)
try:
if type(rmap[k]) is dict:
item[k] = {}
for lang in rmap[k]:
item[k][lang] = row[int(rmap[k][lang])-1]
else:
item[k] = row[int(rmap[k])-1]
except(IndexError):
print("Item Corrupt, incomplete column:%s"%rmap[k] )
raise
yield item
'''
sample = 1097,YUMA,SHILO INN YUMA,"Gracious Southwestern furnishings and a friendly staff set the stage for a memorable visit at the Shilo Inn Yuma. This full-service hotel features 135 guestrooms with microwaves, refrigerators, coffee makers, data ports and ironing units. After a long day of meetings, shopping or sight seeing,luxuriate in our spa or Olympic-sized pool. High speed Internet access now available in all rooms.",,,,,,,,
'''
rmap = {u'cc784d6f': '1', u'4f9a647f': '2', u'2aa59797': '11', u'8e5b7eb1': '4', u'1a3ffd3b': {'fr': '9', 'en': '8', 'it': '5', 'sp': '6'}, u'd88555ed': '3'}
filelist = ['TT1.csv']
openedfiles = opener(filelist)
lines = cat(openedfiles)
itemlist = populate(lines,rmap)
count = 0
for item in itemlist:
count += 1
print(item)
print('Total Items:%s'%count)
| mit | 650,444,844,877,061,600 | 31.689655 | 436 | 0.545359 | false |
kave/cfgov-refresh | cfgov/v1/models/sublanding_filterable_page.py | 1 | 4172 | from itertools import chain
from operator import attrgetter
from django.conf import settings
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.db import models
from django.db.models import Q
from wagtail.wagtailadmin.edit_handlers import StreamFieldPanel, FieldPanel
from wagtail.wagtailcore.fields import StreamField
from wagtail.wagtailadmin.edit_handlers import TabbedInterface, ObjectList
from wagtail.wagtailcore import blocks
from wagtail.wagtailimages.blocks import ImageChooserBlock
from . import base, ref
from ..atomic_elements import molecules, organisms
from .learn_page import AbstractFilterPage
from .. import forms
from ..util import filterable_context
from .base import CFGOVPage
from ..feeds import FilterableFeedPageMixin
class SublandingFilterablePage(FilterableFeedPageMixin, base.CFGOVPage):
header = StreamField([
('hero', molecules.Hero()),
], blank=True)
content = StreamField([
('text_introduction', molecules.TextIntroduction()),
('full_width_text', organisms.FullWidthText()),
('filter_controls', organisms.FilterControls()),
('featured_content', molecules.FeaturedContent()),
])
# General content tab
content_panels = CFGOVPage.content_panels + [
StreamFieldPanel('header'),
StreamFieldPanel('content'),
]
# Tab handler interface
edit_handler = TabbedInterface([
ObjectList(content_panels, heading='General Content'),
ObjectList(CFGOVPage.sidefoot_panels, heading='Sidebar'),
ObjectList(CFGOVPage.settings_panels, heading='Configuration'),
])
template = 'sublanding-page/index.html'
def get_context(self, request, *args, **kwargs):
context = super(SublandingFilterablePage, self).get_context(request, *args, **kwargs)
return filterable_context.get_context(self, request, context)
def get_form_class(self):
return forms.FilterableListForm
def get_page_set(self, form, hostname):
return filterable_context.get_page_set(self, form, hostname)
class ActivityLogPage(SublandingFilterablePage):
template = 'activity-log/index.html'
def get_page_set(page, form, hostname):
queries = {}
selections = {}
categories_cache = list(form.cleaned_data.get('categories', []))
# Get filter selections for Blog and Report
for f in page.content:
if 'filter_controls' in f.block_type and f.value['categories']['page_type'] == 'activity-log':
categories = form.cleaned_data.get('categories', [])
selections = {'blog': False, 'research-reports': False}
for category in selections.keys():
if not categories or category in categories:
selections[category] = True
if category in categories:
del categories[categories.index(category)]
# Get Newsroom pages
if not categories_cache or map(lambda x: x in [c[0] for c in ref.choices_for_page_type('newsroom')], categories):
try:
parent = CFGOVPage.objects.get(slug='newsroom')
queries['newsroom'] = AbstractFilterPage.objects.child_of_q(parent) & form.generate_query()
except CFGOVPage.DoesNotExist:
print 'Newsroom does not exist'
# Get Blog and Report pages if they were selected
del form.cleaned_data['categories']
for slug, is_selected in selections.iteritems():
if is_selected:
try:
parent = CFGOVPage.objects.get(slug=slug)
queries.update({slug: AbstractFilterPage.objects.child_of_q(parent) & form.generate_query()})
except CFGOVPage.DoesNotExist:
print slug, 'does not exist'
# AND all selected queries together
final_q = reduce(lambda x,y: x|y, queries.values())
return AbstractFilterPage.objects.live_shared(hostname).filter(final_q).distinct().order_by('-date_published')
def get_form_class(self):
return forms.ActivityLogFilterForm
| cc0-1.0 | -4,730,683,566,083,953,000 | 38.358491 | 121 | 0.665388 | false |
yasutaka/nlp_100 | kiyota/34.py | 1 | 2032 | """
34. 「AのB」
2つの名詞が「の」で連結されている名詞句を抽出せよ.
"""
# -*- coding: utf-8 -*-
import codecs
list_d = [] #文書のリスト
def do_mecab(textfile):
import codecs
import copy
list_s = [] #文章単位のリスト
#list_d = [] #文書のリスト
# load data
with codecs.open(textfile, 'r', 'utf-8') as f:
for line in f:
#sys.stdout.write(str(line))
morpheme = line.split("\t") # 表層形とその他に分離
#print(morpheme)
if morpheme[0] != " " and len(morpheme) != 1: # 空白とEOSは除外
#print(morpheme)
element = morpheme[1].split(",")
dic = {"surface":morpheme[0],"base":element[6],"pos":element[0],"pos1":element[1]}
list_s.append(dic)
#list_s.append(copy.deepcopy(dic))
#print(list_s)
else:
if len(list_s) != 0:
#print(list_s)
list_d.append(copy.deepcopy(list_s))
list_s.clear()
return list_d
def get_nphrase():
nphrase = ""
i = 0
while i < len(list_d):
sentence = list_d[i]
j = 0
while j < len(sentence)-2:
#print(sentence[j]["pos"] + sentence[j]["surface"] + sentence[j]["pos"])
if sentence[j]["pos"] + sentence[j+1]["surface"] + sentence[j+2]["pos"] == "名詞の名詞":
phrase = sentence[j]["surface"] + sentence[j+1]["surface"] + sentence[j+2]["surface"]
nphrase = nphrase + phrase + '\n'
#print(phrase)
j += 1
i += 1
return nphrase
def create_file(data):
with codecs.open('34-output.txt','w','utf-8') as f:
f.write(data)
#do_mecab('test-neko.txt.mecab')
do_mecab('neko.txt.mecab')
txtdata = get_nphrase()
create_file(txtdata)
#print(list_d)
#print(list_d[1])
#print(list_d[1][0]["pos"])
| mit | 4,808,684,086,994,053,000 | 29.533333 | 101 | 0.487844 | false |
mwrlabs/veripy | contrib/rfc4861/router_advertisement_processing_on_link_determination.py | 1 | 2198 | from constants import *
from scapy.all import *
from veripy.assertions import *
from veripy.models import ComplianceTestCase
class OnLinkDeterminationTestCase(ComplianceTestCase):
"""
Router Advertisement Processing, On-link determination
Verify that a host properly rejects an invalid prefix length, however the
prefix length is still valid for on-link determination when the on-link
flag is true.
@private
Source: IPv6 Ready Phase-1/Phase-2 Test Specification Core
Protocols (Test v6LC.2.2.19)
"""
disabled_nd = True
disabled_ra = True
restart_uut = True
def run(self):
self.logger.info("Sending a Router Advertisement from TR1...")
self.router(1).send(
IPv6(src=str(self.router(1).link_local_ip(iface=1)), dst="ff02::1")/
ICMPv6ND_RA(prf=1)/
ICMPv6NDOptPrefixInfo(prefixlen=96, prefix=self.link(2).v6_prefix, L=True)/
ICMPv6NDOptSrcLLAddr(lladdr=self.router(1).iface(1).ll_addr), iface=1)
self.logger.info("Sending an Echo Request from TN1...")
self.node(1).send(
IPv6(src=str(self.node(1).global_ip()), dst=str(self.target(1).global_ip()))/
ICMPv6EchoRequest(seq=self.next_seq()))
self.logger.info("Checking for an ICMPv6 Echo Reply...")
r3 = self.node(1).received(src=self.target(1).global_ip(), seq=self.seq(), type=ICMPv6EchoReply)
assertEqual(1, len(r3), "expected to receive an ICMPv6 Echo Reply (seq: %d)" % (self.seq()))
self.logger.info("Grabbing the Echo Reply to see if TR1 forwarded it...")
r2 = self.router(1).received(iface=1, src=self.target(1).global_ip(), dst=self.node(2).global_ip(), seq=self.seq(), type=ICMPv6EchoReply, raw=True)
assertEqual(self.node(2).global_ip(), r2[0][IPv6].dst, "expected the ICMPv6 Echo Reply dst to be TN2's global address")
assertEqual(self.target(1).ll_addr(), r2[0][Ether].src, "expected the ICMPv6 Echo Reply Ethernet src to be the UUT")
assertNotEqual(self.router(1).iface(1).ll_addr, r2[0][Ether].dst, "did not expect the ICMPv6 Echo Reply to be sent through TR1")
| gpl-3.0 | -4,612,849,881,026,919,000 | 46.782609 | 155 | 0.654231 | false |
danmcp/pman | pman/tests/test_c_snode.py | 1 | 4381 | from unittest import TestCase
from pfmisc.C_snode import C_stree
import pudb
class TestCSnode(TestCase):
def test_csnode_constructor(self):
aTree = C_stree()
bTree = C_stree()
ATree = C_stree()
aTree.cd('/')
aTree.mkcd('a')
aTree.mknode(['b', 'c'])
aTree.cd('b')
aTree.touch('file1', 10)
aTree.touch('file2', "Rudolph Pienaar")
aTree.touch('file3', ['this', 'is', 'a', 'list'])
aTree.touch('file4', ('this', 'is', 'a', 'tuple'))
aTree.touch('file5', {'name': 'rudolph', 'address': '505 Washington'})
aTree.mknode(['d', 'e'])
aTree.cd('d')
aTree.mknode(['h', 'i'])
aTree.cd('/a/b/e')
aTree.mknode(['j', 'k'])
aTree.cd('/a/c')
aTree.mknode(['f', 'g'])
aTree.cd('f')
aTree.mknode(['l', 'm'])
aTree.cd('/a/c/g')
aTree.mknode(['n', 'o'])
ATree.cd('/')
ATree.mkcd('A')
ATree.mknode(['B', 'C'])
ATree.cd('B')
ATree.mknode(['D', 'E'])
ATree.cd('D')
ATree.mknode(['H', 'I'])
ATree.cd('/A/B/E')
ATree.mknode(['J', 'K'])
ATree.cd('/A/B/E/K')
ATree.touch('file1', 11)
ATree.touch('file2', "Reza Pienaar")
ATree.touch('file3', ['this', 'is', 'another', 'list'])
ATree.touch('file4', ('this', 'is', 'another', 'tuple'))
ATree.touch('file5', {'name': 'reza', 'address': '505 Washington'})
ATree.cd('/A/C')
ATree.mknode(['F', 'G'])
ATree.cd('F')
ATree.mknode(['L', 'M'])
ATree.cd('/A/C/G')
ATree.mknode(['N', 'O'])
bTree.cd('/')
bTree.mkcd('1')
bTree.mknode(['2', '3'])
bTree.cd('2')
bTree.mknode(['4', '5'])
bTree.cd('4')
bTree.mknode(['8', '9'])
bTree.cd('/1/2/5')
bTree.mknode(['10', '11'])
bTree.cd('/1/3')
bTree.mknode(['6', '7'])
bTree.cd('6')
bTree.mknode(['12', '13'])
bTree.cd('/1/3/7')
bTree.mknode(['14', '15'])
aTree.tree_metaData_print(False)
ATree.tree_metaData_print(False)
bTree.tree_metaData_print(False)
print('aTree = %s' % aTree)
# print(aTree.pathFromHere_walk('/'))
print('ATree = %s' % ATree)
# print(ATree.pathFromHere_walk('/'))
print('bTree = %s' % bTree)
# print(bTree.pathFromHere_walk('/'))
aTree.cd('/')
aTree.graft(bTree, '/1/2/')
aTree.tree_metaData_print(False)
print('aTree = %s' % aTree)
# print(aTree.pathFromHere_walk('/'))
# print(aTree.l_allPaths)
bTree.cd('/1/2/4/9')
bTree.graft(ATree, '/A/B')
bTree.tree_metaData_print(False)
print('bTree = %s' % bTree)
# print(bTree.pathFromHere_walk('/'))
# print(bTree.l_allPaths)
print('aTree = %s' % aTree)
# print(aTree.pathFromHere_explore('/'))
# print(aTree.l_allPaths)
# print(aTree.filesFromHere_explore('/'))
# print(aTree.l_allFiles)
print('Saving bTree...')
bTree.tree_save(startPath = '/',
pathDiskRoot = '/tmp/bTree',
failOnDirExist = True,
saveJSON = True,
savePickle = False)
print('Saving aTree...')
aTree.tree_save(startPath = '/',
pathDiskRoot = '/tmp/aTree',
failOnDirExist = True,
saveJSON = True,
savePickle = False)
# pudb.set_trace()
print('Reading aTree into cTree...')
cTree = C_stree.tree_load(
pathDiskRoot = '/tmp/aTree',
loadJSON = True,
loadPickle = False)
cTree.tree_metaData_print(False)
print('cTree = %s' % cTree)
cTree.rm('/4/9/B/E/K/file1')
print('cTree = %s' % cTree)
cTree.rm('/4/9/B/E/K/file2')
print('cTree = %s' % cTree)
cTree.rm('/4/9/B/E/K')
print('cTree = %s' % cTree)
dTree = C_stree()
cTree.tree_copy(startPath = '/a/b/file5',
destination = dTree)
print('dTree = %s' % dTree)
| mit | -4,447,620,016,017,195,500 | 30.978102 | 78 | 0.459028 | false |
joshmoore/zeroc-ice | py/test/Ice/operations/TwowaysNewAMI.py | 1 | 17284 | # **********************************************************************
#
# Copyright (c) 2003-2011 ZeroC, Inc. All rights reserved.
#
# This copy of Ice is licensed to you under the terms described in the
# ICE_LICENSE file included in this distribution.
#
# **********************************************************************
import Ice, Test, math, threading
def test(b):
if not b:
raise RuntimeError('test assertion failed')
class CallbackBase:
def __init__(self):
self._called = False
self._cond = threading.Condition()
def check(self):
self._cond.acquire()
try:
while not self._called:
self._cond.wait()
self._called = False
finally:
self._cond.release()
def called(self):
self._cond.acquire()
self._called = True
self._cond.notify()
self._cond.release()
class Callback(CallbackBase):
def __init__(self, communicator=None):
CallbackBase.__init__(self)
self._communicator = communicator
def ping(self):
self.called()
def isA(self, r):
test(r)
self.called()
def id(self, id):
test(id == "::Test::MyDerivedClass")
self.called()
def ids(self, ids):
test(len(ids) == 3)
self.called()
def opVoid(self):
self.called()
def opByte(self, r, b):
test(b == 0xf0)
test(r == 0xff)
self.called()
def opBool(self, r, b):
test(b)
test(not r)
self.called()
def opShortIntLong(self, r, s, i, l):
test(s == 10)
test(i == 11)
test(l == 12)
test(r == 12)
self.called()
def opFloatDouble(self, r, f, d):
test(f - 3.14 < 0.001)
test(d == 1.1E10)
test(r == 1.1E10)
self.called()
def opString(self, r, s):
test(s == "world hello")
test(r == "hello world")
self.called()
def opMyEnum(self, r, e):
test(e == Test.MyEnum.enum2)
test(r == Test.MyEnum.enum3)
self.called()
def opMyClass(self, r, c1, c2):
test(c1.ice_getIdentity() == self._communicator.stringToIdentity("test"))
test(c2.ice_getIdentity() == self._communicator.stringToIdentity("noSuchIdentity"))
test(r.ice_getIdentity() == self._communicator.stringToIdentity("test"))
# We can't do the callbacks below in serialize mode
if self._communicator.getProperties().getPropertyAsInt("Ice.Client.ThreadPool.Serialize") == 0:
r.opVoid()
c1.opVoid()
try:
c2.opVoid()
test(False)
except Ice.ObjectNotExistException:
pass
self.called()
def opStruct(self, rso, so):
test(rso.p == None)
test(rso.e == Test.MyEnum.enum2)
test(rso.s.s == "def")
test(so.e == Test.MyEnum.enum3)
test(so.s.s == "a new string")
# We can't do the callbacks below in serialize mode.
if self._communicator.getProperties().getPropertyAsInt("Ice.ThreadPool.Client.Serialize") == 0:
so.p.opVoid()
self.called()
def opByteS(self, rso, bso):
test(len(bso) == 4)
test(bso[0] == '\x22')
test(bso[1] == '\x12')
test(bso[2] == '\x11')
test(bso[3] == '\x01')
test(len(rso) == 8)
test(rso[0] == '\x01')
test(rso[1] == '\x11')
test(rso[2] == '\x12')
test(rso[3] == '\x22')
test(rso[4] == '\xf1')
test(rso[5] == '\xf2')
test(rso[6] == '\xf3')
test(rso[7] == '\xf4')
self.called()
def opBoolS(self, rso, bso):
test(len(bso) == 4)
test(bso[0])
test(bso[1])
test(not bso[2])
test(not bso[3])
test(len(rso) == 3)
test(not rso[0])
test(rso[1])
test(rso[2])
self.called()
def opShortIntLongS(self, rso, sso, iso, lso):
test(len(sso) == 3)
test(sso[0] == 1)
test(sso[1] == 2)
test(sso[2] == 3)
test(len(iso) == 4)
test(iso[0] == 8)
test(iso[1] == 7)
test(iso[2] == 6)
test(iso[3] == 5)
test(len(lso) == 6)
test(lso[0] == 10)
test(lso[1] == 30)
test(lso[2] == 20)
test(lso[3] == 10)
test(lso[4] == 30)
test(lso[5] == 20)
test(len(rso) == 3)
test(rso[0] == 10)
test(rso[1] == 30)
test(rso[2] == 20)
self.called()
def opFloatDoubleS(self, rso, fso, dso):
test(len(fso) == 2)
test(fso[0] - 3.14 < 0.001)
test(fso[1] - 1.11 < 0.001)
test(len(dso) == 3)
test(dso[0] == 1.3E10)
test(dso[1] == 1.2E10)
test(dso[2] == 1.1E10)
test(len(rso) == 5)
test(rso[0] == 1.1E10)
test(rso[1] == 1.2E10)
test(rso[2] == 1.3E10)
test(rso[3] - 3.14 < 0.001)
test(rso[4] - 1.11 < 0.001)
self.called()
def opStringS(self, rso, sso):
test(len(sso) == 4)
test(sso[0] == "abc")
test(sso[1] == "de")
test(sso[2] == "fghi")
test(sso[3] == "xyz")
test(len(rso) == 3)
test(rso[0] == "fghi")
test(rso[1] == "de")
test(rso[2] == "abc")
self.called()
def opByteSS(self, rso, bso):
test(len(bso) == 2)
test(len(bso[0]) == 1)
test(bso[0][0] == '\xff')
test(len(bso[1]) == 3)
test(bso[1][0] == '\x01')
test(bso[1][1] == '\x11')
test(bso[1][2] == '\x12')
test(len(rso) == 4)
test(len(rso[0]) == 3)
test(rso[0][0] == '\x01')
test(rso[0][1] == '\x11')
test(rso[0][2] == '\x12')
test(len(rso[1]) == 1)
test(rso[1][0] == '\xff')
test(len(rso[2]) == 1)
test(rso[2][0] == '\x0e')
test(len(rso[3]) == 2)
test(rso[3][0] == '\xf2')
test(rso[3][1] == '\xf1')
self.called()
def opFloatDoubleSS(self, rso, fso, dso):
test(len(fso) == 3)
test(len(fso[0]) == 1)
test(fso[0][0] - 3.14 < 0.001)
test(len(fso[1]) == 1)
test(fso[1][0] - 1.11 < 0.001)
test(len(fso[2]) == 0)
test(len(dso) == 1)
test(len(dso[0]) == 3)
test(dso[0][0] == 1.1E10)
test(dso[0][1] == 1.2E10)
test(dso[0][2] == 1.3E10)
test(len(rso) == 2)
test(len(rso[0]) == 3)
test(rso[0][0] == 1.1E10)
test(rso[0][1] == 1.2E10)
test(rso[0][2] == 1.3E10)
test(len(rso[1]) == 3)
test(rso[1][0] == 1.1E10)
test(rso[1][1] == 1.2E10)
test(rso[1][2] == 1.3E10)
self.called()
def opStringSS(self, rso, sso):
test(len(sso) == 5)
test(len(sso[0]) == 1)
test(sso[0][0] == "abc")
test(len(sso[1]) == 2)
test(sso[1][0] == "de")
test(sso[1][1] == "fghi")
test(len(sso[2]) == 0)
test(len(sso[3]) == 0)
test(len(sso[4]) == 1)
test(sso[4][0] == "xyz")
test(len(rso) == 3)
test(len(rso[0]) == 1)
test(rso[0][0] == "xyz")
test(len(rso[1]) == 0)
test(len(rso[2]) == 0)
self.called()
def opByteBoolD(self, ro, do):
di1 = {10: True, 100: False}
test(do == di1)
test(len(ro) == 4)
test(ro[10])
test(not ro[11])
test(not ro[100])
test(ro[101])
self.called()
def opShortIntD(self, ro, do):
di1 = {110: -1, 1100: 123123}
test(do == di1)
test(len(ro) == 4)
test(ro[110] == -1)
test(ro[111] == -100)
test(ro[1100] == 123123)
test(ro[1101] == 0)
self.called()
def opLongFloatD(self, ro, do):
di1 = {999999110: -1.1, 999999111: 123123.2}
for k in do:
test(math.fabs(do[k] - di1[k]) < 0.01)
test(len(ro) == 4)
test(ro[999999110] - -1.1 < 0.01)
test(ro[999999120] - -100.4 < 0.01)
test(ro[999999111] - 123123.2 < 0.01)
test(ro[999999130] - 0.5 < 0.01)
self.called()
def opStringStringD(self, ro, do):
di1 = {'foo': 'abc -1.1', 'bar': 'abc 123123.2'}
test(do == di1)
test(len(ro) == 4)
test(ro["foo"] == "abc -1.1")
test(ro["FOO"] == "abc -100.4")
test(ro["bar"] == "abc 123123.2")
test(ro["BAR"] == "abc 0.5")
self.called()
def opStringMyEnumD(self, ro, do):
di1 = {'abc': Test.MyEnum.enum1, '': Test.MyEnum.enum2}
test(do == di1)
test(len(ro) == 4)
test(ro["abc"] == Test.MyEnum.enum1)
test(ro["qwerty"] == Test.MyEnum.enum3)
test(ro[""] == Test.MyEnum.enum2)
test(ro["Hello!!"] == Test.MyEnum.enum2)
self.called()
def opMyEnumStringD(self, ro, do):
di1 = {Test.MyEnum.enum1: 'abc'}
test(do == di1)
test(len(ro) == 3)
test(ro[Test.MyEnum.enum1] == "abc")
test(ro[Test.MyEnum.enum2] == "Hello!!")
test(ro[Test.MyEnum.enum3] == "qwerty")
self.called()
def opMyStructMyEnumD(self, ro, do):
s11 = Test.MyStruct()
s11.i = 1
s11.j = 1
s12 = Test.MyStruct()
s12.i = 1
s12.j = 2
s22 = Test.MyStruct()
s22.i = 2
s22.j = 2
s23 = Test.MyStruct()
s23.i = 2
s23.j = 3
di1 = {s11: Test.MyEnum.enum1, s12: Test.MyEnum.enum2}
test(do == di1)
test(len(ro) == 4)
test(ro[s11] == Test.MyEnum.enum1)
test(ro[s12] == Test.MyEnum.enum2)
test(ro[s22] == Test.MyEnum.enum3)
test(ro[s23] == Test.MyEnum.enum2)
self.called()
def opIntS(self, r):
for j in range(0, len(r)):
test(r[j] == -j)
self.called()
def opIdempotent(self):
self.called()
def opNonmutating(self):
self.called()
def opDerived(self):
self.called()
def exCB(self, ex):
test(False)
def twowaysNewAMI(communicator, p):
cb = Callback()
p.begin_ice_ping(cb.ping, cb.exCB)
cb.check()
cb = Callback()
p.begin_ice_isA(Test.MyClass.ice_staticId(), cb.isA, cb.exCB)
cb.check()
cb = Callback()
p.begin_ice_id(cb.id, cb.exCB)
cb.check()
cb = Callback()
p.begin_ice_ids(cb.ids, cb.exCB)
cb.check()
r = p.begin_opVoid()
p.end_opVoid(r)
cb = Callback()
p.begin_opVoid(cb.opVoid, cb.exCB)
cb.check()
r = p.begin_opByte(0xff, 0x0f)
(ret, p3) = p.end_opByte(r)
test(p3 == 0xf0)
test(ret == 0xff)
cb = Callback()
p.begin_opByte(0xff, 0x0f, cb.opByte, cb.exCB)
cb.check()
cb = Callback()
p.begin_opBool(True, False, cb.opBool, cb.exCB)
cb.check()
cb = Callback()
p.begin_opShortIntLong(10, 11, 12, cb.opShortIntLong, cb.exCB)
cb.check()
cb = Callback()
p.begin_opFloatDouble(3.14, 1.1E10, cb.opFloatDouble, cb.exCB)
cb.check()
cb = Callback()
p.begin_opString("hello", "world", cb.opString, cb.exCB)
cb.check()
cb = Callback()
p.begin_opMyEnum(Test.MyEnum.enum2, cb.opMyEnum, cb.exCB)
cb.check()
cb = Callback(communicator)
p.begin_opMyClass(p, cb.opMyClass, cb.exCB)
cb.check()
si1 = Test.Structure()
si1.p = p
si1.e = Test.MyEnum.enum3
si1.s = Test.AnotherStruct()
si1.s.s = "abc"
si2 = Test.Structure()
si2.p = None
si2.e = Test.MyEnum.enum2
si2.s = Test.AnotherStruct()
si2.s.s = "def"
cb = Callback(communicator)
p.begin_opStruct(si1, si2, cb.opStruct, cb.exCB)
cb.check()
bsi1 = (0x01, 0x11, 0x12, 0x22)
bsi2 = (0xf1, 0xf2, 0xf3, 0xf4)
cb = Callback()
p.begin_opByteS(bsi1, bsi2, cb.opByteS, cb.exCB)
cb.check()
bsi1 = (True, True, False)
bsi2 = (False,)
cb = Callback()
p.begin_opBoolS(bsi1, bsi2, cb.opBoolS, cb.exCB)
cb.check()
ssi = (1, 2, 3)
isi = (5, 6, 7, 8)
lsi = (10, 30, 20)
cb = Callback()
p.begin_opShortIntLongS(ssi, isi, lsi, cb.opShortIntLongS, cb.exCB)
cb.check()
fsi = (3.14, 1.11)
dsi = (1.1E10, 1.2E10, 1.3E10)
cb = Callback()
p.begin_opFloatDoubleS(fsi, dsi, cb.opFloatDoubleS, cb.exCB)
cb.check()
ssi1 = ('abc', 'de', 'fghi')
ssi2 = ('xyz',)
cb = Callback()
p.begin_opStringS(ssi1, ssi2, cb.opStringS, cb.exCB)
cb.check()
bsi1 = ((0x01, 0x11, 0x12), (0xff,))
bsi2 = ((0x0e,), (0xf2, 0xf1))
cb = Callback()
p.begin_opByteSS(bsi1, bsi2, cb.opByteSS, cb.exCB)
cb.check()
fsi = ((3.14,), (1.11,), ())
dsi = ((1.1E10, 1.2E10, 1.3E10),)
cb = Callback()
p.begin_opFloatDoubleSS(fsi, dsi, cb.opFloatDoubleSS, cb.exCB)
cb.check()
ssi1 = (('abc',), ('de', 'fghi'))
ssi2 = ((), (), ('xyz',))
cb = Callback()
p.begin_opStringSS(ssi1, ssi2, cb.opStringSS, cb.exCB)
cb.check()
di1 = {10: True, 100: False}
di2 = {10: True, 11: False, 101: True}
cb = Callback()
p.begin_opByteBoolD(di1, di2, cb.opByteBoolD, cb.exCB)
cb.check()
di1 = {110: -1, 1100: 123123}
di2 = {110: -1, 111: -100, 1101: 0}
cb = Callback()
p.begin_opShortIntD(di1, di2, cb.opShortIntD, cb.exCB)
cb.check()
di1 = {999999110: -1.1, 999999111: 123123.2}
di2 = {999999110: -1.1, 999999120: -100.4, 999999130: 0.5}
cb = Callback()
p.begin_opLongFloatD(di1, di2, cb.opLongFloatD, cb.exCB)
cb.check()
di1 = {'foo': 'abc -1.1', 'bar': 'abc 123123.2'}
di2 = {'foo': 'abc -1.1', 'FOO': 'abc -100.4', 'BAR': 'abc 0.5'}
cb = Callback()
p.begin_opStringStringD(di1, di2, cb.opStringStringD, cb.exCB)
cb.check()
di1 = {'abc': Test.MyEnum.enum1, '': Test.MyEnum.enum2}
di2 = {'abc': Test.MyEnum.enum1, 'qwerty': Test.MyEnum.enum3, 'Hello!!': Test.MyEnum.enum2}
cb = Callback()
p.begin_opStringMyEnumD(di1, di2, cb.opStringMyEnumD, cb.exCB)
cb.check()
di1 = {Test.MyEnum.enum1: 'abc'}
di2 = {Test.MyEnum.enum2: 'Hello!!', Test.MyEnum.enum3: 'qwerty'}
cb = Callback()
p.begin_opMyEnumStringD(di1, di2, cb.opMyEnumStringD, cb.exCB)
cb.check()
s11 = Test.MyStruct()
s11.i = 1
s11.j = 1
s12 = Test.MyStruct()
s12.i = 1
s12.j = 2
s22 = Test.MyStruct()
s22.i = 2
s22.j = 2
s23 = Test.MyStruct()
s23.i = 2
s23.j = 3
di1 = {s11: Test.MyEnum.enum1, s12: Test.MyEnum.enum2}
di2 = {s11: Test.MyEnum.enum1, s22: Test.MyEnum.enum3, s23: Test.MyEnum.enum2}
cb = Callback()
p.begin_opMyStructMyEnumD(di1, di2, cb.opMyStructMyEnumD, cb.exCB)
cb.check()
lengths = ( 0, 1, 2, 126, 127, 128, 129, 253, 254, 255, 256, 257, 1000 )
for l in lengths:
s = []
for i in range(l):
s.append(i)
cb = Callback(l)
p.begin_opIntS(s, cb.opIntS, cb.exCB)
cb.check()
ctx = {'one': 'ONE', 'two': 'TWO', 'three': 'THREE'}
test(len(p.ice_getContext()) == 0)
r = p.begin_opContext()
c = p.end_opContext(r)
test(c != ctx)
test(len(p.ice_getContext()) == 0)
r = p.begin_opContext(_ctx=ctx)
c = p.end_opContext(r)
test(c == ctx)
p2 = Test.MyClassPrx.checkedCast(p.ice_context(ctx))
test(p2.ice_getContext() == ctx)
r = p2.begin_opContext()
c = p2.end_opContext(r)
test(c == ctx)
r = p2.begin_opContext(_ctx=ctx)
c = p2.end_opContext(r)
test(c == ctx)
#
# Test implicit context propagation
#
impls = ( 'Shared', 'PerThread' )
for i in impls:
initData = Ice.InitializationData()
initData.properties = communicator.getProperties().clone()
initData.properties.setProperty('Ice.ImplicitContext', i)
ic = Ice.initialize(data=initData)
ctx = {'one': 'ONE', 'two': 'TWO', 'three': 'THREE'}
p3 = Test.MyClassPrx.uncheckedCast(ic.stringToProxy("test:default -p 12010"))
ic.getImplicitContext().setContext(ctx)
test(ic.getImplicitContext().getContext() == ctx)
r = p3.begin_opContext()
c = p3.end_opContext(r)
test(c == ctx)
ic.getImplicitContext().put('zero', 'ZERO')
ctx = ic.getImplicitContext().getContext()
r = p3.begin_opContext()
c = p3.end_opContext(r)
test(c == ctx)
prxContext = {'one': 'UN', 'four': 'QUATRE'}
combined = {}
combined.update(ctx)
combined.update(prxContext)
test(combined['one'] == 'UN')
p3 = Test.MyClassPrx.uncheckedCast(p3.ice_context(prxContext))
ic.getImplicitContext().setContext({})
r = p3.begin_opContext()
c = p3.end_opContext(r)
test(c == prxContext)
ic.getImplicitContext().setContext(ctx)
r = p3.begin_opContext()
c = p3.end_opContext(r)
test(c == combined)
ic.destroy()
cb = Callback()
p.begin_opIdempotent(cb.opIdempotent, cb.exCB)
cb.check()
cb = Callback()
p.begin_opNonmutating(cb.opNonmutating, cb.exCB)
cb.check()
derived = Test.MyDerivedClassPrx.checkedCast(p)
test(derived)
cb = Callback()
derived.begin_opDerived(cb.opDerived, cb.exCB)
cb.check()
| gpl-2.0 | 4,795,975,078,609,575,000 | 26.00625 | 103 | 0.508852 | false |
yordan-desta/QgisIns | python/plugins/processing/core/Processing.py | 1 | 13294 | # -*- coding: utf-8 -*-
"""
***************************************************************************
Processing.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import sys
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from qgis.core import *
from qgis.utils import iface
import processing
from processing.modeler.ModelerUtils import ModelerUtils
from processing.core.ProcessingConfig import ProcessingConfig
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.ProcessingLog import ProcessingLog
from processing.gui.AlgorithmClassification import AlgorithmDecorator
from processing.gui.MessageBarProgress import MessageBarProgress
from processing.gui.RenderingStyles import RenderingStyles
from processing.gui.Postprocessing import handleAlgorithmResults
from processing.gui.AlgorithmExecutor import runalg
from processing.modeler.ModelerAlgorithmProvider import \
ModelerAlgorithmProvider
from processing.modeler.ModelerOnlyAlgorithmProvider import \
ModelerOnlyAlgorithmProvider
from processing.algs.qgis.QGISAlgorithmProvider import QGISAlgorithmProvider
from processing.algs.grass.GrassAlgorithmProvider import GrassAlgorithmProvider
from processing.algs.grass7.Grass7AlgorithmProvider import Grass7AlgorithmProvider
from processing.algs.lidar.LidarToolsAlgorithmProvider import \
LidarToolsAlgorithmProvider
from processing.algs.gdal.GdalOgrAlgorithmProvider import GdalOgrAlgorithmProvider
from processing.algs.otb.OTBAlgorithmProvider import OTBAlgorithmProvider
from processing.algs.r.RAlgorithmProvider import RAlgorithmProvider
from processing.algs.saga.SagaAlgorithmProvider import SagaAlgorithmProvider
from processing.script.ScriptAlgorithmProvider import ScriptAlgorithmProvider
from processing.algs.taudem.TauDEMAlgorithmProvider import TauDEMAlgorithmProvider
from processing.tools import dataobjects
class Processing:
listeners = []
providers = []
# A dictionary of algorithms. Keys are names of providers
# and values are list with all algorithms from that provider
algs = {}
# Same structure as algs
actions = {}
# All the registered context menu actions for the toolbox
contextMenuActions = []
modeler = ModelerAlgorithmProvider()
@staticmethod
def addProvider(provider, updateList=False):
"""Use this method to add algorithms from external providers.
"""
# Note: this might slow down the initialization process if
# there are many new providers added. Should think of a
# different solution
try:
provider.initializeSettings()
Processing.providers.append(provider)
ProcessingConfig.readSettings()
if updateList:
Processing.updateAlgsList()
except:
ProcessingLog.addToLog(ProcessingLog.LOG_ERROR,
Processing.tr('Could not load provider: %s\n%s')
% (provider.getDescription(), unicode(sys.exc_info()[1])))
Processing.removeProvider(provider)
@staticmethod
def removeProvider(provider):
"""Use this method to remove a provider.
This method should be called when unloading a plugin that
contributes a provider.
"""
try:
provider.unload()
Processing.providers.remove(provider)
ProcessingConfig.readSettings()
Processing.updateAlgsList()
except:
# This try catch block is here to avoid problems if the
# plugin with a provider is unloaded after the Processing
# framework itself has been unloaded. It is a quick fix
# before I found out how to properly avoid that.
pass
@staticmethod
def getProviderFromName(name):
"""Returns the provider with the given name."""
for provider in Processing.providers:
if provider.getName() == name:
return provider
return Processing.modeler
@staticmethod
def initialize():
# Add the basic providers
Processing.addProvider(QGISAlgorithmProvider())
Processing.addProvider(ModelerOnlyAlgorithmProvider())
Processing.addProvider(GdalOgrAlgorithmProvider())
Processing.addProvider(LidarToolsAlgorithmProvider())
Processing.addProvider(OTBAlgorithmProvider())
Processing.addProvider(RAlgorithmProvider())
Processing.addProvider(SagaAlgorithmProvider())
Processing.addProvider(GrassAlgorithmProvider())
Processing.addProvider(Grass7AlgorithmProvider())
Processing.addProvider(ScriptAlgorithmProvider())
Processing.addProvider(TauDEMAlgorithmProvider())
Processing.addProvider(Processing.modeler)
Processing.modeler.initializeSettings()
# And initialize
AlgorithmDecorator.loadClassification()
ProcessingLog.startLogging()
ProcessingConfig.initialize()
ProcessingConfig.readSettings()
RenderingStyles.loadStyles()
Processing.loadFromProviders()
@staticmethod
def updateAlgsList():
"""Call this method when there has been any change that
requires the list of algorithms to be created again from
algorithm providers.
"""
Processing.loadFromProviders()
Processing.fireAlgsListHasChanged()
@staticmethod
def loadFromProviders():
Processing.loadAlgorithms()
Processing.loadActions()
Processing.loadContextMenuActions()
@staticmethod
def updateProviders():
providers = [p for p in Processing.providers if p.getName() != "model"]
for provider in providers:
provider.loadAlgorithms()
@staticmethod
def addAlgListListener(listener):
"""
Listener should implement a algsListHasChanged() method.
Whenever the list of algorithms changes, that method will be
called for all registered listeners.
"""
Processing.listeners.append(listener)
@staticmethod
def fireAlgsListHasChanged():
for listener in Processing.listeners:
listener.algsListHasChanged()
@staticmethod
def loadAlgorithms():
Processing.algs = {}
Processing.updateProviders()
providers = [p for p in Processing.providers if p.getName() != "model"]
for provider in providers:
providerAlgs = provider.algs
algs = {}
for alg in providerAlgs:
algs[alg.commandLineName()] = alg
Processing.algs[provider.getName()] = algs
provs = {}
for provider in Processing.providers:
provs[provider.getName()] = provider
ModelerUtils.allAlgs = Processing.algs
ModelerUtils.providers = provs
Processing.modeler.loadAlgorithms()
algs = {}
for alg in Processing.modeler.algs:
algs[alg.commandLineName()] = alg
Processing.algs[Processing.modeler.getName()] = algs
@staticmethod
def loadActions():
for provider in Processing.providers:
providerActions = provider.actions
actions = list()
for action in providerActions:
actions.append(action)
Processing.actions[provider.getName()] = actions
Processing.actions[provider.getName()] = actions
@staticmethod
def loadContextMenuActions():
Processing.contextMenuActions = []
for provider in Processing.providers:
providerActions = provider.contextMenuActions
for action in providerActions:
Processing.contextMenuActions.append(action)
@staticmethod
def getAlgorithm(name):
for provider in Processing.algs.values():
if name in provider:
return provider[name]
return None
@staticmethod
def getAlgorithmFromFullName(name):
for provider in Processing.algs.values():
for alg in provider.values():
if alg.name == name:
return alg
return None
@staticmethod
def getObject(uri):
"""Returns the QGIS object identified by the given URI."""
return dataobjects.getObjectFromUri(uri)
@staticmethod
def runandload(name, *args):
Processing.runAlgorithm(name, handleAlgorithmResults, *args)
@staticmethod
def runAlgorithm(algOrName, onFinish, *args):
if isinstance(algOrName, GeoAlgorithm):
alg = algOrName
else:
alg = Processing.getAlgorithm(algOrName)
if alg is None:
print 'Error: Algorithm not found\n'
return
alg = alg.getCopy()
if len(args) == 1 and isinstance(args[0], dict):
# Set params by name and try to run the alg even if not all parameter values are provided,
# by using the default values instead.
setParams = []
for (name, value) in args[0].items():
param = alg.getParameterFromName(name)
if param and param.setValue(value):
setParams.append(name)
continue
output = alg.getOutputFromName(name)
if output and output.setValue(value):
continue
print 'Error: Wrong parameter value %s for parameter %s.' \
% (value, name)
ProcessingLog.addToLog(ProcessingLog.LOG_ERROR,
Processing.tr('Error in %s. Wrong parameter value %s for parameter %s.') \
% (alg.name, value, name))
return
# fill any missing parameters with default values if allowed
for param in alg.parameters:
if param.name not in setParams:
if not param.setValue(None):
print ('Error: Missing parameter value for parameter %s.' % (param.name))
ProcessingLog.addToLog(ProcessingLog.LOG_ERROR,
Processing.tr('Error in %s. Missing parameter value for parameter %s.') \
% (alg.name, param.name))
return
else:
if len(args) != alg.getVisibleParametersCount() \
+ alg.getVisibleOutputsCount():
print 'Error: Wrong number of parameters'
processing.alghelp(algOrName)
return
i = 0
for param in alg.parameters:
if not param.hidden:
if not param.setValue(args[i]):
print 'Error: Wrong parameter value: ' \
+ unicode(args[i])
return
i = i + 1
for output in alg.outputs:
if not output.hidden:
if not output.setValue(args[i]):
print 'Error: Wrong output value: ' + unicode(args[i])
return
i = i + 1
msg = alg.checkParameterValuesBeforeExecuting()
if msg:
print 'Unable to execute algorithm\n' + msg
return
if not alg.checkInputCRS():
print 'Warning: Not all input layers use the same CRS.\n' \
+ 'This can cause unexpected results.'
if iface is not None:
# Don't set the wait cursor twice, because then when you
# restore it, it will still be a wait cursor.
cursor = QApplication.overrideCursor()
if cursor is None or cursor == 0:
QApplication.setOverrideCursor(QCursor(Qt.WaitCursor))
elif cursor.shape() != Qt.WaitCursor:
QApplication.setOverrideCursor(QCursor(Qt.WaitCursor))
progress = None
if iface is not None :
progress = MessageBarProgress()
ret = runalg(alg, progress)
if onFinish is not None and ret:
onFinish(alg, progress)
if iface is not None:
QApplication.restoreOverrideCursor()
progress.close()
return alg
@staticmethod
def tr(string, context=''):
if context == '':
context = 'Processing'
return QCoreApplication.translate(context, string)
| gpl-2.0 | -4,520,054,536,477,009,000 | 36.342697 | 102 | 0.613435 | false |
haphaeu/yoshimi | neural_networks/simple/run_network.py | 1 | 4301 | import numpy as np
from os import path
# import network
# import network_full as network
import network
from random import random
from matplotlib import pyplot as plt
from pylab import rcParams
rcParams['figure.figsize'] = 12, 7
# ################################################################
# Network specific inputs
global count_n
global req_output_sz
count_n = 6
req_output_sz = count_n
network.vectorized_result.sz = req_output_sz
def gen_data(n_train, n_test):
"""This function fakes training and test data.
Use creativity here to model and train nn on anything =)
Implemented below is a list of random numbers, with the
expected output being the index of the highest number.
So for example:
x => vect(y) => y
[0.2, 0.4, 0.2, 0.7] => [0, 0, 0, 1] => 3
[0.9, 0.4, 0.2, 0.5] => [1, 0, 0, 0] => 0
"""
data_train, data_test = [], []
for i in range(n_train):
lst = np.array([random() for _ in range(count_n)])
tot = network.vectorized_result(np.argmax(lst))
data_train.append([lst.reshape((count_n, 1)), tot])
for i in range(n_test):
lst = np.array([random() for _ in range(count_n)])
tot = np.argmax(lst)
data_test.append([lst.reshape((count_n, 1)), tot])
return data_train, data_test
# ################################################################
"""
Some convenient functions to extend the class Network
"""
def check(net, data):
for datai in data:
x, y = datai
a = np.argmax(net.feedforward(x))
print('%d --> %d %s' % (y, a, '' if y == a else 'WRONG'))
network.Network.check = check
def save_state(self, fname='netstate'):
"""Save the current weights and biases of the network."""
import pickle
with open(fname, 'wb') as pf:
pickle.dump([self.biases, self.weights], pf)
network.Network.save_state = save_state
def load_state(self, fname='netstate'):
"""Load a previously saved weights and biases of the network."""
import pickle
with open(fname, 'rb') as pf:
self.biases, self.weights = pickle.load(pf)
network.Network.load_state = load_state
# ###
# # quebrando as chamadas pra poder brincar com isso
# ###
training_data, test_data = 0, 0
def load_data():
global training_data, test_data
training_data, test_data = gen_data(100000, 10000)
net = 0
def init_net(hidden_layers=15):
global net
net = network.Network([count_n, hidden_layers, req_output_sz], cost=network.CrossEntropyCost)
def train(epochs=5, eta=0.5, lmbda=0.5):
# set lmbda proportional to training set size: 5 for 50000, 0.1 for 1000.
return net.SGD(training_data, epochs, 10, eta=eta, lmbda=lmbda, evaluation_data=test_data,
monitor_evaluation_cost=True, monitor_evaluation_accuracy=True,
monitor_training_cost=True, monitor_training_accuracy=True)
def main():
if True:
load_data()
init_net(150)
ret = train(500, 0.1, 2.0)
return ret
epochs = 50
# Default hydden layers, epochs, eta, lambda
defaults = [15, 0.5, 0.5]
cases = {'lmbda': [0.1, 0.2, 0.5, 1.0, 2.0, 5.0],
'eta': [0.1, 0.2, 0.5, 1.0, 2.0, 5.0],
'hidden_layers': [15, 35, 70, 150]
}
load_data()
for var in cases:
for val in cases[var]:
hidden_layers, eta, lmbda = defaults
if var == 'lmbda':
lmbda = val
title = 'Epochs %d, Layers %d, eta %.2f' % (epochs, hidden_layers, eta)
elif var == 'eta':
eta = val
title = 'Epochs %d, Layers %d, lambda %.2f' % (epochs, hidden_layers, lmbda)
elif var == 'hidden_layers':
hidden_layers = val
title = 'Epochs %d, eta %d, lambda %.2f' % (epochs, eta, lmbda)
init_net(hidden_layers)
ret = train(epochs, eta, lmbda)
plt.subplot(211)
plt.plot(ret[0], label=var + str(val))
plt.legend(loc='best')
plt.title(title)
plt.subplot(212)
plt.plot(np.array(ret[1]))
plt.savefig('%s.png' % var, bbox_inches='tight')
plt.close()
if __name__ == '__main__':
ret = main()
| lgpl-3.0 | 3,718,649,668,900,855,000 | 28.458904 | 97 | 0.561265 | false |
upTee/upTee | uptee/lib/twconfig.py | 1 | 3894 |
class Config:
def __init__(self, path):
self.path = path
self.options = {}
self.votes = []
self.tunes = []
self.available_rcon_commands = []
self.rcon_commands = []
def read(self):
with open(self.path) as f:
lines = f.readlines()
lines = [line.strip() for line in lines if len(line.strip()) and ((line.strip()[0] != '#' and ' ' in line.strip()) or (len(line.strip()) > 9 and line.strip()[:9] == '#command:'))]
options = [line for line in lines if line.split(' ', 1)[0] not in ['add_vote', 'tune'] and line.split(' ', 1)[0][0] != '#']
tunes = [line.split(' ', 1)[1] for line in lines if line.split(' ', 1)[0] == 'tune']
votes = [line.split(' ', 1)[1] for line in lines if line.split(' ', 1)[0] == 'add_vote']
rcon_commands = [line[9:] for line in lines if line[:9] == '#command:']
self.options = {}
for line in options:
command = line.split(' ', 1)[0]
widget = line.rsplit(' ', 1)[1].split(':', 1)[1] if line.rsplit(' ', 1)[1][0] == '#' and '#widget:' in line.rsplit(' ', 1)[1] else 'text'
line = line.split(' ', 1)[1]
if ' ' in line and line.rsplit(' ', 1)[1][0] == '#' and '#widget:' in line.rsplit(' ', 1)[1]:
line = line.rsplit(' ', 1)[0]
value = line.strip('"')
# in case of select widget save the selections to the value
if len(widget) >= 7:
if widget[:7] == 'select:' and len(widget[7:]):
selections = widget.split(':', 1)[1].split(',')
widget = 'select'
if value not in selections:
selections.append(value)
for selection in selections:
value += ',{0}'.format(selection)
elif widget[:7] == 'select:':
widget = 'text'
self.options[command] = (value[:1000], widget)
self.tunes = [{'command': line.rsplit()[0].strip('"'), 'value': float(line.split()[1].strip('"'))} for line in tunes]
self.votes = [{'command': line.rsplit('" ', 1)[1].strip('"'), 'title': line.rsplit('" ', 1)[0].strip('"')} for line in votes if len(line.split('" ')) == 2]
for line in rcon_commands:
self.available_rcon_commands.extend([command for command in line.split() if command not in self.available_rcon_commands])
def write(self, path=None):
if not path:
path = self.path
with open(path, 'w') as f:
for key, value in self.options.iteritems():
f.write(u'{0} "{1}" #widget:{2}\n'.format(key, value[0], value[1]).encode('UTF-8'))
for tune in self.tunes:
f.write(u'tune {0} {1}\n'.format(tune['command'], tune['value']).encode('UTF-8'))
for vote in self.votes:
f.write(u'add_vote "{0}" "{1}"\n'.format(vote['title'], vote['command']).encode('UTF-8'))
for rcon_command in self.rcon_commands:
f.write(u'{0} {1}\n'.format(rcon_command['command'], rcon_command['value']).encode('UTF-8'))
def add_option(self, command, value, widget='text'):
if isinstance(value, int):
value = str(value)
self.options[command] = (value.replace('"', r'\"'), widget)
def add_tune(self, command, value):
self.tunes.append({'command': command, 'value': float(value)})
def add_vote(self, command, title):
self.votes.append({'command': command.replace('"', r'\"'), 'title': title.replace('"', r'\"')})
def add_rcon_command(self, command, value):
self.rcon_commands.append({'command': command.replace('"', r'\"'), 'value': value.replace('"', r'\"')})
| bsd-3-clause | 3,208,990,384,926,467,600 | 53.84507 | 191 | 0.500257 | false |
TaskEvolution/Task-Coach-Evolution | taskcoach/taskcoachlib/meta/gpl.py | 1 | 73166 | '''
Task Coach - Your friendly task manager
Copyright (C) 2004-2013 Task Coach developers <[email protected]>
Task Coach is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Task Coach is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
licenseHTML = '''<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<html><head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<title>GNU General Public License - GNU Project - Free Software Foundation (FSF)</title>
</head>
<body>
<h3 style="text-align: center;">GNU GENERAL PUBLIC LICENSE</h3>
<p style="text-align: center;">Version 3, 29 June 2007</p>
<p>Copyright © 2007 Free Software Foundation, Inc. <<a href="http://fsf.org/">http://fsf.org/</a>></p><p>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.</p>
<h3><a name="preamble"></a>Preamble</h3>
<p>The GNU General Public License is a free, copyleft license for
software and other kinds of works.</p>
<p>The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
the GNU General Public License is intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users. We, the Free Software Foundation, use the
GNU General Public License for most of our software; it applies also to
any other work released this way by its authors. You can apply it to
your programs, too.</p>
<p>When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.</p>
<p>To protect your rights, we need to prevent others from denying you
these rights or asking you to surrender the rights. Therefore, you have
certain responsibilities if you distribute copies of the software, or if
you modify it: responsibilities to respect the freedom of others.</p>
<p>For example, if you distribute copies of such a program, whether
gratis or for a fee, you must pass on to the recipients the same
freedoms that you received. You must make sure that they, too, receive
or can get the source code. And you must show them these terms so they
know their rights.</p>
<p>Developers that use the GNU GPL protect your rights with two steps:
(1) assert copyright on the software, and (2) offer you this License
giving you legal permission to copy, distribute and/or modify it.</p>
<p>For the developers' and authors' protection, the GPL clearly explains
that there is no warranty for this free software. For both users' and
authors' sake, the GPL requires that modified versions be marked as
changed, so that their problems will not be attributed erroneously to
authors of previous versions.</p>
<p>Some devices are designed to deny users access to install or run
modified versions of the software inside them, although the manufacturer
can do so. This is fundamentally incompatible with the aim of
protecting users' freedom to change the software. The systematic
pattern of such abuse occurs in the area of products for individuals to
use, which is precisely where it is most unacceptable. Therefore, we
have designed this version of the GPL to prohibit the practice for those
products. If such problems arise substantially in other domains, we
stand ready to extend this provision to those domains in future versions
of the GPL, as needed to protect the freedom of users.</p>
<p>Finally, every program is threatened constantly by software patents.
States should not allow patents to restrict development and use of
software on general-purpose computers, but in those that do, we wish to
avoid the special danger that patents applied to a free program could
make it effectively proprietary. To prevent this, the GPL assures that
patents cannot be used to render the program non-free.</p>
<p>The precise terms and conditions for copying, distribution and
modification follow.</p>
<h3><a name="terms"></a>TERMS AND CONDITIONS</h3>
<h4><a name="section0"></a>0. Definitions.</h4>
<p>“This License” refers to version 3 of the GNU General Public License.</p>
<p>“Copyright” also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.</p>
<p>“The Program” refers to any copyrightable work licensed under this
License. Each licensee is addressed as “you”. “Licensees” and
“recipients” may be individuals or organizations.</p>
<p>To “modify” a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a “modified version” of the
earlier work or a work “based on” the earlier work.</p>
<p>A “covered work” means either the unmodified Program or a work based
on the Program.</p>
<p>To “propagate” a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.</p>
<p>To “convey” a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.</p>
<p>An interactive user interface displays “Appropriate Legal Notices”
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.</p>
<h4><a name="section1"></a>1. Source Code.</h4>
<p>The “source code” for a work means the preferred form of the work
for making modifications to it. “Object code” means any non-source
form of a work.</p>
<p>A “Standard Interface” means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.</p>
<p>The “System Libraries” of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
“Major Component”, in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.</p>
<p>The “Corresponding Source” for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.</p>
<p>The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.</p>
<p>The Corresponding Source for a work in source code form is that
same work.</p>
<h4><a name="section2"></a>2. Basic Permissions.</h4>
<p>All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.</p>
<p>You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.</p>
<p>Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.</p>
<h4><a name="section3"></a>3. Protecting Users' Legal Rights From Anti-Circumvention Law.</h4>
<p>No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.</p>
<p>When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.</p>
<h4><a name="section4"></a>4. Conveying Verbatim Copies.</h4>
<p>You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.</p>
<p>You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.</p>
<h4><a name="section5"></a>5. Conveying Modified Source Versions.</h4>
<p>You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:</p>
<ul>
<li>a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.</li>
<li>b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
“keep intact all notices”.</li>
<li>c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.</li>
<li>d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.</li>
</ul>
<p>A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
“aggregate” if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.</p>
<h4><a name="section6"></a>6. Conveying Non-Source Forms.</h4>
<p>You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:</p>
<ul>
<li>a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.</li>
<li>b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.</li>
<li>c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.</li>
<li>d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.</li>
<li>e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.</li>
</ul>
<p>A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.</p>
<p>A “User Product” is either (1) a “consumer product”, which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, “normally used” refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.</p>
<p>“Installation Information” for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.</p>
<p>If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).</p>
<p>The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.</p>
<p>Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.</p>
<h4><a name="section7"></a>7. Additional Terms.</h4>
<p>“Additional permissions” are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.</p>
<p>When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.</p>
<p>Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:</p>
<ul>
<li>a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or</li>
<li>b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or</li>
<li>c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or</li>
<li>d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or</li>
<li>e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or</li>
<li>f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.</li>
</ul>
<p>All other non-permissive additional terms are considered “further
restrictions” within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.</p>
<p>If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.</p>
<p>Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.</p>
<h4><a name="section8"></a>8. Termination.</h4>
<p>You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).</p>
<p>However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.</p>
<p>Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.</p>
<p>Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.</p>
<h4><a name="section9"></a>9. Acceptance Not Required for Having Copies.</h4>
<p>You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.</p>
<h4><a name="section10"></a>10. Automatic Licensing of Downstream Recipients.</h4>
<p>Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.</p>
<p>An “entity transaction” is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.</p>
<p>You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.</p>
<h4><a name="section11"></a>11. Patents.</h4>
<p>A “contributor” is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's “contributor version”.</p>
<p>A contributor's “essential patent claims” are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, “control” includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.</p>
<p>Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.</p>
<p>In the following three paragraphs, a “patent license” is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To “grant” such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.</p>
<p>If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. “Knowingly relying” means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.</p>
<p>If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.</p>
<p>A patent license is “discriminatory” if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.</p>
<p>Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.</p>
<h4><a name="section12"></a>12. No Surrender of Others' Freedom.</h4>
<p>If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.</p>
<h4><a name="section13"></a>13. Use with the GNU Affero General Public License.</h4>
<p>Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU Affero General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the special requirements of the GNU Affero General Public License,
section 13, concerning interaction through a network will apply to the
combination as such.</p>
<h4><a name="section14"></a>14. Revised Versions of this License.</h4>
<p>The Free Software Foundation may publish revised and/or new versions of
the GNU General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.</p>
<p>Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU General
Public License “or any later version” applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU General Public License, you may choose any version ever published
by the Free Software Foundation.</p>
<p>If the Program specifies that a proxy can decide which future
versions of the GNU General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.</p>
<p>Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.</p>
<h4><a name="section15"></a>15. Disclaimer of Warranty.</h4>
<p>THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM “AS IS” WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.</p>
<h4><a name="section16"></a>16. Limitation of Liability.</h4>
<p>IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.</p>
<h4><a name="section17"></a>17. Interpretation of Sections 15 and 16.</h4>
<p>If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.</p>
<p>END OF TERMS AND CONDITIONS</p>
<h3><a name="howto"></a>How to Apply These Terms to Your New Programs</h3>
<p>If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.</p>
<p>To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the “copyright” line and a pointer to where the full notice is found.</p>
<pre> <one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
</pre>
<p>Also add information on how to contact you by electronic and paper mail.</p>
<p>If the program does terminal interaction, make it output a short
notice like this when it starts in an interactive mode:</p>
<pre> <program> Copyright (C) <year> <name of author>
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
</pre>
<p>The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, your program's commands
might be different; for a GUI interface, you would use an “about box”.</p>
<p>You should also get your employer (if you work as a programmer) or school,
if any, to sign a “copyright disclaimer” for the program, if necessary.
For more information on this, and how to apply and follow the GNU GPL, see
<<a href="http://www.gnu.org/licenses/">http://www.gnu.org/licenses/</a>>.</p>
<p>The GNU General Public License does not permit incorporating your program
into proprietary programs. If your program is a subroutine library, you
may consider it more useful to permit linking proprietary applications with
the library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License. But first, please read
<<a href="http://www.gnu.org/philosophy/why-not-lgpl.html">http://www.gnu.org/philosophy/why-not-lgpl.html</a>>.</p>
</body></html>
'''
licenseText='''
GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU General Public License is a free, copyleft license for
software and other kinds of works.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
the GNU General Public License is intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users. We, the Free Software Foundation, use the
GNU General Public License for most of our software; it applies also to
any other work released this way by its authors. You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
To protect your rights, we need to prevent others from denying you
these rights or asking you to surrender the rights. Therefore, you have
certain responsibilities if you distribute copies of the software, or if
you modify it: responsibilities to respect the freedom of others.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must pass on to the recipients the same
freedoms that you received. You must make sure that they, too, receive
or can get the source code. And you must show them these terms so they
know their rights.
Developers that use the GNU GPL protect your rights with two steps:
(1) assert copyright on the software, and (2) offer you this License
giving you legal permission to copy, distribute and/or modify it.
For the developers' and authors' protection, the GPL clearly explains
that there is no warranty for this free software. For both users' and
authors' sake, the GPL requires that modified versions be marked as
changed, so that their problems will not be attributed erroneously to
authors of previous versions.
Some devices are designed to deny users access to install or run
modified versions of the software inside them, although the manufacturer
can do so. This is fundamentally incompatible with the aim of
protecting users' freedom to change the software. The systematic
pattern of such abuse occurs in the area of products for individuals to
use, which is precisely where it is most unacceptable. Therefore, we
have designed this version of the GPL to prohibit the practice for those
products. If such problems arise substantially in other domains, we
stand ready to extend this provision to those domains in future versions
of the GPL, as needed to protect the freedom of users.
Finally, every program is threatened constantly by software patents.
States should not allow patents to restrict development and use of
software on general-purpose computers, but in those that do, we wish to
avoid the special danger that patents applied to a free program could
make it effectively proprietary. To prevent this, the GPL assures that
patents cannot be used to render the program non-free.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Use with the GNU Affero General Public License.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU Affero General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the special requirements of the GNU Affero General Public License,
section 13, concerning interaction through a network will apply to the
combination as such.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If the program does terminal interaction, make it output a short
notice like this when it starts in an interactive mode:
<program> Copyright (C) <year> <name of author>
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, your program's commands
might be different; for a GUI interface, you would use an "about box".
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU GPL, see
<http://www.gnu.org/licenses/>.
The GNU General Public License does not permit incorporating your program
into proprietary programs. If your program is a subroutine library, you
may consider it more useful to permit linking proprietary applications with
the library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License. But first, please read
<http://www.gnu.org/philosophy/why-not-lgpl.html>.
'''
| gpl-3.0 | 8,660,533,589,698,813,000 | 51.411175 | 122 | 0.787142 | false |
bundgus/python-playground | matplotlib-playground/examples/units/bar_demo2.py | 1 | 1076 | """
plot using a variety of cm vs inches conversions. The example shows
how default unit instrospection works (ax1), how various keywords can
be used to set the x and y units to override the defaults (ax2, ax3,
ax4) and how one can set the xlimits using scalars (ax3, current units
assumed) or units (conversions applied to get the numbers to current
units)
"""
import numpy as np
from basic_units import cm, inch
import matplotlib.pyplot as plt
cms = cm * np.arange(0, 10, 2)
bottom = 0*cm
width = 0.8*cm
fig = plt.figure()
ax1 = fig.add_subplot(2, 2, 1)
ax1.bar(cms, cms, bottom=bottom)
ax2 = fig.add_subplot(2, 2, 2)
ax2.bar(cms, cms, bottom=bottom, width=width, xunits=cm, yunits=inch)
ax3 = fig.add_subplot(2, 2, 3)
ax3.bar(cms, cms, bottom=bottom, width=width, xunits=inch, yunits=cm)
ax3.set_xlim(2, 6) # scalars are interpreted in current units
ax4 = fig.add_subplot(2, 2, 4)
ax4.bar(cms, cms, bottom=bottom, width=width, xunits=inch, yunits=inch)
#fig.savefig('simple_conversion_plot.png')
ax4.set_xlim(2*cm, 6*cm) # cm are converted to inches
plt.show()
| mit | 3,320,166,305,052,029,000 | 28.888889 | 71 | 0.722119 | false |
n3storm/seantis-questionnaire | questionnaire/emails.py | 1 | 6183 | # -*- coding: utf-8
"""
Functions to send email reminders to users.
"""
from django.core.mail import SMTPConnection, EmailMessage
from django.contrib.auth.decorators import login_required
from django.template import Context, loader
from django.utils import translation
from django.conf import settings
from models import Subject, QuestionSet, RunInfo, Questionnaire
from datetime import datetime
from django.shortcuts import render_to_response, get_object_or_404
import random, time, smtplib, rfc822
from email.Header import Header
from email.Utils import formataddr, parseaddr
try: from hashlib import md5
except: from md5 import md5
def encode_emailaddress(address):
"""
Encode an email address as ASCII using the Encoded-Word standard.
Needed to work around http://code.djangoproject.com/ticket/11144
"""
try: return address.encode('ascii')
except UnicodeEncodeError: pass
nm, addr = parseaddr(address)
return formataddr( (str(Header(nm, settings.DEFAULT_CHARSET)), addr) )
def _new_random(subject):
"""
Create a short unique randomized string.
Returns: subject_id + 'z' +
md5 hexdigest of subject's surname, nextrun date, and a random number
"""
return "%dz%s" % (subject.id, md5(subject.surname + str(subject.nextrun) + hex(random.randint(1,999999))).hexdigest()[:6])
def _new_runinfo(subject, questionset):
"""
Create a new RunInfo entry with a random code
If a unique subject+runid entry already exists, return that instead..
That should only occurs with manual database changes
"""
nextrun = subject.nextrun
runid = str(nextrun.year)
entries = list(RunInfo.objects.filter(runid=runid, subject=subject))
if len(entries)>0:
r = entries[0]
else:
r = RunInfo()
r.random = _new_random(subject)
r.subject = subject
r.runid = runid
r.emailcount = 0
r.created = datetime.now()
r.questionset = questionset
r.save()
if nextrun.month == 2 and nextrun.day == 29: # the only exception?
subject.nextrun = datetime(nextrun.year + 1, 2, 28)
else:
subject.nextrun = datetime(nextrun.year + 1, nextrun.month, nextrun.day)
subject.save()
return r
def _send_email(runinfo):
"Send the email for a specific runinfo entry"
subject = runinfo.subject
translation.activate(subject.language)
tmpl = loader.get_template(settings.QUESTIONNAIRE_EMAIL_TEMPLATE)
c = Context()
c['surname'] = subject.surname
c['givenname'] = subject.givenname
c['gender'] = subject.gender
c['email'] = subject.email
c['random'] = runinfo.random
c['runid'] = runinfo.runid
c['created'] = runinfo.created
c['site'] = getattr(settings, 'QUESTIONNAIRE_URL', '(settings.QUESTIONNAIRE_URL not set)')
email = tmpl.render(c)
emailFrom = settings.QUESTIONNAIRE_EMAIL_FROM
emailSubject, email = email.split("\n",1) # subject must be on first line
emailSubject = emailSubject.strip()
emailFrom = emailFrom.replace("$RUNINFO", runinfo.random)
emailTo = '"%s, %s" <%s>' % (subject.surname, subject.givenname, subject.email)
emailTo = encode_emailaddress(emailTo)
emailFrom = encode_emailaddress(emailFrom)
try:
conn = SMTPConnection()
msg = EmailMessage(emailSubject, email, emailFrom, [ emailTo ],
connection=conn)
msg.send()
runinfo.emailcount = 1 + runinfo.emailcount
runinfo.emailsent = datetime.now()
runinfo.lastemailerror = "OK, accepted by server"
runinfo.save()
return True
except smtplib.SMTPRecipientsRefused:
runinfo.lastemailerror = "SMTP Recipient Refused"
except smtplib.SMTPHeloError:
runinfo.lastemailerror = "SMTP Helo Error"
except smtplib.SMTPSenderRefused:
runinfo.lastemailerror = "SMTP Sender Refused"
except smtplib.SMTPDataError:
runinfo.lastemailerror = "SMTP Data Error"
runinfo.save()
return False
def send_emails(request=None, qname=None):
"""
1. Create a runinfo entry for each subject who is due and has state 'active'
2. Send an email for each runinfo entry whose subject receives email,
providing that the last sent email was sent more than a week ago.
This can be called either by "./manage.py questionnaire_emails" (without
request) or through the web, if settings.EMAILCODE is set and matches.
"""
if request and request.GET.get('code') != getattr(settings,'EMAILCODE', False):
raise Http404
if not qname:
qname = getattr(settings, 'QUESTIONNAIRE_DEFAULT', None)
if not qname:
raise Exception("QUESTIONNAIRE_DEFAULT not in settings")
questionnaire = Questionnaire.objects.get(name=qname)
questionset = QuestionSet.objects.filter(questionnaire__name=qname).order_by('sortid')
if not questionset:
raise Exception("No questionsets for questionnaire '%s' (in settings.py)" % qname)
return
questionset = questionset[0]
viablesubjects = Subject.objects.filter(nextrun__lte = datetime.now(), state='active')
for s in viablesubjects:
r = _new_runinfo(s, questionset)
runinfos = RunInfo.objects.filter(subject__formtype='email', questionset__questionnaire=questionnaire)
WEEKAGO = time.time() - (60 * 60 * 24 * 7) # one week ago
outlog = []
for r in runinfos:
if r.runid.startswith('test:'):
continue
if r.emailcount == -1:
continue
if r.emailcount == 0 or time.mktime(r.emailsent.timetuple()) < WEEKAGO:
try:
if _send_email(r):
outlog.append(u"[%s] %s, %s: OK" % (r.runid, r.subject.surname, r.subject.givenname))
else:
outlog.append(u"[%s] %s, %s: %s" % (r.runid, r.subject.surname, r.subject.givenname, r.lastemailerror))
except Exception, e:
outlog.append("Exception: [%s] %s: %s" % (r.runid, r.subject.surname, str(e)))
if request:
return HttpResponse("Sent Questionnaire Emails:\n "
+"\n ".join(outlog), mimetype="text/plain")
return "\n".join(outlog)
| bsd-3-clause | -8,836,740,893,859,258,000 | 37.886792 | 126 | 0.665858 | false |
lukovnikov/qelos | qelos/scripts/webqa/preprocessing/json2graph.py | 1 | 17885 | from __future__ import print_function
from __future__ import print_function
import qelos as q
import json, re
from nltk.corpus import stopwords
from IPython import embed
def run(trainp="../../../../datasets/webqsp/webqsp.train.json",
testp="../../../../datasets/webqsp/webqsp.test.json",
corechains_only=False,
train_entity_linking=None,
test_entity_linking=None):
traind = json.load(open(trainp))
testd = json.load(open(testp))
tq2p, traingraphs = buildgraphs(traind,
no_complex_order=True,
no_value_constraints=True,
corechains_only=corechains_only)
xq2p, testgraphs = buildgraphs(testd,
no_complex_order=True,
no_value_constraints=True,
corechains_only=corechains_only)
trainquestions = {}
testquestions = {}
for traindi in traind["Questions"]:
trainquestions[traindi["QuestionId"]] = traindi
for testdi in testd["Questions"]:
testquestions[testdi["QuestionId"]] = testdi
print_forms(tq2p, trainquestions, traingraphs, tofile="../../../../datasets/webqsp/webqsp.train.graph", find_partial_entity_name_matches=False)
print_forms(xq2p, testquestions, testgraphs, tofile="../../../../datasets/webqsp/webqsp.test.graph", find_partial_entity_name_matches=False)
def print_forms(tq2p, trainquestions, traingraphs, tofile=None, find_partial_entity_name_matches=False):
missing_entity_names = []
total = 0
if tofile is not None:
tofile = open(tofile, "w")
try:
for qid, pids in sorted(tq2p.items(), key=lambda (x, y): int(re.match(r'[^\d]+(\d+)$', x).group(1))):
question = trainquestions[qid]
language_form = question["ProcessedQuestion"]
if len(pids) > 0:
logical_form = graphtostr(traingraphs[pids[0]])
entity_names = set(re.findall(r"\[([^\]]+)\]", logical_form))
entnames = set()
for en in entity_names:
ens = tuple(en.split("/"))
if len(ens) > 1:
pass
entnames.add(ens)
if "lord of the rings" in language_form:
pass
missing_entity_name = 0
for entitynameset in entnames:
entitynamesetinlanguageform = 0
for entitynamesetel in entitynameset:
if entitynamesetel in language_form:
logical_form = logical_form.replace("[{}]".format("/".join(entitynameset)), "[{}]".format(entitynamesetel))
entitynamesetinlanguageform += 1
break
elif find_partial_entity_name_matches: # try to find substring match
partialname = find_partial(entitynamesetel, language_form)
if partialname is not None:
torep = "[{}]".format("/".join(entitynameset))
logical_form = logical_form.replace(torep, "[{}]".format(partialname))
entitynamesetinlanguageform += 1
break
else:
pass
if entitynamesetinlanguageform == 0:
missing_entity_name = 1
#print out
out = u"{}\t{}\t{}\t{}".format(qid, language_form, missing_entity_name, logical_form)
if missing_entity_name > 0:
missing_entity_names.append(out)
total += 1
else:
out = u"{}\t{}".format(qid, language_form)
total += 1
if tofile is None:
print (out)
else:
tofile.write("{}\n".format(out))
for x in missing_entity_names:
print (x)
pass
except Exception as e:
if tofile is not None:
tofile.close()
print(question)
raise e
print ("{} out of {} questions have non-matching entity labels -> {}%".format(len(missing_entity_names), total, (
total - len(missing_entity_names)) / (1. * total)))
#print entity_names
#embed()
def find_partial(key, hay):
if "patriots" in hay:
pass
haywords = q.tokenize(hay); keywords = q.tokenize(key)
if "".join(keywords) in haywords:
return "".join(keywords)
partial = []
for i in range(len(haywords)):
breakouter = False
for j in range(len(keywords)):
if (haywords[i] == keywords[j] or haywords[i] == keywords[j] + "s" or haywords[i]+"s" == keywords[j]) and haywords[i] not in set(stopwords.words("english")): # start
partial.append(haywords[i])
for k in range(1, min(len(keywords) - j, len(haywords) - i )):
if haywords[i + k] == keywords[j+k]:
partial.append(haywords[i+k])
breakouter = True
break
else:
pass
if breakouter:
break
if len(partial) > 0:
return " ".join(partial)
else:
return None
def buildgraphs(d,
no_value_constraints=False,
no_entity_constraints=False,
no_order=False,
no_complex_order=True,
corechains_only=False):
# iterate over questions and their parses, output dictionary from question id to parse ids and dictionary of parses
q2p = {}
parsegraphs = {}
multipleparsescount = 0
withoutchaincount = 0
withentconstraintcount = 0
onlyentconstraintcount = 0
withvalconstraintcount = 0
argoptcount = 0
otherordercount = 0
onlychaincount = 0
withordercount = 0
numquestions = 0
for q in d["Questions"]:
numquestions += 1
qid = q["QuestionId"]
parses = q["Parses"]
if len(parses) > 1:
multipleparsescount += 1
parses = [parses[0]]
parseids = []
for parse in parses:
parseid = parse["ParseId"]
parsegraph, hases, topicmid = buildgraph(parse, corechains_only=corechains_only)
entcont, valcont, order, argopt, chain = hases
withoutchaincount += 1 if chain is False else 0
withentconstraintcount += 1 if entcont is True else 0
onlyentconstraintcount += 1 if entcont is True and valcont is False and order is False else 0
withvalconstraintcount += 1 if valcont is True else 0
argoptcount += 1 if argopt is True else 0
otherordercount += 1 if order is True and argopt is False else 0
withordercount += 1 if order is True else 0
onlychaincount += 1 if order is False and entcont is False and valcont is False else 0
if chain and \
(not (valcont and no_value_constraints or
order and no_order or
entcont and no_entity_constraints or
order and not argopt and no_complex_order)):
parsegraphs[parseid] = parsegraph
parseids.append(parseid)
q2p[qid] = parseids
print ("number of questions: {}".format(numquestions))
print ("number of questions with multiple parses: {}".format(multipleparsescount))
print ("number of questions with only chain: {}".format(onlychaincount))
print ("number of questions without chain: {}".format(withoutchaincount))
print (
"number of questions with:\n\tentity constraints: {} ({})\n\tvalue constraints: {}\n\torder: {} ({} of which argmax, {} others)"
.format(withentconstraintcount, onlyentconstraintcount, withvalconstraintcount, withordercount, argoptcount,
otherordercount))
return q2p, parsegraphs
def buildgraph(parse, corechains_only=False):
ret, hases, topicmid = buildgraph_from_fish(parse, corechains_only=corechains_only)
return ret, hases, topicmid
def buildgraph_from_fish(parse, corechains_only=False):
haschain = False
hasentityconstraints = False
hasvalueconstraints = False
hasorder = False
hasargopt = False
haschain = False
# fish head and tail
qnode = OutputNode() # tail
#topicentity = EntityNode(parse["TopicEntityMid"], parse["TopicEntityName"]) #head
topicentity = EntityNode(parse["TopicEntityMid"], parse["PotentialTopicEntityMention"], topicentity=True) #head
# fish spine
cnode = topicentity
spinenodes = []
if parse["InferentialChain"] is not None:
haschain = True
for i, rel in enumerate(parse["InferentialChain"]):
tonode = VariableNode() if i < len(parse["InferentialChain"]) - 1 else qnode
spinenodes.append(tonode)
cnode.add_edge(rel, tonode)
cnode = tonode
# if len(parse["Sparql"]) >= len("#MANUAL SPARQL") and \
# parse["Sparql"][:len("#MANUAL SPARQL")] == "#MANUAL SPARQL":
# haschain = False
# time
# TODO
# constraints
if not corechains_only:
for constraint in parse["Constraints"]:
operator, argtype, arg, name, pos, pred, valtype = constraint["Operator"], constraint["ArgumentType"], constraint["Argument"], constraint["EntityName"], constraint["SourceNodeIndex"], constraint["NodePredicate"], constraint["ValueType"]
if argtype == "Entity":
hasentityconstraints = True
assert(operator == "Equal")
assert(valtype == "String")
ent = EntityNode(arg, name)
try:
edge = RelationEdge(spinenodes[pos], ent, pred)
except IndexError as e:
print(parse["ParseId"])
break
#raise e
spinenodes[pos].append_edge(edge)
ent.append_edge(edge)
elif argtype == "Value":
hasvalueconstraints = True
assert(name == "" or name == None)
intervar = VariableNode()
try:
edge = RelationEdge(spinenodes[pos], intervar, pred)
except IndexError as e:
print(parse["ParseId"])
break
spinenodes[pos].append_edge(edge)
intervar.append_edge(edge)
if operator == "LessOrEqual":
rel = "<="
elif operator == "GreaterOrEqual":
rel = ">="
elif operator == "Equal":
rel = "=="
else:
raise Exception("unknown operator")
val = ValueNode(arg, valuetype=valtype)
edge = MathEdge(intervar, val, rel)
intervar.append_edge(edge)
val.append_edge(edge)
# order
if parse["Order"] is not None:
hasorder = True
orderinfo = parse["Order"]
hasargopt = orderinfo["Count"] == 1 and orderinfo["Start"] == 0
if hasargopt:
argoptnode = ArgMaxNode() if orderinfo["SortOrder"] == "Descending" \
else ArgMinNode() if orderinfo["SortOrder"] == "Ascending" else None
pos = orderinfo["SourceNodeIndex"]
pred = orderinfo["NodePredicate"]
edge = RelationEdge(spinenodes[pos], argoptnode, pred)
spinenodes[pos].append_edge(edge)
argoptnode.append_edge(edge)
# TODO
return qnode, (hasentityconstraints, hasvalueconstraints, hasorder, hasargopt, haschain), parse["TopicEntityMid"]
def graphtostr(outputnode):
ret = ""
return u"{1}\t{2}\t{3}\t{0}".format(*_tostr_edge_based(outputnode))
return _tostr_rec(outputnode, "")
def _tostr_edge_based(node, top=True):
# collect all edges:
edges = set()
for edge in node.edges:
if not edge.visited:
edges.add(edge)
edge.visited = True
othernodeedges = _tostr_edge_based(edge.src if edge.tgt == node else edge.tgt, top=False)
edges.update(othernodeedges)
if top:
ret = ""
varnames = {0,}
valueconstraints = 0
for edge in edges:
# print edge
if isinstance(edge.src, VariableNode):
if edge.src._name is None:
edge.src._name = u"var{}".format(max(varnames) + 1)
varnames.add(max(varnames) + 1)
if isinstance(edge.tgt, VariableNode):
if edge.tgt._name is None:
edge.tgt._name = u"var{}".format(max(varnames) + 1)
varnames.add(max(varnames) + 1)
edgestr = u"{} {} {} ; ".format(
edge.src.value,
edge.lbl,
edge.tgt.value,
)
if isinstance(edge.src, ValueNode) or isinstance(edge.tgt, ValueNode):
valueconstraints = 1
# if edge.lbl == "people.marriage.type_of_union":
# pass
else:
ret += edgestr
numedges = len(edges)
numvars = len(varnames)-1
return ret, numedges, numvars, valueconstraints
else:
return edges
def _tostr_rec(node, acc=""):
parts = []
isleaf = True
for edge in node.edges:
if isinstance(edge, VariableNode):
pass
if not edge.visited:
isleaf = False
edge.visited = True
othernodestr = _tostr_rec(edge.src if edge.tgt == node else edge.tgt)
edge.visited = False
part = "({} {})".format(othernodestr, edge.lbl)
parts.append(part)
if isleaf:
return node.value
else:
if len(parts) > 1:
ret = "(and {})".format(" ".join(parts))
else:
ret = parts[0]
return ret
class Graph(object):
def __init__(self):
self._nodes = []
class Node(object):
def __init__(self):
self._edges = []
@property
def edges(self):
return self._edges
def add_edge(self, rel, tonode):
edg = Edge(self, tonode, rel)
self._edges.append(edg)
tonode._edges.append(edg)
def append_edge(self, edge):
self._edges.append(edge)
@property
def value(self):
raise NotImplementedError()
class VariableNode(Node):
def __init__(self, name=None):
super(VariableNode, self).__init__()
self._name = name
@property
def value(self):
return self._name
class OrderNode(Node):
def __init__(self, sort, start, count):
super(OrderNode, self).__init__()
self._sort = sort
self._start = start
self._count = count
@property
def value(self):
return None
class ArgMaxNode(Node):
@property
def value(self):
return "ARGMAX"
class ArgMinNode(Node):
@property
def value(self):
return "ARGMIN"
class OutputNode(Node):
@property
def value(self):
return "OUT"
class EntityNode(Node):
def __init__(self, id, name, topicentity=False):
super(EntityNode, self).__init__()
self._id = id
self._name = name
self._topicentity = topicentity
if self._id == "m.01m9":
self._name += "/cities/towns/villages"
elif self._id == "m.01mp":
self._name += "/countries"
elif self._id == "m.01mp":
self._name += "/british"
elif self._id == "m.0jzc":
self._name += "/arabic"
elif self._id == "m.02m29p":
self._name += "/stewie"
elif self._id == "m.060d2":
self._name += "/president"
elif self._id == "m.034qd2":
self._name += "/hawkeye"
#elif self._id == "m.04ztj":
# self._name += "/marry/married/husband/wife/wives/husbands"
#elif self._id == "m.05zppz":
# self._name += "/son/father/dad/husband/brother"
#elif self._id == "m.02zsn":
# self._name += "/daughter/mother/mom/wife/sister"
#elif self._id == "m.0ddt_":
# self._name += "/star wars"
#elif self._id == "m.06x5s":
# self._name += "/superbowl"
#elif self._id == "m.01xljv1":
# self._name += "/superbowl"
elif self._id == "m.05jxkf":
self._name += "/college/university"
elif self._id == "m.0m4mb":
self._name += "/prep school/school"
elif self._id == "m.02lk60":
self._name += "/shrek"
@property
def value(self):
try:
return u"{}[{}]{}".format(self._id, self._name.lower(), "*" if self._topicentity else "")
except UnicodeEncodeError, e:
raise e
class ValueNode(Node):
def __init__(self, value, valuetype=None):
super(ValueNode, self).__init__()
self._value = value
self._valuetype = valuetype
@property
def value(self):
return self._value.lower().replace(" ", "_")
class Edge(object):
def __init__(self, src, tgt, label):
self._src = src
self._tgt = tgt
self._lbl = label
self.visited = False
@property
def src(self):
return self._src
@property
def tgt(self):
return self._tgt
@property
def lbl(self):
return self._lbl
class RelationEdge(Edge):
pass
class MathEdge(Edge):
pass
class CountEdge(Edge):
pass
if __name__ == "__main__":
q.argprun(run) | mit | -5,518,964,007,539,494,000 | 34.629482 | 248 | 0.537713 | false |
LionelAuroux/pyrser | tests/grammar/tl4t.py | 1 | 11359 | # little grammar for test
import os
from itertools import chain
from pyrser import grammar
from pyrser import meta
from pyrser import fmt
from pyrser.parsing.node import *
from pyrser.hooks.echo import *
from pyrser.hooks.vars import *
from pyrser.hooks.set import *
from pyrser.hooks.predicate import *
from pyrser.hooks.dump_nodes import *
from pyrser.type_system import *
from pyrser.error import *
from pyrser.passes.to_yml import *
### ABSTRACTION
class NodeInfo(Node, Inference):
def __init__(self):
self.info = None
def walk(self) -> Node:
raise TypeError("Not implemented!")
class BlockStmt(NodeInfo):
def __init__(self, root=False):
super().__init__()
self.body = []
# if root node (no brace when pprint)
self.root = root
def to_tl4t(self) -> fmt.indentable:
lssub = []
for s in self.body:
lssub.append(s.to_tl4t())
lsblock = None
if self.root:
lsblock = fmt.sep('', lssub)
else:
lsblock = fmt.block('{\n', '}', [fmt.tab(lssub)])
return lsblock
# to connect Inference
def type_algos(self):
return (self.infer_block, self.body, self.feedback_block)
def walk(self) -> Node:
"""
TD descent
"""
yield ('block', (it.walk() for it in self.body))
class DeclVar(NodeInfo):
def __init__(self, name: str, t: str, expr=None):
super().__init__()
self.name = name
self.t = None
self.expr = None
if t is not None:
self.t = t
if expr is not None:
self.expr = expr
def to_tl4t(self) -> fmt.indentable:
lsdecl = [
"var",
self.name,
]
if self.t is not None:
lsdecl.append(":")
lsdecl.append(self.t)
if self.expr is not None:
lsdecl.append("=")
lsdecl.append(self.expr.to_tl4t())
else:
lsdecl[-1] += ";\n"
return fmt.sep(" ", lsdecl)
def declare_var(self, args, diagnostic=None):
parent_scope = self.type_node.parent()
typ = self.t
if self.t is None:
typ = '?1'
var = Var(self.name, typ)
parent_scope.add(var)
# try to infer type or check type
if self.expr is not None:
tn = Scope(sig=[Fun('=', typ, [typ, typ])])
tn.set_parent(parent_scope)
# create a fake Expr Node to infer expression with var type
rhs = Expr(Id('='), [Id(self.name), self.expr])
rhs.type_node = Scope()
rhs.type_node.set_parent(tn)
rhs.infer_type(diagnostic)
# TODO: scope surrounded only one sig !!!!!
print("RHS: [%s]" % rhs.type_node)
print("TRET %s" % rhs.type_node.last())
self.t = rhs.type_node.last().compute_tret
self.type_node = rhs.type_node
#var.tret = rhs.type_node.last()
var.tret = TypeName(self.t)
# to connect Inference
def type_algos(self):
return (self.declare_var, None)
def walk(self) -> Node:
"""
TD descent
"""
yield ('block', self.expr.walk())
class DeclFun(DeclVar):
def __init__(self, name: str, t: str, p: [], block=None, variadic=False):
super().__init__(name, t)
self.variadic = variadic
self.p = p
if block is not None:
self.block = block
print("SIZE OF BLOCK %d" % len(block))
def to_tl4t(self) -> fmt.indentable:
params = []
if self.p is not None:
for p in self.p:
params.append(p.to_tl4t())
parenth = fmt.block('(', ')', fmt.sep(", ", params))
lsdecl = fmt.sep(
' ',
[
"fun",
fmt.sep('', [self.name, parenth]),
":",
self.t
]
)
lsblock = None
if hasattr(self, 'block'):
lsblock = fmt.sep("\n", [lsdecl, self.block.to_tl4t()])
else:
lsblock = fmt.end(";\n", lsdecl)
return lsblock
def walk(self) -> Node:
"""
TD descent
"""
yield ('fun', (it.walk() for it in self.p))
yield ('block', self.block.walk())
class Param(NodeInfo):
def __init__(self, n: str, t: str):
super().__init__()
self.name = n
self.t = t
def to_tl4t(self):
return fmt.sep(" ", [self.name, ':', self.t])
def walk(self) -> Node:
"""
TD descent
"""
yield ('term', self, self.name)
class Terminal(NodeInfo):
def __init__(self, value):
super().__init__()
self.value = value
def to_tl4t(self) -> fmt.indentable:
return self.value
def walk(self) -> Node:
"""
TD descent
"""
yield ('term', self, self.value)
class Literal(Terminal):
def __init__(self, value, t):
self.value = value
self.type = t
# to connect Inference
def type_algos(self):
return (
self.infer_literal, (self.value, self.type), self.feedback_leaf
)
def walk(self) -> Node:
"""
TD descent
"""
yield ('literal', self, T('int'))
class Id(Terminal):
# to connect Inference
def type_algos(self):
return (self.infer_id, self.value, self.feedback_id)
class Operator(Terminal):
# to connect Inference
def type_algos(self):
return (self.infer_id, self.value, self.feedback_leaf)
def createFunWithTranslator(old: Node, trans: Translator) -> Node:
"""
To alter AST when apply a translator
"""
f = trans.fun
n = trans.notify
return Expr(Id(f.name), [old])
class Expr(NodeInfo):
def __init__(self, ce: 'expr', p: ['expr']):
super().__init__()
self.call_expr = ce
self.p = p
def to_tl4t(self):
params = []
for p in self.p:
params.append(p.to_tl4t())
parenth = fmt.block('(', ')', fmt.sep(', ', params))
lsblock = fmt.sep('', [
self.call_expr.to_tl4t(),
parenth
])
return lsblock
# to connect Inference
def type_algos(self):
return (self.infer_fun, (self.call_expr, self.p), self.feedback_fun)
def walk(self) -> Node:
"""
TD descent
"""
yield ('fun', (it1 for it1 in chain(self.call_expr.walk(), (it2.walk() for it2 in self.p))))
class ExprStmt(NodeInfo):
def __init__(self, e: Expr):
super().__init__()
self.expr = e
def to_tl4t(self):
return fmt.end(";\n", [self.expr.to_tl4t()])
# to connect Inference
def type_algos(self):
return (self.infer_subexpr, self.expr, self.feedback_subexpr)
def walk(self) -> Node:
"""
TD descent
"""
yield ('block', self.expr.walk())
class Binary(Expr):
def __init__(self, left: Expr, op: Operator, right: Expr):
super().__init__(op, [left, right])
def to_tl4t(self):
return fmt.sep(
" ",
[
self.p[0].to_tl4t(),
self.call_expr.to_tl4t(),
self.p[1].to_tl4t()
]
)
class Unary(Expr):
def __init__(self, op: Operator, expr: Expr):
super().__init__(op, [expr])
def to_tl4t(self):
return fmt.sep("", [self.call_expr.to_tl4t(), self.p[0].to_tl4t()])
class Paren(Expr):
def __init__(self, expr: Expr):
super().__init__(None, [expr])
def to_tl4t(self):
return fmt.block("(", ")", [self.p[0].to_tl4t()])
### PARSING
TL4T = grammar.from_file(os.getcwd() + "/tests/bnf/tl4t.bnf", 'source')
# AST NODES
@meta.hook(TL4T)
def info(self, n):
n.info = LocationInfo.from_stream(self._stream, is_error=self.from_string)
return True
@meta.hook(TL4T)
def new_declvar(self, ast, n, t, e, i):
typ = None
txt = self.value(t)
if txt != "":
typ = txt
expr = None
if type(e) is not Node:
expr = e
ast.set(DeclVar(self.value(n), typ, expr))
ast.info = i.info
return True
@meta.hook(TL4T)
def new_declfun(self, ast, n, t, p, b, i):
param = None
expr = None
if b is not None and hasattr(b, 'body'):
expr = b
if hasattr(p, 'node'):
param = p.node
variadic = False
if hasattr(p, 'variadic'):
variadic = True
ast.set(DeclFun(self.value(n), self.value(t), param, expr, variadic))
ast.info = i.info
return True
@meta.hook(TL4T)
def new_rootstmt(self, block, s, i):
if not isinstance(block, BlockStmt):
block.set(BlockStmt(True))
block.body.append(s)
block.info = i.info
return True
@meta.hook(TL4T)
def new_stmt(self, block, s, i):
if not isinstance(block, BlockStmt):
block.set(BlockStmt())
block.body.append(s)
block.info = i.info
return True
@meta.hook(TL4T)
def add_param(self, params, p):
if not hasattr(params, 'node'):
params.node = []
params.node.append(p)
return True
@meta.hook(TL4T)
def add_param_variadic(self, params):
params.variadic = True
return True
@meta.hook(TL4T)
def new_param(self, ast, n, t, i):
ast.set(Param(self.value(n), self.value(t)))
ast.info = i.info
return True
@meta.hook(TL4T)
def new_expr_stmt(self, ast, e, i):
ast.set(ExprStmt(e))
ast.info = i.info
return True
@meta.hook(TL4T)
def new_lhs_rhs(self, ast, op, right, i):
if not hasattr(ast, 'priority') or ast.priority >= op.priority:
left = Node()
left.set(ast)
ast.set(Binary(left, op, right))
ast.info = i.info
ast.priority = op.priority
elif ast.priority < op.priority:
left = ast.p[-1]
ast.p[-1] = Binary(left, op, right)
ast.p[-1].info = i.info
ast.p[-1].priority = op.priority
return True
@meta.hook(TL4T)
def new_binary(self, ast, op, right, i):
left = Node()
left.set(ast)
ast.set(Binary(left, op, right))
ast.info = i.info
return True
@meta.hook(TL4T)
def new_unary(self, ast, op, expr, i):
ast.set(Unary(op, expr))
ast.info = i.info
return True
@meta.hook(TL4T)
def new_func_call(self, ast, fun, args, i):
if hasattr(args, 'list'):
ast.set(Expr(fun, args.list))
else:
ast.set(Expr(fun, []))
ast.info = i.info
return True
@meta.hook(TL4T)
def new_arg(self, ast, arg):
if not hasattr(ast, 'list'):
ast.list = []
ast.list.append(arg)
return True
@meta.hook(TL4T)
def new_paren(self, ast, expr, i):
ast.set(Paren(expr))
ast.info = i.info
return True
@meta.hook(TL4T)
def new_literal(self, ast, val, t, i):
ast.set(Literal(self.value(val), t.value))
ast.info = i.info
return True
@meta.hook(TL4T)
def new_id(self, ast, ident, i):
ast.set(Id(self.value(ident)))
ast.info = i.info
return True
@meta.hook(TL4T)
def new_operator(self, ast, op, i):
ast.set(Operator(self.value(op)))
ast.info = i.info
return True
@meta.hook(TL4T)
def new_prio_operator(self, ast, op, p, a, i):
ast.set(Operator(self.value(op)))
ast.priority = p.value
ast.assoc = a.value
ast.info = i.info
return True
| gpl-3.0 | -6,879,433,769,114,857,000 | 23.065678 | 100 | 0.540453 | false |
FinHackChamp/FinHack | rnn.py | 1 | 15684 | import numpy as np
import pandas as pd
import tensorflow as tf
import time
import csv
from random import shuffle
import random
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
from sklearn import metrics
from math import sqrt
user_name = 'David Mendoza'
step_size = 40
batch_size = 5
# (2975, 40, 150)
train_data = np.load('oneHotEncoded.npy')
sample = None
# flags
tf.flags.DEFINE_float("epsilon", 0.1, "Epsilon value for Adam Optimizer.")
tf.flags.DEFINE_float("l2_lambda", 0.3, "Lambda for l2 loss.")
tf.flags.DEFINE_float("learning_rate", 0.1, "Learning rate")
tf.flags.DEFINE_float("max_grad_norm", 20.0, "Clip gradients to this norm.")
tf.flags.DEFINE_float("keep_prob", 0.6, "Keep probability for dropout")
tf.flags.DEFINE_integer("hidden_layer_num", 1, "The number of hidden layers (Integer)")
tf.flags.DEFINE_integer("hidden_size", 200, "The number of hidden nodes (Integer)")
tf.flags.DEFINE_integer("evaluation_interval", 5, "Evaluate and print results every x epochs")
tf.flags.DEFINE_integer("epochs", 30, "Number of epochs to train for.")
tf.flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
tf.flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
tf.flags.DEFINE_string("train_data_path", 'data/0910_b_train.csv', "Path to the training dataset")
tf.flags.DEFINE_string("test_data_path", 'data/0910_b_test.csv', "Path to the testing dataset")
log_file_path = '1layered.txt'
hidden_state_path = 'hidden_stateb2.npy'
FLAGS = tf.flags.FLAGS
FLAGS._parse_flags()
print("\nParameters:")
for attr, value in sorted(FLAGS.__flags.items()):
print("{}={}".format(attr.upper(), value))
print("")
def add_gradient_noise(t, stddev=1e-3, name=None):
"""
Adds gradient noise as described in http://arxiv.org/abs/1511.06807 [2].
The input Tensor `t` should be a gradient.
The output will be `t` + gaussian noise.
0.001 was said to be a good fixed value for memory networks [2].
"""
with tf.name_scope( name, "add_gradient_noise",[t, stddev]) as name:
t = tf.convert_to_tensor(t, name="t")
gn = tf.random_normal(tf.shape(t), stddev=stddev)
return tf.add(t, gn, name=name)
class UserModel(object):
def __init__(self, is_training, config, graph):
self.state_size = config.state_size
self._batch_size = batch_size = config.batch_size
self.num_skills = num_skills = config.num_skills
self.hidden_layer_num = len(self.state_size)
self.hidden_size = size = FLAGS.hidden_size
self.num_steps = num_steps = config.num_steps
input_size = num_skills
inputs = self._input_data = tf.placeholder(tf.float32, [batch_size, num_steps-1, 150])
# self._target_id = target_id = tf.placeholder(tf.int32, [1248])
self._target_correctness = target_correctness = tf.placeholder(tf.float32, [batch_size*(num_steps-1), 150])
final_hidden_size = self.state_size[-1]
hidden_layers = []
for i in range(self.hidden_layer_num):
hidden1 = tf.contrib.rnn.BasicLSTMCell(self.state_size[i], state_is_tuple=True,reuse=tf.get_variable_scope().reuse)
if is_training and config.keep_prob < 1:
hidden1 = tf.contrib.rnn.DropoutWrapper(hidden1, output_keep_prob=FLAGS.keep_prob)
hidden_layers.append(hidden1)
cell = tf.contrib.rnn.MultiRNNCell(hidden_layers, state_is_tuple=True)
x = inputs
print x
outputs, state = tf.nn.dynamic_rnn(cell, x, dtype=tf.float32)
# print state
#output = [batch_size * num_steps, final_hidden_size]
output = tf.reshape(tf.concat(outputs,1), [-1, final_hidden_size])
# calculate the logits from last hidden layer to output layer
# graph.get_tensor_by_name("op_to_restore:0")
sigmoid_w = tf.get_variable("sigmoid_w", [final_hidden_size, num_skills])
sigmoid_b = tf.get_variable("sigmoid_b", [num_skills])
if config.isTrain == False:
sigmoid_w = graph.get_tensor_by_name("model/sigmoid_w:0")
sigmoid_b = graph.get_tensor_by_name("model/sigmoid_b:0")
logits = tf.matmul(output, sigmoid_w) + sigmoid_b
self._logits = logits
print logits
softmaxed_logits = tf.nn.softmax(logits)
#make prediction
self._pred = self._pred_values = pred_values = softmaxed_logits
self._pred_class = tf.argmax(softmaxed_logits, axis = 1)
print self.pred
# loss function
loss = tf.reduce_sum(tf.nn.softmax_cross_entropy_with_logits(logits = logits,labels= target_correctness))
self._final_state = state
self._cost = cost = loss
@property
def batch_size(self):
return self._batch_size
@property
def input_data(self):
return self._input_data
@property
def auc(self):
return self._auc
@property
def pred(self):
return self._pred
@property
def logits(self):
return self._logits
@property
def target_id(self):
return self._target_id
@property
def target_correctness(self):
return self._target_correctness
@property
def initial_state(self):
return self._initial_state
@property
def pred_values(self):
return self._pred_values
@property
def cost(self):
return self._cost
@property
def final_state(self):
return self._final_state
@property
def pred_class(self):
return self._pred_class
class HyperParamsConfig(object):
"""Small config."""
init_scale = 0.05
num_steps = 0
max_grad_norm = FLAGS.max_grad_norm
max_max_epoch = FLAGS.epochs
keep_prob = FLAGS.keep_prob
num_skills = 0
state_size = [200]
batch_size = 32
isTrain = True
def run_epoch(session, m, students, eval_op, verbose=False):
"""Runs the model on the given data."""
start_time = time.time()
index = 0
pred_labels = []
actual_labels = []
pred_classes= []
last_logit = None
for i in range(int(students.shape[0] / m.batch_size)):
target_id = []
target_correctness = []
x = students[i*m.batch_size: (i+1) * m.batch_size,:-1,:]
for b in range(m.batch_size):
for s in range(m.num_steps-1):
# get next time step
index = (list(students[b][s+1]).index(1))
# print index
target_id.append(b * m.num_steps + s * m.num_skills + index)
# print target_id
target_correctness.append(students[b][s+1])
actual_labels.append(students[b][s+1])
pred, _, final_state, pred_class, last_logit = session.run([m.pred, eval_op, m.final_state, m.pred_class, m.logits], feed_dict={
m.input_data: x, m.target_correctness: target_correctness})
#h: [batch_size, num_unit]
h = final_state[0][1]
for i in range(len(final_state)):
if i == 0: continue
h = np.concatenate((h,final_state[i][1]), axis=1)
index += m.batch_size
for p in pred:
pred_labels.append(p)
for p in pred_class:
pred_classes.append(p)
# print final_state[0][0].shape
print np.array(pred_labels).shape
print np.array(actual_labels).shape
actual_classes = np.argmax(actual_labels, axis=1)
# print actual_classes
correct_prediction = [actual_classes[i] == pred_classes[i] for i in range(len(pred_classes))]
# print correct_prediction
accuracy = (sum(correct_prediction) + 0.0) / len(correct_prediction)
rmse = sqrt(mean_squared_error(actual_labels, pred_labels))
# fpr, tpr, thresholds = metrics.roc_curve(actual_labels, pred_labels, pos_label=1)
# auc = metrics.auc(fpr, tpr)
last_logit = last_logit[-1, :]
#calculate r^2
r2 = r2_score(actual_labels, pred_labels)
return rmse, accuracy, r2, final_state, last_logit
def train():
config = HyperParamsConfig()
config.isTrain = True
config.batch_size = 32
eval_config = HyperParamsConfig()
timestamp = str(time.time())
train_data_path = FLAGS.train_data_path
#path to your test data set
test_data_path = FLAGS.test_data_path
#the file to store your test results
result_file_path = "run_logs_{}".format(timestamp)
train_max_num_problems, train_max_skill_num = (40, 150)
train_students = np.load('oneHotEncoded.npy')
config.num_steps = train_max_num_problems
config.num_skills = train_max_skill_num
# test_students, test_max_num_problems, test_max_skill_num = read_data_from_csv_file(test_data_path)
# eval_config.num_steps = test_max_num_problems
# eval_config.num_skills = test_max_skill_num
with tf.Graph().as_default() as g:
session_conf = tf.ConfigProto(allow_soft_placement=FLAGS.allow_soft_placement,
log_device_placement=FLAGS.log_device_placement)
global_step = tf.Variable(0, name="global_step", trainable=False)
# decay learning rate
starter_learning_rate = FLAGS.learning_rate
learning_rate = tf.train.exponential_decay(starter_learning_rate, global_step, 3000, 0.96, staircase=True)
with tf.Session(config=session_conf) as session:
initializer = tf.random_uniform_initializer(-config.init_scale, config.init_scale)
# training model
with tf.variable_scope("model", reuse=None, initializer=initializer):
m = UserModel(is_training=True, config=config, graph=g)
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate, epsilon=FLAGS.epsilon)
# # testing model
# with tf.variable_scope("model", reuse=True, initializer=initializer):
# mtest = UserModel(is_training=False, config=eval_config)
grads_and_vars = optimizer.compute_gradients(m.cost)
grads_and_vars = [(tf.clip_by_norm(g, FLAGS.max_grad_norm), v)
for g, v in grads_and_vars if g is not None]
grads_and_vars = [(add_gradient_noise(g), v) for g, v in grads_and_vars]
train_op = optimizer.apply_gradients(grads_and_vars, name="train_op", global_step=global_step)
session.run(tf.global_variables_initializer())
# print tf.get_collection(tf.GraphKeys.VARIABLES, scope='model')
# print "--------------------"
# saver = tf.train.import_meta_graph('user_model-1000.meta')
# saver.restore(session,tf.train.latest_checkpoint('./'))
# print tf.get_collection(tf.GraphKeys.VARIABLES, scope='model')
# print(session.run('model/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/biases/Adam:0'))
# log hyperparameters to results file
with open(result_file_path, "a+") as f:
print("Writing hyperparameters into file")
f.write("Hidden layer size: %d \n" % (FLAGS.hidden_size))
f.write("Dropout rate: %.3f \n" % (FLAGS.keep_prob))
f.write("Batch size: %d \n" % (config.batch_size))
f.write("Max grad norm: %d \n" % (FLAGS.max_grad_norm))
# saver = tf.train.Saver(tf.all_variables())
cs = []
hs = []
for i in range(config.max_max_epoch):
rmse, accuracy, r2, final_state, _ = run_epoch(session, m, train_students, train_op, verbose=True)
print("Epoch: %d Train Metrics:\n rmse: %.3f \t auc: %.3f \t r2: %.3f \n" % (i + 1, rmse, accuracy, r2))
with open(log_file_path, "a+") as f:
f.write("Epoch: %d Train Metrics:\n rmse: %.3f \t auc: %.3f \t r2: %.3f \n" % (i + 1, rmse, accuracy, r2))
if((i+1) % FLAGS.evaluation_interval == 0):
print "Save variables to disk"
saver = tf.train.Saver()
saver.save(session, 'user_model',global_step=1000)
def predict():
config = HyperParamsConfig()
config.isTrain = False
config.batch_size = 1
eval_config = HyperParamsConfig()
timestamp = str(time.time())
train_data_path = FLAGS.train_data_path
#path to your test data set
test_data_path = FLAGS.test_data_path
#the file to store your test results
result_file_path = "run_logs_{}".format(timestamp)
train_max_num_problems, train_max_skill_num = (40, 150)
train_students = np.array([np.load('oneHotEncoded.npy')[np.random.randint(2975),:,:]])
# train_students = np.load('oneHotEncoded.npy')
config.num_steps = train_max_num_problems
config.num_skills = train_max_skill_num
# test_students, test_max_num_problems, test_max_skill_num = read_data_from_csv_file(test_data_path)
# eval_config.num_steps = test_max_num_problems
# eval_config.num_skills = test_max_skill_num
new_graph = tf.Graph()
with new_graph.as_default():
session_conf = tf.ConfigProto(allow_soft_placement=FLAGS.allow_soft_placement,
log_device_placement=FLAGS.log_device_placement)
global_step = tf.Variable(0, name="global_step", trainable=False)
# decay learning rate
starter_learning_rate = FLAGS.learning_rate
with tf.Session(graph = new_graph) as session:
initializer = tf.random_uniform_initializer(-config.init_scale, config.init_scale)
learning_rate = tf.train.exponential_decay(starter_learning_rate, global_step, 3000, 0.96, staircase=True)
# training model
with tf.variable_scope("model", reuse=None, initializer=initializer):
m = UserModel(is_training=True, config=config, graph = new_graph)
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate, epsilon=FLAGS.epsilon)
# # testing model
# with tf.variable_scope("model", reuse=True, initializer=initializer):
# mtest = UserModel(is_training=False, config=eval_config)
grads_and_vars = optimizer.compute_gradients(m.cost)
grads_and_vars = [(tf.clip_by_norm(g, FLAGS.max_grad_norm), v)
for g, v in grads_and_vars if g is not None]
grads_and_vars = [(add_gradient_noise(g), v) for g, v in grads_and_vars]
train_op = optimizer.apply_gradients(grads_and_vars, name="train_op1", global_step=global_step)
session.run(tf.global_variables_initializer())
saver = tf.train.import_meta_graph('user_model-1000.meta')
saver.restore(session,tf.train.latest_checkpoint('./'))
# print tf.get_collection(tf.GraphKeys.VARIABLES, scope='model')
# print "--------------------"
# print tf.get_collection(tf.GraphKeys.VARIABLES, scope='model')
# print(session.run('model/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/biases/Adam:0'))
# log hyperparameters to results file
# saver = tf.train.Saver(tf.all_variables())
cs = []
hs = []
rmse, accuracy, r2, final_state, last_logit = run_epoch(session, m, train_students, train_op, verbose=True)
output = []
output.append(np.argmax(last_logit))
last_logit[output[-1]] = min(last_logit)
output.append(np.argmax(last_logit))
last_logit[output[-1]] = min(last_logit)
output.append(np.argmax(last_logit))
df = pd.read_csv('label.csv')
names = list(df.name)
output = [names[index] for index in output]
return output
| mit | 1,610,222,289,992,615,000 | 36.431981 | 137 | 0.62076 | false |
planetmarshall/bmctv_addon | deploy.py | 1 | 1060 | __author__ = 'Andrew'
from zipfile import ZipFile
import os
import os.path
import argparse
includes = [
"resources/img/award.png",
"resources/img/bmc.png",
"resources/img/climbing.png",
"resources/img/competitions.png",
"resources/img/films.png",
"resources/img/gear.png",
"resources/img/kmf.png",
"resources/img/mountaineering.png",
"resources/img/skills.png",
"resources/img/walking.png",
"resources/img/Icons.md",
"addon.xml",
"bmctv.py",
"bmctv_main.py",
"changelog.txt",
"icon.png",
"LICENSE.txt",
"storageserverdummy.py"
]
package = "plugin.video.bmctv"
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--version", help="plugin version")
args = parser.parse_args()
script_dir = os.path.dirname(os.path.realpath(__file__))
current_dir = os.getcwd()
os.chdir(script_dir)
with ZipFile(os.path.join(script_dir,"..", package + "-{0}.zip".format(args.version)), 'w') as zip:
for include in includes:
zip.write(include, os.path.join(package,include))
| apache-2.0 | -5,324,357,215,712,509,000 | 25.5 | 99 | 0.70566 | false |
bn0x/theSkypeResolver | logparse.py | 1 | 2138 | #! -*- coding: utf-8 -*-
#
# Tiny Skype debug log parser.
# Created by aki in about half an hour, at the request of bn0x.
#
# ---------------------------------------------------------------------------------
# "THE BEER-WARE LICENSE" (Revision 43.5):
# @__akiaki wrote this file. As long as you retain this notice you can do whatever
# you want with this stuff. If we meet some day, and you think this stuff is worth
# it, you can buy me a beer (or caffeinated beverage) in return. --aki
# ---------------------------------------------------------------------------------
"""
Usage:
>>> import logparse
>>> for i in logparse.search("skype.log", "username"):
... print "Public: %s, Local: %s" % (i['public'], i['local'])
...
"Public: 8.56.212.72:41323, Local: 192.168.1.1:41323"
"""
import re
import codecs
def _parseline(line, username=None):
try:
if not (re.search('PresenceManager:', line) and re.search('noticing', line)):
return None
if username != None and not re.search(username, line):
return None
l = re.findall("(\d+\.\d+\.\d+\.\d+)\:(\d+)?", line)
top = 1
if len(l) is 2:
top = 0
public = l[top][0] + ":" + l[top][1][:5 if len(l[top][1]) > 5 else len(l[top][1])]
local = l[top + 1][0] + ":" + l[top + 1][1][:5 if len(l[top + 1][1]) > 5 else len(l[top + 1][1])]
return {'public': public.encode('utf-8'), 'local': local.encode('utf-8')}
except:
return None
def search(file, username=None):
"""
Search the Skype log file `file` for IPs.
Matches against the Skype name `username` if given.
Returns a list of dictionaries, with the keys 'public' and
'local' which are in the format ``aaa.bbb.ccc.ddd:xxxxx``.
"""
buf = []
f = codecs.open(file, 'r', encoding='ascii', errors='ignore').read()
for line in f.split("\n"):
l = _parseline(line.strip(), username)
if l != None:
buf.append(l)
t = []
for b in buf:
if not b in t:
t.append(b)
return t
| gpl-3.0 | 5,480,985,102,098,336,000 | 28.287671 | 105 | 0.505613 | false |
cosminbasca/cysparql | cysparql/test/test_sequence.py | 1 | 1274 | #
# author: Cosmin Basca
#
# Copyright 2010 University of Zurich
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cysparql import *
from base import BaseTestCase
from rdflib.term import *
from cysparql.sequence import Sequence
__author__ = 'basca'
class SequenceTestCase(BaseTestCase):
def test_01_get(self):
query = self.get_query()
self.assertIsInstance(query.vars, Sequence)
def test_02_len(self):
query = self.get_query()
self.assertEqual(len(query.vars), 3)
def test_03_getitem(self):
query = self.get_query()
self.assertEqual(query.vars[0].name, 'label')
def test_04_iterate(self):
query = self.get_query()
for v in query.vars:
self.assertIsInstance(v, QueryVar)
| apache-2.0 | -8,211,821,146,712,578,000 | 29.333333 | 74 | 0.697802 | false |
YoeriDijkstra/iFlow | nifty/__init__.py | 1 | 1948 | """
Native iFlow Tools (NiFTy) package
This package provides tools to be used in iFlow.
You are welcome to add your functions native to iFlow to the package. Please respect the following guidelines:
- Make your tool as general as possible. Particularly allow as much generality in the number and order of dimensions.
- Include only python scripts or cross-platform compiled libraries from other langues.
- Include only one public method or class per file. This method/class should have the same name as the file.
- Include a description of how to use the tool in the docstring
- Document the required type of input and the type of output that may be expected in the docstring.
- Include comments throughout the code, so that interested users may get to understand your script in detail
"""
from toList import toList
from Timer import Timer
from amp_phase_input import amp_phase_input
from arraydot import arraydot
from complexAmplitudeProduct import complexAmplitudeProduct
from derivative import derivative
from dimensionalAxis import dimensionalAxis
from dynamicImport import dynamicImport
from eliminateNegativeFourier import eliminateNegativeFourier
from fft import fft
from harmonicDecomposition import absoluteU, signU
from integrate import integrate
from invfft import invfft
from makeRegularGrid import makeRegularGrid
from pickleload import pickleload
from polyApproximation import polyApproximation
from primitive import primitive
from secondDerivative import secondDerivative
from splitModuleName import splitModuleName
from toMatrix import toMatrix
from scalemax import scalemax
from invfft2 import invfft2
from savitzky_golay import savitzky_golay
# loads all file names in this package that are python files and that do not start with _
#__all__ = [os.path.basename(f)[:-3] for f in os.listdir(os.path.dirname(__file__)) if f.endswith('.py') and not f.startswith('_')]
#for mod in __all__:
# exec('from '+mod+ ' import '+mod)
| lgpl-3.0 | -5,764,286,671,655,567,000 | 44.302326 | 131 | 0.811088 | false |
chrisdickinson/nappingcat | nappingcat/auth.py | 1 | 1933 | from nappingcat.util import import_module
from nappingcat import config
class AuthBackend(object):
def __init__(self, settings):
self.users = {}
self.settings = settings
self.require_update = False
def has_permission(self, user, permission):
full_query = (user,) + tuple(permission)
result = self.users
for i in full_query:
result = result.get(i, {})
return bool(result)
def add_permission(self, user, permission):
self.require_update = True
full_query = (user,) + tuple(permission)
result = self.users
for i in full_query[:-1]:
level = result.get(i, None)
if level is None:
result[i] = {}
result = result[i]
result[full_query[-1]] = True
def remove_permission(self, user, permission):
self.require_update = True
full_query = (user,) + tuple(permission)
result = self.users
for i in full_query[:-1]:
level = result.get(i, None)
if level is None:
result[i] = {}
result = result[i]
del result[full_query[-1]]
def add_user(self, username):
self.require_update = True
self.users[username] = {'keys':[]}
def add_key_to_user(self, user, key):
self.require_update = True
self.users[user]['keys'].append(key)
def get_keys(self, username):
return self.users[username]['keys']
def finish(self, pubkey_handler):
if self.require_update:
pubkey_handler.flush_keys(self)
def get_users(self):
return self.users.keys()
def get_auth_backend_from_settings(settings):
settings_dict = dict(settings.items(config.SECTION_NAME))
module, target = settings_dict['auth'].rsplit('.',1)
module = import_module(module)
target = getattr(module,target)
return target(settings)
| bsd-3-clause | -941,697,860,000,103,000 | 30.177419 | 61 | 0.584584 | false |
ddu7/PyLC | 092Reverse Linked List II.py | 1 | 2067 | # -*- coding: utf-8 -*-
# Reverse a linked list from position m to n. Do it in-place and in one-pass.
#
# For example:
# Given 1->2->3->4->5->NULL, m = 2 and n = 4,
#
# return 1->4->3->2->5->NULL.
#
# Note:
# Given m, n satisfy the following condition:
# 1 <= m <= n <= length of list.
# Definition for singly-linked list.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
def __repr__(self):
if self:
return "{} -> {}".format(self.val, repr(self.next))
class Solution:
def reverseBetween(self, head, m, n):
"""
:type head: ListNode
:type m: int
:type n: int
:rtype: ListNode
"""
dummy = ListNode(-1)
dummy.next = head
p, q = head, dummy
diff = n - m + 1
while m > 1:
p = p.next
q = q.next
m -= 1
q.next = None
tail = p
# 依然不明白这里用tail的原因,
# 猜测类似tail是指针指向逆转字段逆转前的首位,逆转后的最后一位,那么在最后在这个tail后接上逆转字段完毕的p完成拼接
while diff > 0:
# 逆序解析: 例如 p: 3 - 4 - 5; q: 1 - 2
# 首先记录p.next : temp = 4 - 5
# 将p下一位接上q下一位,此时完成逆序: p : 3 - 2; q: 1 - 2
# 然后将p接上q即可,也就是q.next = p, 拼接: q: 1 - 3 - 2, 还原p为temp: p : 4 -5
# 即完成了一次逆序,后面依次类推
temp = p.next
p.next = q.next
q.next = p
p = temp
diff -= 1
# 还原tail,原因依然不明,指针?
tail.next = p
return dummy.next
if __name__ == "__main__":
head = ListNode(1)
head.next = ListNode(2)
head.next.next = ListNode(3)
head.next.next.next = ListNode(4)
head.next.next.next.next = ListNode(5)
print Solution().reverseBetween(head, 2, 4)
| mit | 4,131,717,365,873,033,700 | 25.397059 | 89 | 0.477994 | false |
ccri/gensim | gensim/models/lsimodel.py | 1 | 34863 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <[email protected]>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Module for Latent Semantic Analysis (aka Latent Semantic Indexing) in Python.
Implements scalable truncated Singular Value Decomposition in Python. The SVD
decomposition can be updated with new observations at any time (online, incremental,
memory-efficient training).
This module actually contains several algorithms for decomposition of large corpora, a
combination of which effectively and transparently allows building LSI models for:
* corpora much larger than RAM: only constant memory is needed, independent of
the corpus size (though still dependent on the feature set size)
* corpora that are streamed: documents are only accessed sequentially, no
random-access
* corpora that cannot be even temporarily stored: each document can only be
seen once and must be processed immediately (one-pass algorithm)
* distributed computing for very large corpora, making use of a cluster of
machines
Wall-clock `performance on the English Wikipedia <http://radimrehurek.com/gensim/wiki.html>`_
(2G corpus positions, 3.2M documents, 100K features, 0.5G non-zero entries in the final TF-IDF matrix),
requesting the top 400 LSI factors:
====================================================== ============ ==================
algorithm serial distributed
====================================================== ============ ==================
one-pass merge algorithm 5h14m 1h41m
multi-pass stochastic algo (with 2 power iterations) 5h39m N/A [1]_
====================================================== ============ ==================
*serial* = Core 2 Duo MacBook Pro 2.53Ghz, 4GB RAM, libVec
*distributed* = cluster of four logical nodes on three physical machines, each
with dual core Xeon 2.0GHz, 4GB RAM, ATLAS
.. [1] The stochastic algo could be distributed too, but most time is already spent
reading/decompressing the input from disk in its 4 passes. The extra network
traffic due to data distribution across cluster nodes would likely make it
*slower*.
"""
import logging
import sys
import numpy
import scipy.linalg
import scipy.sparse
from scipy.sparse import sparsetools
from gensim import interfaces, matutils, utils
from six import iterkeys
from six.moves import xrange
logger = logging.getLogger('gensim.models.lsimodel')
# accuracy defaults for the multi-pass stochastic algo
P2_EXTRA_DIMS = 100 # set to `None` for dynamic P2_EXTRA_DIMS=k
P2_EXTRA_ITERS = 2
def clip_spectrum(s, k, discard=0.001):
"""
Given eigenvalues `s`, return how many factors should be kept to avoid
storing spurious (tiny, numerically instable) values.
This will ignore the tail of the spectrum with relative combined mass < min(`discard`, 1/k).
The returned value is clipped against `k` (= never return more than `k`).
"""
# compute relative contribution of eigenvalues towards the energy spectrum
rel_spectrum = numpy.abs(1.0 - numpy.cumsum(s / numpy.sum(s)))
# ignore the last `discard` mass (or 1/k, whichever is smaller) of the spectrum
small = 1 + len(numpy.where(rel_spectrum > min(discard, 1.0 / k))[0])
k = min(k, small) # clip against k
logger.info("keeping %i factors (discarding %.3f%% of energy spectrum)" %
(k, 100 * rel_spectrum[k - 1]))
return k
def asfarray(a, name=''):
if not a.flags.f_contiguous:
logger.debug("converting %s array %s to FORTRAN order" % (a.shape, name))
a = numpy.asfortranarray(a)
return a
def ascarray(a, name=''):
if not a.flags.contiguous:
logger.debug("converting %s array %s to C order" % (a.shape, name))
a = numpy.ascontiguousarray(a)
return a
class Projection(utils.SaveLoad):
def __init__(self, m, k, docs=None, use_svdlibc=False, power_iters=P2_EXTRA_ITERS, extra_dims=P2_EXTRA_DIMS):
"""
Construct the (U, S) projection from a corpus `docs`. The projection can
be later updated by merging it with another Projection via `self.merge()`.
This is the class taking care of the 'core math'; interfacing with corpora,
splitting large corpora into chunks and merging them etc. is done through
the higher-level `LsiModel` class.
"""
self.m, self.k = m, k
self.power_iters = power_iters
self.extra_dims = extra_dims
if docs is not None:
# base case decomposition: given a job `docs`, compute its decomposition,
# *in-core*.
if not use_svdlibc:
u, s = stochastic_svd(docs, k, chunksize=sys.maxsize,
num_terms=m, power_iters=self.power_iters,
extra_dims=self.extra_dims)
else:
try:
import sparsesvd
except ImportError:
raise ImportError("`sparsesvd` module requested but not found; run `easy_install sparsesvd`")
logger.info("computing sparse SVD of %s matrix" % str(docs.shape))
if not scipy.sparse.issparse(docs):
docs = matutils.corpus2csc(docs)
ut, s, vt = sparsesvd.sparsesvd(docs, k + 30) # ask for extra factors, because for some reason SVDLIBC sometimes returns fewer factors than requested
u = ut.T
del ut, vt
k = clip_spectrum(s**2, self.k)
self.u = u[:, :k].copy()
self.s = s[:k].copy()
else:
self.u, self.s = None, None
def empty_like(self):
return Projection(self.m, self.k, power_iters=self.power_iters, extra_dims=self.extra_dims)
def merge(self, other, decay=1.0):
"""
Merge this Projection with another.
The content of `other` is destroyed in the process, so pass this function a
copy of `other` if you need it further.
"""
if other.u is None:
# the other projection is empty => do nothing
return
if self.u is None:
# we are empty => result of merge is the other projection, whatever it is
self.u = other.u.copy()
self.s = other.s.copy()
return
if self.m != other.m:
raise ValueError("vector space mismatch: update is using %s features, expected %s" %
(other.m, self.m))
logger.info("merging projections: %s + %s" % (str(self.u.shape), str(other.u.shape)))
m, n1, n2 = self.u.shape[0], self.u.shape[1], other.u.shape[1]
# TODO Maybe keep the bases as elementary reflectors, without
# forming explicit matrices with ORGQR.
# The only operation we ever need is basis^T*basis ond basis*component.
# But how to do that in scipy? And is it fast(er)?
# find component of u2 orthogonal to u1
logger.debug("constructing orthogonal component")
self.u = asfarray(self.u, 'self.u')
c = numpy.dot(self.u.T, other.u)
self.u = ascarray(self.u, 'self.u')
other.u -= numpy.dot(self.u, c)
other.u = [other.u] # do some reference magic and call qr_destroy, to save RAM
q, r = matutils.qr_destroy(other.u) # q, r = QR(component)
assert not other.u
# find the rotation that diagonalizes r
k = numpy.bmat([[numpy.diag(decay * self.s), numpy.multiply(c, other.s)],
[matutils.pad(numpy.array([]).reshape(0, 0), min(m, n2), n1), numpy.multiply(r, other.s)]])
logger.debug("computing SVD of %s dense matrix" % str(k.shape))
try:
# in numpy < 1.1.0, running SVD sometimes results in "LinAlgError: SVD did not converge'.
# for these early versions of numpy, catch the error and try to compute
# SVD again, but over k*k^T.
# see http://www.mail-archive.com/[email protected]/msg07224.html and
# bug ticket http://projects.scipy.org/numpy/ticket/706
# sdoering: replaced numpy's linalg.svd with scipy's linalg.svd:
u_k, s_k, _ = scipy.linalg.svd(k, full_matrices=False) # TODO *ugly overkill*!! only need first self.k SVD factors... but there is no LAPACK wrapper for partial svd/eigendecomp in numpy :( //sdoering: maybe there is one in scipy?
except scipy.linalg.LinAlgError:
logger.error("SVD(A) failed; trying SVD(A * A^T)")
u_k, s_k, _ = scipy.linalg.svd(numpy.dot(k, k.T), full_matrices=False) # if this fails too, give up with an exception
s_k = numpy.sqrt(s_k) # go back from eigen values to singular values
k = clip_spectrum(s_k**2, self.k)
u1_k, u2_k, s_k = numpy.array(u_k[:n1, :k]), numpy.array(u_k[n1:, :k]), s_k[:k]
# update & rotate current basis U = [U, U']*[U1_k, U2_k]
logger.debug("updating orthonormal basis U")
self.s = s_k
self.u = ascarray(self.u, 'self.u')
self.u = numpy.dot(self.u, u1_k)
q = ascarray(q, 'q')
q = numpy.dot(q, u2_k)
self.u += q
# make each column of U start with a non-negative number (to force canonical decomposition)
if self.u.shape[0] > 0:
for i in xrange(self.u.shape[1]):
if self.u[0, i] < 0.0:
self.u[:, i] *= -1.0
# diff = numpy.dot(self.u.T, self.u) - numpy.eye(self.u.shape[1])
# logger.info('orth error after=%f' % numpy.sum(diff * diff))
#endclass Projection
class LsiModel(interfaces.TransformationABC):
"""
Objects of this class allow building and maintaining a model for Latent
Semantic Indexing (also known as Latent Semantic Analysis).
The main methods are:
1. constructor, which initializes the projection into latent topics space,
2. the ``[]`` method, which returns representation of any input document in the
latent space,
3. `add_documents()` for incrementally updating the model with new documents.
The left singular vectors are stored in `lsi.projection.u`, singular values
in `lsi.projection.s`. Right singular vectors can be reconstructed from the output
of `lsi[training_corpus]`, if needed. See also FAQ [2]_.
Model persistency is achieved via its load/save methods.
.. [2] https://github.com/piskvorky/gensim/wiki/Recipes-&-FAQ#q4-how-do-you-output-the-u-s-vt-matrices-of-lsi
"""
def __init__(self, corpus=None, num_topics=200, id2word=None, chunksize=20000,
decay=1.0, distributed=False, onepass=True,
power_iters=P2_EXTRA_ITERS, extra_samples=P2_EXTRA_DIMS):
"""
`num_topics` is the number of requested factors (latent dimensions).
After the model has been trained, you can estimate topics for an
arbitrary, unseen document, using the ``topics = self[document]`` dictionary
notation. You can also add new training documents, with ``self.add_documents``,
so that training can be stopped and resumed at any time, and the
LSI transformation is available at any point.
If you specify a `corpus`, it will be used to train the model. See the
method `add_documents` for a description of the `chunksize` and `decay` parameters.
Turn `onepass` off to force a multi-pass stochastic algorithm.
`power_iters` and `extra_samples` affect the accuracy of the stochastic
multi-pass algorithm, which is used either internally (`onepass=True`) or
as the front-end algorithm (`onepass=False`). Increasing the number of
power iterations improves accuracy, but lowers performance. See [3]_ for
some hard numbers.
Turn on `distributed` to enable distributed computing.
Example:
>>> lsi = LsiModel(corpus, num_topics=10)
>>> print(lsi[doc_tfidf]) # project some document into LSI space
>>> lsi.add_documents(corpus2) # update LSI on additional documents
>>> print(lsi[doc_tfidf])
.. [3] http://nlp.fi.muni.cz/~xrehurek/nips/rehurek_nips.pdf
"""
self.id2word = id2word
self.num_topics = int(num_topics)
self.chunksize = int(chunksize)
self.decay = float(decay)
if distributed:
if not onepass:
logger.warning("forcing the one-pass algorithm for distributed LSA")
onepass = True
self.onepass = onepass
self.extra_samples, self.power_iters = extra_samples, power_iters
if corpus is None and self.id2word is None:
raise ValueError('at least one of corpus/id2word must be specified, to establish input space dimensionality')
if self.id2word is None:
logger.warning("no word id mapping provided; initializing from corpus, assuming identity")
self.id2word = utils.dict_from_corpus(corpus)
self.num_terms = len(self.id2word)
else:
self.num_terms = 1 + max([-1] + self.id2word.keys())
self.docs_processed = 0
self.projection = Projection(self.num_terms, self.num_topics, power_iters=self.power_iters, extra_dims=self.extra_samples)
self.numworkers = 1
if not distributed:
logger.info("using serial LSI version on this node")
self.dispatcher = None
else:
if not onepass:
raise NotImplementedError("distributed stochastic LSA not implemented yet; "
"run either distributed one-pass, or serial randomized.")
try:
import Pyro4
dispatcher = Pyro4.Proxy('PYRONAME:gensim.lsi_dispatcher')
dispatcher._pyroOneway.add("exit")
logger.debug("looking for dispatcher at %s" % str(dispatcher._pyroUri))
dispatcher.initialize(id2word=self.id2word, num_topics=num_topics,
chunksize=chunksize, decay=decay,
power_iters=self.power_iters, extra_samples=self.extra_samples,
distributed=False, onepass=onepass)
self.dispatcher = dispatcher
self.numworkers = len(dispatcher.getworkers())
logger.info("using distributed version with %i workers" % self.numworkers)
except Exception as err:
# distributed version was specifically requested, so this is an error state
logger.error("failed to initialize distributed LSI (%s)" % err)
raise RuntimeError("failed to initialize distributed LSI (%s)" % err)
if corpus is not None:
self.add_documents(corpus)
def add_documents(self, corpus, chunksize=None, decay=None):
"""
Update singular value decomposition to take into account a new
corpus of documents.
Training proceeds in chunks of `chunksize` documents at a time. The size of
`chunksize` is a tradeoff between increased speed (bigger `chunksize`)
vs. lower memory footprint (smaller `chunksize`). If the distributed mode
is on, each chunk is sent to a different worker/computer.
Setting `decay` < 1.0 causes re-orientation towards new data trends in the
input document stream, by giving less emphasis to old observations. This allows
LSA to gradually "forget" old observations (documents) and give more
preference to new ones.
"""
logger.info("updating model with new documents")
# get computation parameters; if not specified, use the ones from constructor
if chunksize is None:
chunksize = self.chunksize
if decay is None:
decay = self.decay
if not scipy.sparse.issparse(corpus):
if not self.onepass:
# we are allowed multiple passes over the input => use a faster, randomized two-pass algo
update = Projection(self.num_terms, self.num_topics, None)
update.u, update.s = stochastic_svd(corpus, self.num_topics,
num_terms=self.num_terms, chunksize=chunksize,
extra_dims=self.extra_samples, power_iters=self.power_iters)
self.projection.merge(update, decay=decay)
else:
# the one-pass algo
doc_no = 0
if self.dispatcher:
logger.info('initializing %s workers' % self.numworkers)
self.dispatcher.reset()
for chunk_no, chunk in enumerate(utils.grouper(corpus, chunksize)):
logger.info("preparing a new chunk of documents")
nnz = sum(len(doc) for doc in chunk)
# construct the job as a sparse matrix, to minimize memory overhead
# definitely avoid materializing it as a dense matrix!
logger.debug("converting corpus to csc format")
job = matutils.corpus2csc(chunk, num_docs=len(chunk), num_terms=self.num_terms, num_nnz=nnz)
del chunk
doc_no += job.shape[1]
if self.dispatcher:
# distributed version: add this job to the job queue, so workers can work on it
logger.debug("creating job #%i" % chunk_no)
self.dispatcher.putjob(job) # put job into queue; this will eventually block, because the queue has a small finite size
del job
logger.info("dispatched documents up to #%s" % doc_no)
else:
# serial version, there is only one "worker" (myself) => process the job directly
update = Projection(self.num_terms, self.num_topics, job, extra_dims=self.extra_samples, power_iters=self.power_iters)
del job
self.projection.merge(update, decay=decay)
del update
logger.info("processed documents up to #%s" % doc_no)
self.print_topics(5)
# wait for all workers to finish (distributed version only)
if self.dispatcher:
logger.info("reached the end of input; now waiting for all remaining jobs to finish")
self.projection = self.dispatcher.getstate()
# logger.info("top topics after adding %i documents" % doc_no)
# self.print_debug(10)
else:
assert not self.dispatcher, "must be in serial mode to receive jobs"
assert self.onepass, "distributed two-pass algo not supported yet"
update = Projection(self.num_terms, self.num_topics, corpus.tocsc(), extra_dims=self.extra_samples, power_iters=self.power_iters)
self.projection.merge(update, decay=decay)
logger.info("processed sparse job of %i documents" % (corpus.shape[1]))
def __str__(self):
return "LsiModel(num_terms=%s, num_topics=%s, decay=%s, chunksize=%s)" % \
(self.num_terms, self.num_topics, self.decay, self.chunksize)
def __getitem__(self, bow, scaled=False, chunksize=512):
"""
Return latent representation, as a list of (topic_id, topic_value) 2-tuples.
This is done by folding input document into the latent topic space.
If `scaled` is set, scale topics by the inverse of singular values (default: no scaling).
"""
assert self.projection.u is not None, "decomposition not initialized yet"
# if the input vector is in fact a corpus, return a transformed corpus as a result
is_corpus, bow = utils.is_corpus(bow)
if is_corpus and chunksize:
# by default, transform `chunksize` documents at once, when called as `lsi[corpus]`.
# this chunking is completely transparent to the user, but it speeds
# up internal computations (one mat * mat multiplication, instead of
# `chunksize` smaller mat * vec multiplications).
return self._apply(bow, chunksize=chunksize)
if not is_corpus:
bow = [bow]
# convert input to scipy.sparse CSC, then do "sparse * dense = dense" multiplication
vec = matutils.corpus2csc(bow, num_terms=self.num_terms, dtype=self.projection.u.dtype)
topic_dist = (vec.T * self.projection.u[:, :self.num_topics]).T # (x^T * u).T = u^-1 * x
# # convert input to dense, then do dense * dense multiplication
# # ± same performance as above (BLAS dense * dense is better optimized than scipy.sparse), but consumes more memory
# vec = matutils.corpus2dense(bow, num_terms=self.num_terms, num_docs=len(bow))
# topic_dist = numpy.dot(self.projection.u[:, :self.num_topics].T, vec)
# # use numpy's advanced indexing to simulate sparse * dense
# # ± same speed again
# u = self.projection.u[:, :self.num_topics]
# topic_dist = numpy.empty((u.shape[1], len(bow)), dtype=u.dtype)
# for vecno, vec in enumerate(bow):
# indices, data = zip(*vec) if vec else ([], [])
# topic_dist[:, vecno] = numpy.dot(u.take(indices, axis=0).T, numpy.array(data, dtype=u.dtype))
if not is_corpus:
# convert back from matrix into a 1d vec
topic_dist = topic_dist.reshape(-1)
if scaled:
topic_dist = (1.0 / self.projection.s[:self.num_topics]) * topic_dist # s^-1 * u^-1 * x
# convert a numpy array to gensim sparse vector = tuples of (feature_id, feature_weight),
# with no zero weights.
if not is_corpus:
# lsi[single_document]
result = matutils.full2sparse(topic_dist)
else:
# lsi[chunk of documents]
result = matutils.Dense2Corpus(topic_dist)
return result
def show_topic(self, topicno, topn=10):
"""
Return a specified topic (=left singular vector), 0 <= `topicno` < `self.num_topics`,
as a string.
Return only the `topn` words which contribute the most to the direction
of the topic (both negative and positive).
>>> lsimodel.show_topic(10, topn=5)
[(-0.340, "category"), (0.298, "$M$"), (0.183, "algebra"), (-0.174, "functor"), (-0.168, "operator")]
"""
# size of the projection matrix can actually be smaller than `self.num_topics`,
# if there were not enough factors (real rank of input matrix smaller than
# `self.num_topics`). in that case, return an empty string
if topicno >= len(self.projection.u.T):
return ''
c = numpy.asarray(self.projection.u.T[topicno, :]).flatten()
norm = numpy.sqrt(numpy.sum(numpy.dot(c, c)))
most = numpy.abs(c).argsort()[::-1][:topn]
return [(1.0 * c[val] / norm, self.id2word[val]) for val in most]
def print_topic(self, topicno, topn=10):
"""
Return a single topic as a formatted string. See `show_topic()` for parameters.
>>> lsimodel.print_topic(10, topn=5)
'-0.340 * "category" + 0.298 * "$M$" + 0.183 * "algebra" + -0.174 * "functor" + -0.168 * "operator"'
"""
return ' + '.join(['%.3f*"%s"' % v for v in self.show_topic(topicno, topn)])
def show_topics(self, num_topics=-1, num_words=10, log=False, formatted=True):
"""
Return `num_topics` most significant topics (return all by default).
For each topic, show `num_words` most significant words (10 words by default).
The topics are returned as a list -- a list of strings if `formatted` is
True, or a list of (weight, word) 2-tuples if False.
If `log` is True, also output this result to log.
"""
shown = []
if num_topics < 0:
num_topics = self.num_topics
for i in xrange(min(num_topics, self.num_topics)):
if i < len(self.projection.s):
if formatted:
topic = self.print_topic(i, topn=num_words)
else:
topic = self.show_topic(i, topn=num_words)
shown.append(topic)
if log:
logger.info("topic #%i(%.3f): %s" %
(i, self.projection.s[i],
topic))
return shown
def print_topics(self, num_topics=5, num_words=10):
"""Alias for `show_topics()` which prints the top 5 topics to log."""
return self.show_topics(num_topics=num_topics, num_words=num_words, log=True)
def print_debug(self, num_topics=5, num_words=10):
"""
Print (to log) the most salient words of the first `num_topics` topics.
Unlike `print_topics()`, this looks for words that are significant for a
particular topic *and* not for others. This *should* result in a more
human-interpretable description of topics.
"""
# only wrap the module-level fnc
print_debug(self.id2word, self.projection.u, self.projection.s,
range(min(num_topics, len(self.projection.u.T))),
num_words=num_words)
def save(self, fname, *args, **kwargs):
"""
Save the model to file.
Large internal arrays may be stored into separate files, with `fname` as prefix.
"""
if self.projection is not None:
self.projection.save(fname + '.projection', *args, **kwargs)
super(LsiModel, self).save(fname, *args, ignore=['projection', 'dispatcher'], **kwargs)
@classmethod
def load(cls, fname, *args, **kwargs):
"""
Load a previously saved object from file (also see `save`).
Large arrays are mmap'ed back as read-only (shared memory).
"""
kwargs['mmap'] = kwargs.get('mmap', 'r')
result = super(LsiModel, cls).load(fname, *args, **kwargs)
try:
result.projection = super(LsiModel, cls).load(fname + '.projection', *args, **kwargs)
except Exception as e:
logging.warning("failed to load projection from %s: %s" % (fname + '.state', e))
return result
#endclass LsiModel
def print_debug(id2token, u, s, topics, num_words=10, num_neg=None):
if num_neg is None:
# by default, print half as many salient negative words as positive
num_neg = num_words / 2
logger.info('computing word-topic salience for %i topics' % len(topics))
topics, result = set(topics), {}
# TODO speed up by block computation
for uvecno, uvec in enumerate(u):
uvec = numpy.abs(numpy.asarray(uvec).flatten())
udiff = uvec / numpy.sqrt(numpy.sum(numpy.dot(uvec, uvec)))
for topic in topics:
result.setdefault(topic, []).append((udiff[topic], uvecno))
logger.debug("printing %i+%i salient words" % (num_words, num_neg))
for topic in sorted(iterkeys(result)):
weights = sorted(result[topic], key=lambda x: -abs(x[0]))
_, most = weights[0]
if u[most, topic] < 0.0: # the most significant word has a negative sign => flip sign of u[most]
normalize = -1.0
else:
normalize = 1.0
# order features according to salience; ignore near-zero entries in u
pos, neg = [], []
for weight, uvecno in weights:
if normalize * u[uvecno, topic] > 0.0001:
pos.append('%s(%.3f)' % (id2token[uvecno], u[uvecno, topic]))
if len(pos) >= num_words:
break
for weight, uvecno in weights:
if normalize * u[uvecno, topic] < -0.0001:
neg.append('%s(%.3f)' % (id2token[uvecno], u[uvecno, topic]))
if len(neg) >= num_neg:
break
logger.info('topic #%s(%.3f): %s, ..., %s' % (topic, s[topic], ', '.join(pos), ', '.join(neg)))
def stochastic_svd(corpus, rank, num_terms, chunksize=20000, extra_dims=None,
power_iters=0, dtype=numpy.float64, eps=1e-6):
"""
Run truncated Singular Value Decomposition (SVD) on a sparse input.
Return (U, S): the left singular vectors and the singular values of the input
data stream `corpus` [4]_. The corpus may be larger than RAM (iterator of vectors).
This may return less than the requested number of top `rank` factors, in case
the input itself is of lower rank. The `extra_dims` (oversampling) and especially
`power_iters` (power iterations) parameters affect accuracy of the decomposition.
This algorithm uses `2+power_iters` passes over the input data. In case you can only
afford a single pass, set `onepass=True` in :class:`LsiModel` and avoid using
this function directly.
The decomposition algorithm is based on
**Halko, Martinsson, Tropp. Finding structure with randomness, 2009.**
.. [4] If `corpus` is a scipy.sparse matrix instead, it is assumed the whole
corpus fits into core memory and a different (more efficient) code path is chosen.
"""
rank = int(rank)
if extra_dims is None:
samples = max(10, 2 * rank) # use more samples than requested factors, to improve accuracy
else:
samples = rank + int(extra_dims)
logger.info("using %i extra samples and %i power iterations" % (samples - rank, power_iters))
num_terms = int(num_terms)
# first phase: construct the orthonormal action matrix Q = orth(Y) = orth((A * A.T)^q * A * O)
# build Y in blocks of `chunksize` documents (much faster than going one-by-one
# and more memory friendly than processing all documents at once)
y = numpy.zeros(dtype=dtype, shape=(num_terms, samples))
logger.info("1st phase: constructing %s action matrix" % str(y.shape))
if scipy.sparse.issparse(corpus):
m, n = corpus.shape
assert num_terms == m, "mismatch in number of features: %i in sparse matrix vs. %i parameter" % (m, num_terms)
o = numpy.random.normal(0.0, 1.0, (n, samples)).astype(y.dtype) # draw a random gaussian matrix
sparsetools.csc_matvecs(m, n, samples, corpus.indptr, corpus.indices,
corpus.data, o.ravel(), y.ravel()) # y = corpus * o
del o
# unlike numpy, scipy.sparse `astype()` copies everything, even if there is no change to dtype!
# so check for equal dtype explicitly, to avoid the extra memory footprint if possible
if y.dtype != dtype:
y = y.astype(dtype)
logger.info("orthonormalizing %s action matrix" % str(y.shape))
y = [y]
q, _ = matutils.qr_destroy(y) # orthonormalize the range
logger.debug("running %i power iterations" % power_iters)
for power_iter in xrange(power_iters):
q = corpus.T * q
q = [corpus * q]
q, _ = matutils.qr_destroy(q) # orthonormalize the range after each power iteration step
else:
num_docs = 0
for chunk_no, chunk in enumerate(utils.grouper(corpus, chunksize)):
logger.info('PROGRESS: at document #%i' % (chunk_no * chunksize))
# construct the chunk as a sparse matrix, to minimize memory overhead
# definitely avoid materializing it as a dense (num_terms x chunksize) matrix!
s = sum(len(doc) for doc in chunk)
chunk = matutils.corpus2csc(chunk, num_terms=num_terms, dtype=dtype) # documents = columns of sparse CSC
m, n = chunk.shape
assert m == num_terms
assert n <= chunksize # the very last chunk of A is allowed to be smaller in size
num_docs += n
logger.debug("multiplying chunk * gauss")
o = numpy.random.normal(0.0, 1.0, (n, samples)).astype(dtype) # draw a random gaussian matrix
sparsetools.csc_matvecs(m, n, samples, chunk.indptr, chunk.indices, # y = y + chunk * o
chunk.data, o.ravel(), y.ravel())
del chunk, o
y = [y]
q, _ = matutils.qr_destroy(y) # orthonormalize the range
for power_iter in xrange(power_iters):
logger.info("running power iteration #%i" % (power_iter + 1))
yold = q.copy()
q[:] = 0.0
for chunk_no, chunk in enumerate(utils.grouper(corpus, chunksize)):
logger.info('PROGRESS: at document #%i/%i' % (chunk_no * chunksize, num_docs))
chunk = matutils.corpus2csc(chunk, num_terms=num_terms, dtype=dtype) # documents = columns of sparse CSC
tmp = chunk.T * yold
tmp = chunk * tmp
del chunk
q += tmp
del yold
q = [q]
q, _ = matutils.qr_destroy(q) # orthonormalize the range
qt = q[:, :samples].T.copy()
del q
if scipy.sparse.issparse(corpus):
b = qt * corpus
logger.info("2nd phase: running dense svd on %s matrix" % str(b.shape))
u, s, vt = scipy.linalg.svd(b, full_matrices=False)
del b, vt
else:
# second phase: construct the covariance matrix X = B * B.T, where B = Q.T * A
# again, construct X incrementally, in chunks of `chunksize` documents from the streaming
# input corpus A, to avoid using O(number of documents) memory
x = numpy.zeros(shape=(qt.shape[0], qt.shape[0]), dtype=numpy.float64)
logger.info("2nd phase: constructing %s covariance matrix" % str(x.shape))
for chunk_no, chunk in enumerate(utils.grouper(corpus, chunksize)):
logger.info('PROGRESS: at document #%i/%i' % (chunk_no * chunksize, num_docs))
chunk = matutils.corpus2csc(chunk, num_terms=num_terms, dtype=qt.dtype)
b = qt * chunk # dense * sparse matrix multiply
del chunk
x += numpy.dot(b, b.T) # TODO should call the BLAS routine SYRK, but there is no SYRK wrapper in scipy :(
del b
# now we're ready to compute decomposition of the small matrix X
logger.info("running dense decomposition on %s covariance matrix" % str(x.shape))
u, s, vt = scipy.linalg.svd(x) # could use linalg.eigh, but who cares... and svd returns the factors already sorted :)
s = numpy.sqrt(s) # sqrt to go back from singular values of X to singular values of B = singular values of the corpus
q = qt.T.copy()
del qt
logger.info("computing the final decomposition")
keep = clip_spectrum(s**2, rank, discard=eps)
u = u[:, :keep].copy()
s = s[:keep]
u = numpy.dot(q, u)
return u.astype(dtype), s.astype(dtype)
| gpl-3.0 | 2,678,219,108,571,901,400 | 45.481333 | 241 | 0.604486 | false |
squeaky-pl/pystacia | pystacia/lazyenum.py | 1 | 2119 | # coding: utf-8
# pystacia/lazyenum.py
# Copyright (C) 2011-2012 by Paweł Piotr Przeradowski
# This module is part of Pystacia and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from six import string_types
class Enum(object):
def __init__(self, name):
self.name = name
def __getattr__(self, name):
return self.cast(name)
def cast(self, name):
return cast(self, name)
def __str__(self):
return self.name
def __repr__(self):
template = formattable("pystacia.lazyenum.enum('{0}')")
return template.format(self.name)
class EnumValue(object):
def __init__(self, enum, name):
self.enum = enum
self.name = name
def __str__(self):
return self.name
def __repr__(self):
return repr(self.enum) + '.' + self.name
def __eq__(self, other):
try:
other = self.enum.cast(other)
except CastException:
return False
return self is other
def __hash__(self):
return hash((self.enum.name, self.name))
from pystacia.util import memoized
@memoized
def enum(name):
return Enum(name)
@memoized
def enum_value(enum, name):
return EnumValue(enum, name)
def cast(enum_, name):
if isinstance(enum_, Enum):
pass
elif isinstance(enum_, string_types):
enum_ = enum(enum_)
else:
msg = formattable('Cannot cast {0} to Enum')
raise CastException(msg.format(enum_))
if isinstance(name, EnumValue):
if name.enum != enum_:
msg = formattable('Attempted to cast {0} to unrelated Enum {1}')
raise CastException(msg.format(str(name), str(enum_)))
return name
elif isinstance(name, string_types):
return enum_value(enum_, name)
else:
msg = formattable('Cannot cast {0} to EnumValue with Enum {1}')
raise CastException(msg.format(str(name), str(enum_)))
from pystacia.util import PystaciaException
class CastException(PystaciaException):
pass
from pystacia.compat import formattable
| mit | 54,184,110,331,084,984 | 21.531915 | 76 | 0.617564 | false |
rmcauley/rainwave | api_requests/admin/update_user_avatar.py | 1 | 1600 | from libs import db
import api.web
from api.urls import handle_api_url
from api import fieldtypes
from api.exceptions import APIException
from libs import config
@handle_api_url("update_user_avatar_by_discord_id")
class UpdateUserAvatarByDiscordId(api.web.APIHandler):
auth_required = False
sid_required = False
description = "Accessible only to localhost connections, for wormgas."
help_hidden = True
fields = {"discord_user_id": (fieldtypes.string, True), "avatar": (fieldtypes.string, True)}
def post(self):
if self.request.remote_ip not in config.get("api_trusted_ip_addresses"):
raise APIException("auth_failed", f"{self.request.remote_ip} is not allowed to access this endpoint.")
discord_user_id = self.get_argument("discord_user_id")
avatar = self.get_argument("avatar")
avatar_url = f"https://cdn.discordapp.com/avatars/{discord_user_id}/{avatar}.png?size=320"
user_avatar_type = "avatar.driver.remote"
possible_id = db.c.fetch_var(
"SELECT user_id FROM phpbb_users WHERE discord_user_id = %s",
(discord_user_id,),
)
if possible_id:
db.c.update(
(
"UPDATE phpbb_users SET "
" user_avatar_type = %s, "
" user_avatar = %s "
"WHERE user_id = %s"
),
(
user_avatar_type,
avatar_url,
possible_id,
),
)
self.append_standard("yes")
| gpl-2.0 | 2,680,378,987,728,456,000 | 35.363636 | 114 | 0.571875 | false |
NaleRaphael/CalendarParser | core.py | 1 | 16786 | from __future__ import absolute_import, print_function, division
import pandas as pd
import re
import warnings
from datetime import datetime as dt
from datetime import timedelta as td
from config import ConverterConfig
__all__ = []
# TODO: set `datetime_format` by reading from config file
DT_FMT = 'yyyy/MM/dd' # datetime_format
UT_FACTOR = 1000000000
DT_FMT_YMD = '%Y/%m/%d'
DT_FMT_YMDHM = '%Y/%m/%d %H:%M'
DT_DELTA_DAY = 'days'
DT_DELTA_HOUR = 'hours'
DT_FMT_HM = '%H:%M'
class XlsxFile(object):
def __init__(self):
self.df = None
@classmethod
def from_excel(clz, fpath, header=None):
try:
xlsx = clz()
xlsx.df = pd.read_excel(fpath, header=header)
except:
raise
return xlsx
def to_excel(self, opath, header=None):
"""
Parameters
----------
opath : string
Output path.
header : array_like
Column names to be written.
Note
----
Some of `df.columns` might be appended with suffixes if there are
duplicate names.
If users don't want the modified column names, they can overwrite
them by giving `header`.
"""
writer = pd.ExcelWriter(opath, datetime_format=DT_FMT)
self.df.to_excel(writer, index=False)
if header is not None:
pd.DataFrame(header).transpose().to_excel(writer, index=False,
header=False, startrow=0)
writer.save()
ue = lambda x: x.encode('utf-8')
ud = lambda x: x.decode('utf-8')
class EventWriter(object):
def __init__(self):
self.config = ConverterConfig().CsvWriterConfig
self.km_content = self.config.km_content
def write(self, f, obj):
km = self.config.km
events = obj.events
if obj.events is None:
return
# write header
header = [k for k in km]
f.write(','.join(header) + '\n')
# work-around: insert section name into title
# step01: find the index of the header `Subject`
idx = header.index(u'Subject')
# write content
for i in range(len(events)):
# replace all `None` by empty string
temp = [events[i][km[k]] if events[i][km[k]] is not None else ''
for k in km]
temp[idx] = u'{0}:{1}'.format(
self.km_content[obj.__class__.__name__], temp[idx])
# encode utf-8 into unicode
temp = [ue(val) if type(val) is unicode else val for val in temp]
for i, val in enumerate(temp):
if type(val) is dt:
temp[i] = val.strftime(DT_FMT_YMD)
content = map(str, temp)
line = ','.join(content)
f.write(line + '\n')
class Calendar(XlsxFile):
def __init__(self):
super(Calendar, self).__init__()
self.lh = None # left side header
self.df = None
self.config = ConverterConfig().CalendarConfig
@classmethod
def from_excel(clz, fpath):
obj = super(Calendar, clz).from_excel(fpath)
obj.lh = obj.df[0]
obj.df.drop(obj.df.columns[0], axis=1, inplace=True)
return obj
def to_week_schedules(self):
res = []
self._fill_na_theme()
for col in self.df.columns:
ws = WeekSchedule.parse(self.df[col], self.lh)
res.append(ws)
return res
# Helper method, not a generic method
def _fill_na_theme(self):
idx_theme = self.lh.index[self.lh == self.config.km['theme']]
target = self.df.ix[idx_theme].squeeze().tolist()
length = len(target)
# Fill fields in even number by the value of fields in odd number.
target[1::2] = target[:length-length%2:2]
# Re-assign value back to the target
self.df.iloc[idx_theme] = target
class WeekSchedule(object):
def __init__(self):
self.week = ''
self.date = None
self.theme = ''
self.tg = '' # trafic guard
self.admin = None
self.academic = None
self.student = None
self.hygiene = None
self.general = None
self.kingdergarten = None
self.config = ConverterConfig().CalendarConfig
self.hidx = lambda k: self.config.ho[self.config.km[k]]
def keys(self):
return ['week', 'date', 'theme', 'tg', 'admin', 'academic', 'student',
'hygiene', 'general', 'kingdergarten']
def content(self):
return ['theme', 'tg', 'admin', 'academic', 'student', 'hygiene', 'general',
'kingdergarten']
@classmethod
def parse(clz, col, lh, reset_lh=False):
"""
Parameters
----------
col : pandas.Series
Single column of schedule to be parsed.
lh : pandas.Series
Left side header of schedule.
reset_lh : bool
Reset left side header everytime a new object is created by
this method.
Returns
-------
ws : WeekSchedule
"""
ws = WeekSchedule()
# TODO: try to do this automatically
# get the order (index) of left side header
if ws.config.ho is None or reset_lh:
ws.config.set_header_order(lh)
ws.week = col[ws.hidx('week')]
ws.date = EventDate.strptime(col[ws.hidx('date')])
ws.theme = WeekTheme.parse(ws.date, col[ws.hidx('theme')])
ws.tg = TrafficGuard.parse(ws.date,
u'{0} - {1}'.format(ws.week, col[ws.hidx('tg')]))
ws.admin = AdminAffair.parse(col[ws.hidx('admin')], ws.date)
ws.academic = AcademicSection.parse(col[ws.hidx('academic')], ws.date)
ws.student = StudentSection.parse(col[ws.hidx('student')], ws.date)
ws.hygiene = HygieneSection.parse(col[ws.hidx('hygiene')], ws.date)
ws.general = GeneralAffair.parse(col[ws.hidx('general')], ws.date)
ws.kingdergarten = Kingdergarten.parse(col[ws.hidx('kingdergarten')],
ws.date)
return ws
def __getitem__(self, key):
return self.__dict__[key]
def to_csv(self, writer, path, mode='w'):
if mode.lower() not in ['w', 'a']:
raise ValueError('Mode can only be `w`(write) or `a`(append).')
# TODO: export to csv using utf-8 coding
with open(path, mode) as f:
for sec in self.content():
writer.write(f, self[sec])
class EventDate(object):
def __init__(self):
self.begin = None
self.end = None
self.fmt = None
def __repr__(self):
return self.strftime()
@classmethod
def last_day_of_month(clz, date):
next_month = date.replace(day=28) + td(days=4)
return (next_month - td(days=next_month.day)).day
@classmethod
def get_timedelta(clz, ref_date, src):
num = int(src)
if num < ref_date.day:
num += clz.last_day_of_month(ref_date)
delta = {'days': num - ref_date.day}
return td(**delta)
@classmethod
def strptime(clz, src, fmt=None):
wd = clz()
if fmt is None:
fmt = DT_FMT_YMD
wd.fmt = fmt
temp = src.split('~')
wd.begin = dt.strptime(temp[0].strip(), fmt)
if len(temp) == 2:
wd.end = dt.strptime(temp[1].strip(), fmt)
return wd
@classmethod
def parse(clz, ref_date, src_begin, src_end=None):
"""
An week-based datetime parser.
"""
obj = clz()
obj.begin = ref_date + clz.get_timedelta(ref_date, src_begin)
if src_end is not None:
obj.end = ref_date + clz.get_timedelta(ref_date, src_end)
return obj
def strftime(self, fmt=None):
if fmt is None:
fmt = self.fmt if self.fmt is not None else DT_FMT_YMD
if self.end is not None:
return '{0} ~ {1}'.format(self.begin.strftime(fmt),
self.end.strftime(fmt))
else:
return self.begin.strftime(fmt)
class EventTime(object):
def __init__(self, **kwargs):
self.begin = None
self.end = None
@classmethod
def parse(clz, src, fmt=DT_FMT_HM):
temp = src.split('~')
ct = clz()
t = dt.strptime(temp[0].strip(), fmt)
ct.begin = td(hours=t.hour, minutes=t.minute)
if len(temp) == 2:
t = dt.strptime(temp[1].strip(), fmt)
ct.end = td(hours=t.hour, minutes=t.minute)
return ct
class EventDateTime(object):
def __init__(self):
self.bdate = None
self.edate = None
self.btime = None
self.etime = None
@classmethod
def parse(clz, ref_date, src_date, src_time=None, fmt_date=None,
fmt_time=None):
obj = clz()
ed = EventDate.parse(src_date)
obj.bdate, obj.edate = ed.begin, ed.end
if src_time is not None:
et = EventTime.parse(src_time)
obj.btime, obj.etime = et.begin, et.end
return obj
class Event(object):
"""
Note
----
pattern of src:
1. Date is specified
[date] - [title] @[location]? ([description])?
2. No date is specified
*title @[location]? ([description])?
pattern:
\u4e00-\u9fcc :
chinese characters
\uff0c\u3001 :
full word of comma
(^(\d+)\s*\^(\d+\:\d+(\~\d+\:\d+)?)) :
`date ^ HH:mm ~ HH:mm`
(^(\d+)(\s*\~\s*(\d+))?) :
`date_1 ~ date_2`
(\s*\@([\w\u4e00-\u9fcc\uff0c\u3001\,]+))? :
`@[location]`, optional
(\s*\(([\w\u4e00-\u9fcc\uff0c\u3001\,]+)\))? :
`([description])`, optional
"""
pat_time1 = '(^(\d+)\s*\^(\d+\:\d+(\~\d+\:\d+)?))'
pat_time2 = '(^(\d+)(\s*\~\s*(\d+))?)'
pat_time3 = '(^\*)'
pat_title = '([\w\u4e00-\u9fcc\uff0c\u3001\,]+)'
pat_loc = '(\s*\@([\w\u4e00-\u9fcc\uff0c\u3001\,]+))?'
pat_des = '(\s*\(([\w\u4e00-\u9fcc\uff0c\u3001\,]+)\))?'
regex = re.compile(r'({0}|{1}|{2})\s*\-\s*{3}{4}{5}'.format(pat_time1,
pat_time2, pat_time3, pat_title, pat_loc, pat_des), re.UNICODE)
def __init__(self, bdate=None, edate=None, btime=None, etime=None,
title=None, location=None, description=None):
self.bdate = bdate
self.edate = edate
self.btime = btime
self.etime = etime
self.title = title
self.location = location
self.description = description
def __repr__(self):
res = (
u'Event: {0}; bdate: {1}; btime: {2}; edate: {3}; etime: {4}; '
'loc: {5}; des: {6}\n'
).format(
self.title,
self.bdate.strftime(DT_FMT_YMD) if self.bdate is not None else None,
self.btime,
self.edate.strftime(DT_FMT_YMD) if self.edate is not None else None,
self.etime,
self.location,
self.description
)
return res.encode('utf-8')
def __str__(self):
return self.__repr__()
def __getitem__(self, key):
return self.__dict__[key]
def keys(self):
return self.__dict__.keys()
@classmethod
def parse(clz, src, ref_date=None, raise_exception=False,
failed_value=None):
"""
Parameters
----------
src : string
String to be parsed.
ref_date : datetime
Beginning date of a week.
failed_value : object
Value to be returned if regex parser failed.
"""
parts = clz.regex.split(src)
if len(parts) < 15:
msg = src if type(src) is str else src.encode('utf-8')
if not raise_exception:
ParserWarning.warn('')
return failed_value
raise ParserError('Format of string is not correct: {0}'.format(msg))
obj = Event()
if parts[10] == u'*':
# if there is no specified date, use ref_date as its date
obj.bdate = ref_date
elif parts[7] is not None:
# date
ed = EventDate.parse(ref_date, parts[7], parts[9])
obj.bdate = ed.begin
obj.edate = ed.end
# work-around: multiple day event can not be set by only two `date`,
# it need additional `time` info.
# (This bug only occurs when events are imported to Google calendar
# by .csv files)
if ed.end is not None:
et = EventTime.parse('8:00~17:00')
obj.btime = et.begin
obj.etime = et.end
elif parts[3] is not None:
# time
ed = EventDate.parse(ref_date, parts[3])
obj.bdate = ed.begin
et = EventTime.parse(parts[4])
if et.end is not None:
obj.edate = ed.begin
obj.btime = et.begin
obj.etime = et.end
else:
raise ParserError('Cannot parse string correctly.')
# those content which may contains commas should be enclosed by `""`
obj.title = (u'\"{0}\"'.format(parts[11])
if parts[11] is not None else '')
obj.location = parts[13]
obj.description = (u'\"{0}\"'.format(parts[15])
if parts[15] is not None else '')
return obj
class WeekEvent(object):
def __init__(self):
self.events = []
super(WeekEvent, self).__init__()
def __repr__(self):
evnt_content = ';'.join(map(str, self.events))
res = '{0}:\n {1}'.format(self.__class__.__name__, evnt_content)
return res
@classmethod
def parse(clz, week_date, title, location='', description='',
failed_value=None):
"""
Parameters
----------
week_date : EventDate
title : string
location : string
description : string
failed_value :
"""
obj = clz()
evnt = Event()
evnt.bdate = week_date.begin
evnt.edate = week_date.end
# work-around: multiple day event can not be set by only two `date`,
# it need additional `time` info.
# (This bug only occurs when events are imported to Google calendar
# by .csv files)
et = EventTime.parse('8:00~17:00')
evnt.btime = et.begin
evnt.etime = et.end
evnt.title = title
evnt.location = location
evnt.description = description
obj.events.append(evnt)
return obj
class WeekTheme(WeekEvent):
def __init__(self):
super(WeekTheme, self).__init__()
class TrafficGuard(WeekEvent):
def __init__(self):
super(TrafficGuard, self).__init__()
class AffairBase(object):
def __init__(self):
self.events = None
def __repr__(self):
evnt_content = ';'.join(map(str, self.events))
res = '{0}:\n {1}'.format(self.__class__.__name__, evnt_content)
return res
@classmethod
def parse(clz, src, weekdate):
obj = clz()
if type(src) not in [unicode, str]:
obj.event = []
return obj
content = src.split('\n')
try:
temp = [Event.parse(
val,
ref_date=weekdate.begin,
raise_exception=True)
for val in content]
except:
raise
obj.events = [val for val in temp if val is not None]
return obj
class AdminAffair(AffairBase):
def __init__(self):
super(AdminAffair, self).__init__()
class AcademicSection(AffairBase):
def __init__(self):
super(AcademicSection, self).__init__()
class StudentSection(AffairBase):
def __init__(self):
super(StudentSection, self).__init__()
class HygieneSection(AffairBase):
def __init__(self):
super(HygieneSection, self).__init__()
class GeneralAffair(AffairBase):
def __init__(self):
super(GeneralAffair, self).__init__()
class Kingdergarten(AffairBase):
def __init__(self):
super(Kingdergarten, self).__init__()
class ParserError(Exception):
def __init__(self, message='', **kwargs):
super(ParserError, self).__init__(**kwargs)
self.message = 'ParserError: {0}'.format(message)
class ParserWarning(Warning):
def __init__(self, message='', **kwargs):
super(ParserWarning, self).__init__(**kwargs)
self.message = message
class ExceptionMessageCollector(object):
def __init__(self):
self.collected = []
def collect(self, errmsg):
self.collected.append(errmsg)
| mit | 9,012,533,457,759,607,000 | 29.082437 | 85 | 0.533004 | false |
petrvanblokland/Xierpa3 | xierpa3/contributions/filibuster/content/designarticle.py | 1 | 1963 | # -*- coding: UTF-8 -*-
# -----------------------------------------------------------------------------
# xierpa server
# Copyright (c) 2014+ [email protected], www.petr.com, www.xierpa.com
#
# X I E R P A 3
# Distribution by the MIT License.
#
# -----------------------------------------------------------------------------
#
# Contributed by Erik van Blokland and Jonathan Hoefler
# Original from filibuster.
#
# FILIBUSTER.ORG!
"""
history
A template for new submodules
--------------------------------------------------------------------
"""
__version__ = '3.0.0'
__author__ = "someone"
content = {
'da_text': [
'<#da_statement#> <#da_statement#> <#da_statement#>',
'<#da_statement#> <#da_statement#>',
'<#da_statement#> <#da_statement#>',
'<#da_statement#> <#da_statement#>',
'<#da_statement#>',
],
'da_illustration': ['','','','',' [ill. @@]',],
'da_statement': [
'<#^,design_sentence#><#da_illustration#>',
'<#^,design_sentence#>',
'<#^,design_question#><#da_illustration#>',
'<#^,design_question#> <#^,design_argument#>.<#da_illustration#> <#^,design_counterclaim#>. <#^,design_conclusion#>.',
'<#^,design_claim#>: <#design_counterclaim#>.<#da_illustration#> <#^,design_argument#>. <#^,design_counterclaim#>. <#^,design_conclusion#>.',
'<#^,design_claim#>: <#design_counterclaim#> and <#design_claim#>.<#da_illustration#> <#^,design_argument#>. <#^,design_counterclaim#>. <#^,design_conclusion#>.',
'<#^,design_claim#>, and <#design_claim#>. <#^,design_argument#>. <#^,design_counterclaim#>. <#^,design_conclusion#>.',
'<#^,design_claim#>. <#^,design_argument#>. <#^,design_counterclaim#>. <#^,design_conclusion#>.<#da_illustration#>',
],
}
| mit | -8,157,307,901,914,690,000 | 42.622222 | 178 | 0.447275 | false |
sanacl/GrimoireELK | grimoire/ocean/jenkins.py | 1 | 1077 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
#
# StackExchange Ocean feeder
#
# Copyright (C) 2015 Bitergia
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Authors:
# Alvaro del Castillo San Felix <[email protected]>
#
from grimoire.ocean.elastic import ElasticOcean
class JenkinsOcean(ElasticOcean):
"""Jenkins Ocean feeder"""
def _fix_item(self, item):
item["ocean-unique-id"] = item["data"]["url"]
| gpl-3.0 | 1,237,929,760,044,938,800 | 32.65625 | 76 | 0.731662 | false |
makerdao/maker.py | pymaker/feed.py | 1 | 5485 | # This file is part of Maker Keeper Framework.
#
# Copyright (C) 2017-2018 reverendus
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from web3 import Web3
from pymaker import Contract, Address, Transact
class DSValue(Contract):
"""A client for the `DSValue` contract, a single-value data feed.
`DSValue` is a single-value data feed, which means it can be in one of two states.
It can either contain a value (in which case `has_value()` returns `True` and the read methods
return that value) or be empty (in which case `has_value()` returns `False` and the read
methods throw exceptions).
`DSValue` can be populated with a new value using `poke()` and cleared using `void()`.
Everybody can read from a `DSValue`.
Calling `poke()` and `void()` is usually whitelisted to some addresses only.
The `DSValue` contract keeps the value as a 32-byte array (Ethereum `bytes32` type).
Methods have been provided to cast it into `int`, read as hex etc.
You can find the source code of the `DSValue` contract here:
<https://github.com/dapphub/ds-value>.
Attributes:
web3: An instance of `Web` from `web3.py`.
address: Ethereum address of the `DSValue` contract.
"""
abi = Contract._load_abi(__name__, 'abi/DSValue.abi')
bin = Contract._load_bin(__name__, 'abi/DSValue.bin')
@staticmethod
def deploy(web3: Web3):
return DSValue(web3=web3, address=Contract._deploy(web3, DSValue.abi, DSValue.bin, []))
def __init__(self, web3: Web3, address: Address):
assert(isinstance(web3, Web3))
assert(isinstance(address, Address))
self.web3 = web3
self.address = address
self._contract = self._get_contract(web3, self.abi, address)
def has_value(self) -> bool:
"""Checks whether this instance contains a value.
Returns:
`True` if this instance contains a value, which can be read. `False` otherwise.
"""
return self._contract.call().peek()[1]
def read(self) -> bytes:
"""Reads the current value from this instance as a byte array.
If this instance does not contain a value, throws an exception.
Returns:
A 32-byte array with the current value of this instance.
"""
return self._contract.call().read()
def read_as_hex(self) -> str:
"""Reads the current value from this instance and converts it to a hex string.
If this instance does not contain a value, throws an exception.
Returns:
A string with a hexadecimal representation of the current value of this instance.
"""
return ''.join(hex(x)[2:].zfill(2) for x in self.read())
def read_as_int(self) -> int:
"""Reads the current value from this instance and converts it to an int.
If the value is actually a `Ray` or a `Wad`, you can convert it to one using `Ray(...)`
or `Wad(...)`. Please see `Ray` or `Wad` for more details.
If this instance does not contain a value, throws an exception.
Returns:
An integer representation of the current value of this instance.
"""
return int(self.read_as_hex(), 16)
def poke(self, new_value: bytes) -> Transact:
"""Populates this instance with a new value.
Args:
new_value: A 32-byte array with the new value to be set.
Returns:
A :py:class:`pymaker.Transact` instance, which can be used to trigger the transaction.
"""
assert(isinstance(new_value, bytes))
assert(len(new_value) == 32)
return Transact(self, self.web3, self.abi, self.address, self._contract, 'poke', [new_value])
def poke_with_int(self, new_value: int) -> Transact:
"""Populates this instance with a new value.
Handles the conversion of a Python `int` into the Solidity `bytes32` type automatically.
If the value you want to set is actually a `Ray` or a `Wad`, you can get the integer value from them
by accessing their `value` property. Please see `Ray` or `Wad` for more details.
Args:
new_value: A non-negative integer with the new value to be set.
Returns:
A :py:class:`pymaker.Transact` instance, which can be used to trigger the transaction.
"""
assert(isinstance(new_value, int))
assert(new_value >= 0)
return self.poke(new_value.to_bytes(32, byteorder='big'))
def void(self) -> Transact:
"""Removes the current value from this instance.
Returns:
A :py:class:`pymaker.Transact` instance, which can be used to trigger the transaction.
"""
return Transact(self, self.web3, self.abi, self.address, self._contract, 'void', [])
def __repr__(self):
return f"DSValue('{self.address}')"
| agpl-3.0 | 7,170,301,331,189,471,000 | 37.356643 | 108 | 0.650684 | false |
ashutosh-mishra/youtube-dl | test/test_youtube_signature.py | 1 | 2330 | #!/usr/bin/env python
# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import io
import re
import string
from youtube_dl.extractor import YoutubeIE
from youtube_dl.utils import compat_str, compat_urlretrieve
_TESTS = [
(
u'https://s.ytimg.com/yts/jsbin/html5player-vflHOr_nV.js',
u'js',
86,
u'>=<;:/.-[+*)(\'&%$#"!ZYX0VUTSRQPONMLKJIHGFEDCBA\\yxwvutsrqponmlkjihgfedcba987654321',
),
(
u'https://s.ytimg.com/yts/jsbin/html5player-vfldJ8xgI.js',
u'js',
85,
u'3456789a0cdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRS[UVWXYZ!"#$%&\'()*+,-./:;<=>?@',
),
(
u'https://s.ytimg.com/yts/swfbin/watch_as3-vflg5GhxU.swf',
u'swf',
82,
u':/.-,+*)=\'&%$#"!ZYX0VUTSRQPONMLKJIHGFEDCBAzyxw>utsrqponmlkjihgfedcba987654321'
),
]
class TestSignature(unittest.TestCase):
def setUp(self):
TEST_DIR = os.path.dirname(os.path.abspath(__file__))
self.TESTDATA_DIR = os.path.join(TEST_DIR, 'testdata')
if not os.path.exists(self.TESTDATA_DIR):
os.mkdir(self.TESTDATA_DIR)
def make_tfunc(url, stype, sig_length, expected_sig):
basename = url.rpartition('/')[2]
m = re.match(r'.*-([a-zA-Z0-9_-]+)\.[a-z]+$', basename)
assert m, '%r should follow URL format' % basename
test_id = m.group(1)
def test_func(self):
fn = os.path.join(self.TESTDATA_DIR, basename)
if not os.path.exists(fn):
compat_urlretrieve(url, fn)
ie = YoutubeIE()
if stype == 'js':
with io.open(fn, encoding='utf-8') as testf:
jscode = testf.read()
func = ie._parse_sig_js(jscode)
else:
assert stype == 'swf'
with open(fn, 'rb') as testf:
swfcode = testf.read()
func = ie._parse_sig_swf(swfcode)
src_sig = compat_str(string.printable[:sig_length])
got_sig = func(src_sig)
self.assertEqual(got_sig, expected_sig)
test_func.__name__ = str('test_signature_' + stype + '_' + test_id)
setattr(TestSignature, test_func.__name__, test_func)
for test_spec in _TESTS:
make_tfunc(*test_spec)
if __name__ == '__main__':
unittest.main()
| unlicense | 6,055,073,884,091,018,000 | 27.765432 | 95 | 0.586266 | false |
berkmancenter/mediacloud | apps/common/src/python/mediawords/util/identify_language.py | 1 | 4998 | from typing import Dict
import cld2
from mediawords.util.log import create_logger
from mediawords.util.perl import decode_object_from_bytes_if_needed
# Min. text length for reliable language identification
__RELIABLE_IDENTIFICATION_MIN_TEXT_LENGTH = 10
# Don't process strings longer than the following length
__MAX_TEXT_LENGTH = 1024 * 1024
log = create_logger(__name__)
def __create_supported_language_mapping() -> Dict[str, str]:
"""Create and return language code -> language name dict."""
supported_languages_tuple = cld2.LANGUAGES
codes_to_names = dict()
for language_name, language_code in supported_languages_tuple:
# Tuple members are in 'bytes'
language_name = language_name.decode('utf-8').lower()
language_code = language_code.decode('utf-8').lower()
# Don't include "X_Malayalam" and "xx-Mlym"
if language_name.startswith('X_') or language_code.startswith('xx-'):
continue
# Don't include extended languages such as "zh-hant"
if len(language_code) > 3:
continue
codes_to_names[language_code] = language_name
return codes_to_names
__LANGUAGE_CODES_TO_NAMES = __create_supported_language_mapping()
def __recode_utf8_string(utf8_string: str) -> str:
"""Encode and then decode UTF-8 string by removing invalid characters in the process."""
return utf8_string.encode('utf-8', errors='replace').decode('utf-8', errors='replace')
def language_code_for_text(text: str):
"""Returns an ISO 690 language code for the plain text passed as a parameter.
:param text: Text that should be identified
:return: ISO 690 language code (e.g. 'en') on successful identification, empty string ('') on failure
"""
text = decode_object_from_bytes_if_needed(text)
if not text:
return ''
if len(text) > __MAX_TEXT_LENGTH:
log.warning("Text is longer than %d, trimming..." % __MAX_TEXT_LENGTH)
text = text[:__MAX_TEXT_LENGTH]
# We need to verify that the file can cleany encode and decode because CLD can segfault on bad UTF-8
text = __recode_utf8_string(text)
try:
is_reliable, text_bytes_found, details = cld2.detect(utf8Bytes=text, useFullLangTables=True)
except Exception as ex:
log.error("Error while detecting language: %s" % str(ex))
return ''
if not details:
return ''
best_match = details[0]
language_name = best_match.language_name.lower()
language_code = best_match.language_code.lower()
if language_name in {'unknown', 'tg_unknown_language'} or language_code == 'un':
return ''
if not language_is_supported(language_code):
return ''
return language_code
def identification_would_be_reliable(text: str) -> bool:
"""Returns True if the language identification for the text passed as a parameter is likely to be reliable.
:param text: Text that should be identified
:return: True if language identification is likely to be reliable; False otherwise
"""
text = decode_object_from_bytes_if_needed(text)
if not text:
return False
# Too short?
if len(text) < __RELIABLE_IDENTIFICATION_MIN_TEXT_LENGTH:
return False
if len(text) > __MAX_TEXT_LENGTH:
log.warning("Text is longer than %s, trimming..." % __MAX_TEXT_LENGTH)
text = text[:__MAX_TEXT_LENGTH]
text = __recode_utf8_string(text)
# Not enough letters as opposed to non-letters?
word_character_count = 0
digit_count = 0
underscore_count = 0
for character in text:
if character.isalpha():
word_character_count += 1
if character.isdigit():
digit_count += 1
if character == '_':
underscore_count += 1
letter_count = word_character_count - digit_count - underscore_count
if letter_count < __RELIABLE_IDENTIFICATION_MIN_TEXT_LENGTH:
return False
return True
def language_is_supported(code: str) -> bool:
"""Returns True if the language code if supported by the identifier.
:param code: ISO 639-1 language code
:return: True if the language can be identified, False if it can not
"""
code = decode_object_from_bytes_if_needed(code)
if not code:
return False
return code in __LANGUAGE_CODES_TO_NAMES
# MC_REWRITE_TO_PYTHON make it return Union[str, None] after Perl -> Python rewrite
def language_name_for_code(code: str) -> str:
"""Return the human readable language name for a given language code.
:param code: ISO 639-1 language code
:return: Language name, e.g. "Lithuanian", or an empty string ('') if language is not supported
"""
code = decode_object_from_bytes_if_needed(code)
if not code:
return ''
if code not in __LANGUAGE_CODES_TO_NAMES:
return ''
language_name = __LANGUAGE_CODES_TO_NAMES[code]
language_name = language_name.replace('_', ' ')
return language_name.title()
| agpl-3.0 | 6,498,421,074,432,806,000 | 30.043478 | 111 | 0.659664 | false |
Lilykos/pyphonetics | pyphonetics/phonetics/mra.py | 1 | 1163 | import re
from unidecode import unidecode
from ..utils import squeeze, check_empty, check_str
from .phonetic_algorithm import PhoneticAlgorithm
class MatchingRatingApproach(PhoneticAlgorithm):
"""
Functions related to the computation of the Match Rating Approach codex.
[Reference]: https://en.wikipedia.org/wiki/Match_rating_approach
[Article]: Moore, G B.; Kuhns, J L.; Treffzs, J L.; Montgomery, C A. (Feb 1, 1977).
Accessing Individual Records from Personal Data Files Using Nonunique Identifiers.
US National Institute of Standards and Technology. p. 17. NIST SP - 500-2.
"""
def __init__(self):
super().__init__()
def phonetics(self, word):
check_str(word)
check_empty(word)
codex = unidecode(word).upper()
codex = re.sub(r'[^A-Z]', r'', codex)
# Dropping non - leading vowels
codex = codex[0] + re.sub(r'[AEIOU]', r'', codex[1:])
# Dropping consecutive consonants
codex = squeeze(codex)
# Returning the codex
offset = min(3, len(codex) - 3)
return codex[:3] + codex[len(codex) - offset:offset + len(codex)]
| mit | -4,034,487,338,249,134,600 | 32.228571 | 90 | 0.638005 | false |
bytedance/fedlearner | fedlearner/data_join/raw_data_iter_impl/metric_stats.py | 1 | 1198 | import copy
import random
from datetime import datetime
import pytz
from fedlearner.common import metrics, common
from fedlearner.data_join.common import convert_to_str
from fedlearner.common.common import convert_to_datetime
class MetricStats:
def __init__(self, raw_data_options, metric_tags):
self._tags = copy.deepcopy(metric_tags)
self._stat_fields = raw_data_options.optional_fields
self._sample_ratio = common.Config.RAW_DATA_METRICS_SAMPLE_RATE
def emit_metric(self, item):
if random.random() < self._sample_ratio:
tags = copy.deepcopy(self._tags)
for field in self._stat_fields:
value = convert_to_str(getattr(item, field, '#None#'))
tags[field] = value
tags['example_id'] = convert_to_str(item.example_id)
tags['event_time'] = convert_to_datetime(item.event_time, True) \
.isoformat(timespec='microseconds')
tags['process_time'] = datetime.now(tz=pytz.utc) \
.isoformat(timespec='microseconds')
metrics.emit_store(name='input_data', value=0, tags=tags,
index_type='raw_data')
| apache-2.0 | -8,417,883,700,782,550,000 | 38.933333 | 77 | 0.627713 | false |
eavatar/eavatar-me | src/ava_settings.py | 1 | 1423 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
"""
Default settings which can be overridden by configuration files.
"""
GENERAL = {
"DEBUG": False,
}
WEBFRONT = {
"disabled": False,
"listen_port": 5080,
"listen_addr": "127.0.0.1",
"secure_listen_addr": "",
"secure_listen_port": 5443,
}
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"simple": {
"format": "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
}
},
"handlers": {
"console": {
"class": "logging.StreamHandler",
"level": "DEBUG",
"formatter": "simple"
},
"file_handler": {
"class": "logging.handlers.RotatingFileHandler",
"level": "INFO",
"formatter": "simple",
"filename": "${logs_dir}/ava.log",
"maxBytes": 1048576,
"backupCount": 2,
"encoding": "utf8"
}
},
"loggers": {
"ava": {
"level": "DEBUG",
"handlers": [
"console",
"file_handler"
],
"propagate": "no"
},
"avashell": {
"level": "DEBUG",
"handlers": [
"console",
"file_handler"
],
"propagate": "no"
},
"root": {
"level": "DEBUG",
"handlers": [
"console"
]
}
}
}
| apache-2.0 | -4,526,999,297,465,340,000 | 19.926471 | 72 | 0.465214 | false |
KevinGrahamFoster/django-cities-light | cities_light/settings.py | 1 | 7116 | """
Settings for this application. The most important is TRANSLATION_LANGUAGES
because it's probably project specific.
.. py:data:: TRANSLATION_LANGUAGES
List of language codes. It is used to generate the alternate_names property
of cities_light models. You want to keep it as small as possible.
By default, it includes the most popular languages according to wikipedia,
which use a rather ascii-compatible alphabet. It also contains 'abbr' which
stands for 'abbreviation', you might want to include this one as well.
See:
- http://download.geonames.org/export/dump/iso-languagecodes.txt
Example::
CITIES_LIGHT_TRANSLATION_LANGUAGES = ['es', 'en', 'fr', 'abbr']
.. py:data:: INCLUDE_COUNTRIES
List of country codes to include. It's None by default which lets all
countries in the database. But if you only wanted French and Belgium
countries/regions/cities, you could set it as such::
CITIES_LIGHT_INCLUDE_COUNTRIES = ['FR', 'BE']
.. py:data:: INCLUDE_CITY_TYPES
List of city feature codes to include. They are described at
http://www.geonames.org/export/codes.html, section "P city, village".
CITIES_LIGHT_INCLUDE_CITY_TYPES = [
'PPL', 'PPLA', 'PPLA2', 'PPLA3', 'PPLA4', 'PPLC',
'PPLF', 'PPLG', 'PPLL', 'PPLR', 'PPLS', 'STLMT',
]
.. py:data:: COUNTRY_SOURCES
A list of urls to download country info from. Default is countryInfo.txt
from geonames download server. Overridable in
``settings.CITIES_LIGHT_COUNTRY_SOURCES``.
.. py:data:: REGION_SOURCES
A list of urls to download region info from. Default is
admin1CodesASCII.txt from geonames download server. Overridable in
``settings.CITIES_LIGHT_REGION_SOURCES``.
.. py:data:: CITY_SOURCES
A list of urls to download city info from. Default is cities15000.zip from
geonames download server. Overridable in
``settings.CITIES_LIGHT_CITY_SOURCES``.
.. py:data:: TRANSLATION_SOURCES
A list of urls to download alternate names info from. Default is
alternateNames.zip from geonames download server. Overridable in
``settings.CITIES_LIGHT_TRANSLATION_SOURCES``.
.. py:data:: SOURCES
A list with all sources, auto-generated.
.. py:data:: FIXTURES_BASE_URL
Base URL to download country/region/city fixtures from. Should end
with a slash. Default is ``file://DATA_DIR/fixtures/``. Overridable in
``settings.CITIES_LIGHT_FIXTURES_BASE_URL``.
.. py:data:: DATA_DIR
Absolute path to download and extract data into. Default is
cities_light/data. Overridable in ``settings.CITIES_LIGHT_DATA_DIR``
.. py:data:: INDEX_SEARCH_NAMES
If your database engine for cities_light supports indexing TextFields (ie.
it is **not** MySQL), then this should be set to True. You might have to
override this setting with ``settings.CITIES_LIGHT_INDEX_SEARCH_NAMES`` if
using several databases for your project.
.. py:data:: CITIES_LIGHT_APP_NAME
Modify it only if you want to define your custom cities models, that
are inherited from abstract models of this package.
It must be equal to app name, where custom models are defined.
For example, if they are in geo/models.py, then set
``settings.CITIES_LIGHT_APP_NAME = 'geo'``.
Note: you can't define one custom model, you have to define all of
cities_light models, even if you want to modify only one.
"""
from __future__ import unicode_literals
import os.path
from django.conf import settings
__all__ = [
'FIXTURES_BASE_URL', 'COUNTRY_SOURCES', 'REGION_SOURCES', 'CITY_SOURCES',
'TRANSLATION_LANGUAGES', 'TRANSLATION_SOURCES', 'SOURCES', 'DATA_DIR',
'INDEX_SEARCH_NAMES', 'INCLUDE_COUNTRIES', 'INCLUDE_CITY_TYPES',
'DEFAULT_APP_NAME', 'CITIES_LIGHT_APP_NAME',
'ICountry', 'IRegion', 'ICity', 'IAlternate']
COUNTRY_SOURCES = getattr(settings, 'CITIES_LIGHT_COUNTRY_SOURCES',
['http://download.geonames.org/export/dump/countryInfo.txt'])
REGION_SOURCES = getattr(settings, 'CITIES_LIGHT_REGION_SOURCES',
['http://download.geonames.org/export/dump/admin1CodesASCII.txt'])
CITY_SOURCES = getattr(settings, 'CITIES_LIGHT_CITY_SOURCES',
['http://download.geonames.org/export/dump/cities15000.zip'])
TRANSLATION_SOURCES = getattr(settings, 'CITIES_LIGHT_TRANSLATION_SOURCES',
['http://download.geonames.org/export/dump/alternateNames.zip'])
TRANSLATION_LANGUAGES = getattr(settings, 'CITIES_LIGHT_TRANSLATION_LANGUAGES',
['es', 'en', 'pt', 'de', 'pl', 'abbr'])
SOURCES = list(COUNTRY_SOURCES) + list(REGION_SOURCES) + list(CITY_SOURCES)
SOURCES += TRANSLATION_SOURCES
DATA_DIR = getattr(settings, 'CITIES_LIGHT_DATA_DIR',
os.path.normpath(os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'data')))
INCLUDE_COUNTRIES = getattr(settings, 'CITIES_LIGHT_INCLUDE_COUNTRIES', None)
# Feature codes are described in the "P city, village" section at
# http://www.geonames.org/export/codes.html
INCLUDE_CITY_TYPES = getattr(
settings,
'CITIES_LIGHT_INCLUDE_CITY_TYPES',
['PPL', 'PPLA', 'PPLA2', 'PPLA3', 'PPLA4', 'PPLC',
'PPLF', 'PPLG', 'PPLL', 'PPLR', 'PPLS', 'STLMT']
)
# MySQL doesn't support indexing TextFields
INDEX_SEARCH_NAMES = getattr(settings, 'CITIES_LIGHT_INDEX_SEARCH_NAMES', None)
if INDEX_SEARCH_NAMES is None:
INDEX_SEARCH_NAMES = True
for database in list(settings.DATABASES.values()):
if 'mysql' in database['ENGINE'].lower():
INDEX_SEARCH_NAMES = False
DEFAULT_APP_NAME = 'cities_light'
CITIES_LIGHT_APP_NAME = getattr(settings, 'CITIES_LIGHT_APP_NAME',
DEFAULT_APP_NAME)
FIXTURES_BASE_URL = getattr(
settings,
'CITIES_LIGHT_FIXTURES_BASE_URL',
'file://{0}'.format(os.path.join(DATA_DIR, 'fixtures/'))
)
class ICountry:
"""
Country field indexes in geonames.
"""
code = 0
code3 = 1
codeNum = 2
fips = 3
name = 4
capital = 5
area = 6
population = 7
continent = 8
tld = 9
currencyCode = 10
currencyName = 11
phone = 12
postalCodeFormat = 13
postalCodeRegex = 14
languages = 15
geonameid = 16
neighbours = 17
equivalentFips = 18
class IRegion:
"""
Region field indexes in geonames.
"""
code = 0
name = 1
asciiName = 2
geonameid = 3
class ICity:
"""
City field indexes in geonames.
Description of fields: http://download.geonames.org/export/dump/readme.txt
"""
geonameid = 0
name = 1
asciiName = 2
alternateNames = 3
latitude = 4
longitude = 5
featureClass = 6
featureCode = 7
countryCode = 8
cc2 = 9
admin1Code = 10
admin2Code = 11
admin3Code = 12
admin4Code = 13
population = 14
elevation = 15
gtopo30 = 16
timezone = 17
modificationDate = 18
class IAlternate:
"""
Alternate names field indexes in geonames.
Description of fields: http://download.geonames.org/export/dump/readme.txt
"""
nameid = 0
geonameid = 1
language = 2
name = 3
isPreferred = 4
isShort = 5
isColloquial = 6
isHistoric = 7
| mit | 3,973,901,557,207,205,000 | 30.074236 | 79 | 0.68086 | false |
xchen101/analysis-preservation.cern.ch | cap/modules/experiments/permissions/__init__.py | 1 | 1447 | # -*- coding: utf-8 -*-
#
# This file is part of CERN Analysis Preservation Framework.
# Copyright (C) 2016 CERN.
#
# CERN Analysis Preservation Framework is free software; you can redistribute
# it and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# CERN Analysis Preservation Framework is distributed in the hope that it will
# be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with CERN Analysis Preservation Framework; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""CAP deposit permissions"""
from alice import alice_permission
from atlas import atlas_permission
from cms import cms_permission
from lhcb import lhcb_permission
collaboration_permissions = {
'ALICE': alice_permission,
'ATLAS': atlas_permission,
'CMS': cms_permission,
'LHCb': lhcb_permission,
}
__all__ = ['collaboration_permissions']
| gpl-2.0 | -6,753,410,607,367,365,000 | 34.292683 | 78 | 0.758811 | false |
henrysher/aws-cloudinit | cloudinit/util.py | 1 | 53944 | # vi: ts=4 expandtab
#
# Copyright (C) 2012 Canonical Ltd.
# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
# Copyright (C) 2012 Yahoo! Inc.
# Copyright (C) 2014 Amazon.com, Inc. or its affiliates.
#
# Author: Scott Moser <[email protected]>
# Author: Juerg Haefliger <[email protected]>
# Author: Joshua Harlow <[email protected]>
# Author: Andrew Jorgensen <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# pylint: disable=C0302
from StringIO import StringIO
import base64
import contextlib
import copy as obj_copy
import errno
import glob
import grp
import gzip
import hashlib
import os
import platform
import pwd
import re
import random
import shutil
import socket
import stat
import string # pylint: disable=W0402
import subprocess
import sys
import tempfile
import time
import urlparse
import yaml
from cloudinit import importer
from cloudinit import log as logging
from cloudinit import mergers
from cloudinit import safeyaml
from cloudinit import type_utils
from cloudinit import url_helper
from cloudinit import version
from cloudinit.settings import (CFG_BUILTIN)
_DNS_REDIRECT_IP = None
LOG = logging.getLogger(__name__)
# Helps cleanup filenames to ensure they aren't FS incompatible
FN_REPLACEMENTS = {
os.sep: '_',
}
FN_ALLOWED = ('_-.()' + string.digits + string.ascii_letters)
# Helper utils to see if running in a container
CONTAINER_TESTS = ['running-in-container', 'lxc-is-container']
# An imperfect, but close enough regex to detect Base64 encoding
BASE64 = re.compile('^[A-Za-z0-9+/\-_\n]+=?=?$')
# Made to have same accessors as UrlResponse so that the
# read_file_or_url can return this or that object and the
# 'user' of those objects will not need to know the difference.
class StringResponse(object):
def __init__(self, contents, code=200):
self.code = code
self.headers = {}
self.contents = contents
self.url = None
def ok(self, *args, **kwargs): # pylint: disable=W0613
if self.code != 200:
return False
return True
def __str__(self):
return self.contents
class FileResponse(StringResponse):
def __init__(self, path, contents, code=200):
StringResponse.__init__(self, contents, code=code)
self.url = path
class ProcessExecutionError(IOError):
MESSAGE_TMPL = ('%(description)s\n'
'Command: %(cmd)s\n'
'Exit code: %(exit_code)s\n'
'Reason: %(reason)s\n'
'Stdout: %(stdout)r\n'
'Stderr: %(stderr)r')
def __init__(self, stdout=None, stderr=None,
exit_code=None, cmd=None,
description=None, reason=None):
if not cmd:
self.cmd = '-'
else:
self.cmd = cmd
if not description:
self.description = 'Unexpected error while running command.'
else:
self.description = description
if not isinstance(exit_code, (long, int)):
self.exit_code = '-'
else:
self.exit_code = exit_code
if not stderr:
self.stderr = ''
else:
self.stderr = stderr
if not stdout:
self.stdout = ''
else:
self.stdout = stdout
if reason:
self.reason = reason
else:
self.reason = '-'
message = self.MESSAGE_TMPL % {
'description': self.description,
'cmd': self.cmd,
'exit_code': self.exit_code,
'stdout': self.stdout,
'stderr': self.stderr,
'reason': self.reason,
}
IOError.__init__(self, message)
class SeLinuxGuard(object):
def __init__(self, path, recursive=False):
# Late import since it might not always
# be possible to use this
try:
self.selinux = importer.import_module('selinux')
except ImportError:
self.selinux = None
self.path = path
self.recursive = recursive
def __enter__(self):
if self.selinux and self.selinux.is_selinux_enabled():
return True
else:
return False
def __exit__(self, excp_type, excp_value, excp_traceback):
if self.selinux and self.selinux.is_selinux_enabled():
path = os.path.realpath(os.path.expanduser(self.path))
do_restore = False
try:
# See if even worth restoring??
stats = os.lstat(path)
if stat.ST_MODE in stats:
self.selinux.matchpathcon(path, stats[stat.ST_MODE])
do_restore = True
except OSError:
pass
if do_restore:
LOG.debug("Restoring selinux mode for %s (recursive=%s)",
path, self.recursive)
self.selinux.restorecon(path, recursive=self.recursive)
class MountFailedError(Exception):
pass
class DecodingError(Exception):
pass
class DecompressionError(Exception):
pass
def ExtendedTemporaryFile(**kwargs):
fh = tempfile.NamedTemporaryFile(**kwargs)
# Replace its unlink with a quiet version
# that does not raise errors when the
# file to unlink has been unlinked elsewhere..
LOG.debug("Created temporary file %s", fh.name)
fh.unlink = del_file
# Add a new method that will unlink
# right 'now' but still lets the exit
# method attempt to remove it (which will
# not throw due to our del file being quiet
# about files that are not there)
def unlink_now():
fh.unlink(fh.name)
setattr(fh, 'unlink_now', unlink_now)
return fh
def fork_cb(child_cb, *args):
fid = os.fork()
if fid == 0:
try:
child_cb(*args)
os._exit(0) # pylint: disable=W0212
except:
logexc(LOG, ("Failed forking and"
" calling callback %s"),
type_utils.obj_name(child_cb))
os._exit(1) # pylint: disable=W0212
else:
LOG.debug("Forked child %s who will run callback %s",
fid, type_utils.obj_name(child_cb))
def is_true(val, addons=None):
if isinstance(val, (bool)):
return val is True
check_set = ['true', '1', 'on', 'yes']
if addons:
check_set = check_set + addons
if str(val).lower().strip() in check_set:
return True
return False
def is_false(val, addons=None):
if isinstance(val, (bool)):
return val is False
check_set = ['off', '0', 'no', 'false']
if addons:
check_set = check_set + addons
if str(val).lower().strip() in check_set:
return True
return False
def translate_bool(val, addons=None):
if not val:
# This handles empty lists and false and
# other things that python believes are false
return False
# If its already a boolean skip
if isinstance(val, (bool)):
return val
return is_true(val, addons)
def rand_str(strlen=32, select_from=None):
if not select_from:
select_from = string.letters + string.digits
return "".join([random.choice(select_from) for _x in range(0, strlen)])
def read_conf(fname):
try:
return load_yaml(load_file(fname), default={})
except IOError as e:
if e.errno == errno.ENOENT:
return {}
else:
raise
# Merges X lists, and then keeps the
# unique ones, but orders by sort order
# instead of by the original order
def uniq_merge_sorted(*lists):
return sorted(uniq_merge(*lists))
# Merges X lists and then iterates over those
# and only keeps the unique items (order preserving)
# and returns that merged and uniqued list as the
# final result.
#
# Note: if any entry is a string it will be
# split on commas and empty entries will be
# evicted and merged in accordingly.
def uniq_merge(*lists):
combined_list = []
for a_list in lists:
if isinstance(a_list, (str, basestring)):
a_list = a_list.strip().split(",")
# Kickout the empty ones
a_list = [a for a in a_list if len(a)]
combined_list.extend(a_list)
return uniq_list(combined_list)
def clean_filename(fn):
for (k, v) in FN_REPLACEMENTS.iteritems():
fn = fn.replace(k, v)
removals = []
for k in fn:
if k not in FN_ALLOWED:
removals.append(k)
for k in removals:
fn = fn.replace(k, '')
fn = fn.strip()
return fn
def decode_base64(data, quiet=True):
try:
# Some builds of python don't throw an exception when the data is not
# proper Base64, so we check it first.
if BASE64.match(data):
return base64.urlsafe_b64decode(data)
else:
return data
except Exception as e:
if quiet:
return data
else:
raise DecodingError(str(e))
def decomp_gzip(data, quiet=True):
try:
buf = StringIO(str(data))
with contextlib.closing(gzip.GzipFile(None, "rb", 1, buf)) as gh:
return gh.read()
except Exception as e:
if quiet:
return data
else:
raise DecompressionError(str(e))
def extract_usergroup(ug_pair):
if not ug_pair:
return (None, None)
ug_parted = ug_pair.split(':', 1)
u = ug_parted[0].strip()
if len(ug_parted) == 2:
g = ug_parted[1].strip()
else:
g = None
if not u or u == "-1" or u.lower() == "none":
u = None
if not g or g == "-1" or g.lower() == "none":
g = None
return (u, g)
def find_modules(root_dir):
entries = dict()
for fname in glob.glob(os.path.join(root_dir, "*.py")):
if not os.path.isfile(fname):
continue
modname = os.path.basename(fname)[0:-3]
modname = modname.strip()
if modname and modname.find(".") == -1:
entries[fname] = modname
return entries
def multi_log(text, console=True, stderr=True,
log=None, log_level=logging.DEBUG):
if stderr:
sys.stderr.write(text)
if console:
# Some containers lack /dev/console, so we send output to
# stdout and configure upstart with "console output" and
# systemd with "journal+console" and let them take care of
# getting output to the console.
print text
if log:
if text[-1] == "\n":
log.log(log_level, text[:-1])
else:
log.log(log_level, text)
def is_ipv4(instr):
"""determine if input string is a ipv4 address. return boolean."""
toks = instr.split('.')
if len(toks) != 4:
return False
try:
toks = [x for x in toks if (int(x) < 256 and int(x) > 0)]
except:
return False
return (len(toks) == 4)
def get_cfg_option_bool(yobj, key, default=False):
if key not in yobj:
return default
return translate_bool(yobj[key])
def get_cfg_option_str(yobj, key, default=None):
if key not in yobj:
return default
val = yobj[key]
if not isinstance(val, (str, basestring)):
val = str(val)
return val
def system_info():
return {
'platform': platform.platform(),
'release': platform.release(),
'python': platform.python_version(),
'uname': platform.uname(),
'dist': platform.linux_distribution(),
}
def get_cfg_option_list(yobj, key, default=None):
"""
Gets the C{key} config option from C{yobj} as a list of strings. If the
key is present as a single string it will be returned as a list with one
string arg.
@param yobj: The configuration object.
@param key: The configuration key to get.
@param default: The default to return if key is not found.
@return: The configuration option as a list of strings or default if key
is not found.
"""
if not key in yobj:
return default
if yobj[key] is None:
return []
val = yobj[key]
if isinstance(val, (list)):
cval = [v for v in val]
return cval
if not isinstance(val, (basestring)):
val = str(val)
return [val]
# get a cfg entry by its path array
# for f['a']['b']: get_cfg_by_path(mycfg,('a','b'))
def get_cfg_by_path(yobj, keyp, default=None):
cur = yobj
for tok in keyp:
if tok not in cur:
return default
cur = cur[tok]
return cur
def fixup_output(cfg, mode):
(outfmt, errfmt) = get_output_cfg(cfg, mode)
redirect_output(outfmt, errfmt)
return (outfmt, errfmt)
# redirect_output(outfmt, errfmt, orig_out, orig_err)
# replace orig_out and orig_err with filehandles specified in outfmt or errfmt
# fmt can be:
# > FILEPATH
# >> FILEPATH
# | program [ arg1 [ arg2 [ ... ] ] ]
#
# with a '|', arguments are passed to shell, so one level of
# shell escape is required.
#
# if _CLOUD_INIT_SAVE_STDOUT is set in environment to a non empty and true
# value then output input will not be closed (useful for debugging).
#
def redirect_output(outfmt, errfmt, o_out=None, o_err=None):
if is_true(os.environ.get("_CLOUD_INIT_SAVE_STDOUT")):
LOG.debug("Not redirecting output due to _CLOUD_INIT_SAVE_STDOUT")
return
if not o_out:
o_out = sys.stdout
if not o_err:
o_err = sys.stderr
if outfmt:
LOG.debug("Redirecting %s to %s", o_out, outfmt)
(mode, arg) = outfmt.split(" ", 1)
if mode == ">" or mode == ">>":
owith = "ab"
if mode == ">":
owith = "wb"
new_fp = open(arg, owith)
elif mode == "|":
proc = subprocess.Popen(arg, shell=True, stdin=subprocess.PIPE)
new_fp = proc.stdin # pylint: disable=E1101
else:
raise TypeError("Invalid type for output format: %s" % outfmt)
if o_out:
os.dup2(new_fp.fileno(), o_out.fileno())
if errfmt == outfmt:
LOG.debug("Redirecting %s to %s", o_err, outfmt)
os.dup2(new_fp.fileno(), o_err.fileno())
return
if errfmt:
LOG.debug("Redirecting %s to %s", o_err, errfmt)
(mode, arg) = errfmt.split(" ", 1)
if mode == ">" or mode == ">>":
owith = "ab"
if mode == ">":
owith = "wb"
new_fp = open(arg, owith)
elif mode == "|":
proc = subprocess.Popen(arg, shell=True, stdin=subprocess.PIPE)
new_fp = proc.stdin # pylint: disable=E1101
else:
raise TypeError("Invalid type for error format: %s" % errfmt)
if o_err:
os.dup2(new_fp.fileno(), o_err.fileno())
def make_url(scheme, host, port=None,
path='', params='', query='', fragment=''):
pieces = []
pieces.append(scheme or '')
netloc = ''
if host:
netloc = str(host)
if port is not None:
netloc += ":" + "%s" % (port)
pieces.append(netloc or '')
pieces.append(path or '')
pieces.append(params or '')
pieces.append(query or '')
pieces.append(fragment or '')
return urlparse.urlunparse(pieces)
def mergemanydict(srcs, reverse=False):
if reverse:
srcs = reversed(srcs)
merged_cfg = {}
for cfg in srcs:
if cfg:
# Figure out which mergers to apply...
mergers_to_apply = mergers.dict_extract_mergers(cfg)
if not mergers_to_apply:
mergers_to_apply = mergers.default_mergers()
merger = mergers.construct(mergers_to_apply)
merged_cfg = merger.merge(merged_cfg, cfg)
return merged_cfg
@contextlib.contextmanager
def chdir(ndir):
curr = os.getcwd()
try:
os.chdir(ndir)
yield ndir
finally:
os.chdir(curr)
@contextlib.contextmanager
def umask(n_msk):
old = os.umask(n_msk)
try:
yield old
finally:
os.umask(old)
@contextlib.contextmanager
def tempdir(**kwargs):
# This seems like it was only added in python 3.2
# Make it since its useful...
# See: http://bugs.python.org/file12970/tempdir.patch
tdir = tempfile.mkdtemp(**kwargs)
try:
yield tdir
finally:
del_dir(tdir)
def center(text, fill, max_len):
return '{0:{fill}{align}{size}}'.format(text, fill=fill,
align="^", size=max_len)
def del_dir(path):
LOG.debug("Recursively deleting %s", path)
shutil.rmtree(path)
def runparts(dirp, skip_no_exist=True):
if skip_no_exist and not os.path.isdir(dirp):
return
failed = []
attempted = []
for exe_name in sorted(os.listdir(dirp)):
exe_path = os.path.join(dirp, exe_name)
if os.path.isfile(exe_path) and os.access(exe_path, os.X_OK):
attempted.append(exe_path)
try:
# Use shell=True so that if the user omits the #!, there is
# still some chance it will succeed.
subp([exe_path], capture=False, shell=True)
except ProcessExecutionError as e:
logexc(LOG, "Failed running %s [%s]", exe_path, e.exit_code)
failed.append(e)
if failed and attempted:
raise RuntimeError('Runparts: %s failures in %s attempted commands'
% (len(failed), len(attempted)))
# read_optional_seed
# returns boolean indicating success or failure (presense of files)
# if files are present, populates 'fill' dictionary with 'user-data' and
# 'meta-data' entries
def read_optional_seed(fill, base="", ext="", timeout=5):
try:
(md, ud) = read_seeded(base, ext, timeout)
fill['user-data'] = ud
fill['meta-data'] = md
return True
except IOError as e:
if e.errno == errno.ENOENT:
return False
raise
def fetch_ssl_details(paths=None):
ssl_details = {}
# Lookup in these locations for ssl key/cert files
ssl_cert_paths = [
'/var/lib/cloud/data/ssl',
'/var/lib/cloud/instance/data/ssl',
]
if paths:
ssl_cert_paths.extend([
os.path.join(paths.get_ipath_cur('data'), 'ssl'),
os.path.join(paths.get_cpath('data'), 'ssl'),
])
ssl_cert_paths = uniq_merge(ssl_cert_paths)
ssl_cert_paths = [d for d in ssl_cert_paths if d and os.path.isdir(d)]
cert_file = None
for d in ssl_cert_paths:
if os.path.isfile(os.path.join(d, 'cert.pem')):
cert_file = os.path.join(d, 'cert.pem')
break
key_file = None
for d in ssl_cert_paths:
if os.path.isfile(os.path.join(d, 'key.pem')):
key_file = os.path.join(d, 'key.pem')
break
if cert_file and key_file:
ssl_details['cert_file'] = cert_file
ssl_details['key_file'] = key_file
elif cert_file:
ssl_details['cert_file'] = cert_file
return ssl_details
def read_file_or_url(url, timeout=5, retries=10,
headers=None, data=None, sec_between=1, ssl_details=None,
headers_cb=None):
url = url.lstrip()
if url.startswith("/"):
url = "file://%s" % url
if url.lower().startswith("file://"):
if data:
LOG.warn("Unable to post data to file resource %s", url)
file_path = url[len("file://"):]
return FileResponse(file_path, contents=load_file(file_path))
else:
return url_helper.readurl(url,
timeout=timeout,
retries=retries,
headers=headers,
headers_cb=headers_cb,
data=data,
sec_between=sec_between,
ssl_details=ssl_details)
def load_yaml(blob, default=None, allowed=(dict,)):
loaded = default
try:
blob = str(blob)
LOG.debug(("Attempting to load yaml from string "
"of length %s with allowed root types %s"),
len(blob), allowed)
converted = safeyaml.load(blob)
if not isinstance(converted, allowed):
# Yes this will just be caught, but thats ok for now...
raise TypeError(("Yaml load allows %s root types,"
" but got %s instead") %
(allowed, type_utils.obj_name(converted)))
loaded = converted
except (yaml.YAMLError, TypeError, ValueError):
if len(blob) == 0:
LOG.debug("load_yaml given empty string, returning default")
else:
logexc(LOG, "Failed loading yaml blob")
return loaded
def read_seeded(base="", ext="", timeout=5, retries=10, file_retries=0):
if base.startswith("/"):
base = "file://%s" % base
# default retries for file is 0. for network is 10
if base.startswith("file://"):
retries = file_retries
if base.find("%s") >= 0:
ud_url = base % ("user-data" + ext)
md_url = base % ("meta-data" + ext)
else:
ud_url = "%s%s%s" % (base, "user-data", ext)
md_url = "%s%s%s" % (base, "meta-data", ext)
md_resp = read_file_or_url(md_url, timeout, retries, file_retries)
md = None
if md_resp.ok():
md_str = str(md_resp)
md = load_yaml(md_str, default={})
ud_resp = read_file_or_url(ud_url, timeout, retries, file_retries)
ud = None
if ud_resp.ok():
ud_str = str(ud_resp)
ud = ud_str
return (md, ud)
def read_conf_d(confd):
# Get reverse sorted list (later trumps newer)
confs = sorted(os.listdir(confd), reverse=True)
# Remove anything not ending in '.cfg'
confs = [f for f in confs if f.endswith(".cfg")]
# Remove anything not a file
confs = [f for f in confs
if os.path.isfile(os.path.join(confd, f))]
# Load them all so that they can be merged
cfgs = []
for fn in confs:
cfgs.append(read_conf(os.path.join(confd, fn)))
return mergemanydict(cfgs)
def read_conf_with_confd(cfgfile):
cfg = read_conf(cfgfile)
confd = False
if "conf_d" in cfg:
confd = cfg['conf_d']
if confd:
if not isinstance(confd, (str, basestring)):
raise TypeError(("Config file %s contains 'conf_d' "
"with non-string type %s") %
(cfgfile, type_utils.obj_name(confd)))
else:
confd = str(confd).strip()
elif os.path.isdir("%s.d" % cfgfile):
confd = "%s.d" % cfgfile
if not confd or not os.path.isdir(confd):
return cfg
# Conf.d settings override input configuration
confd_cfg = read_conf_d(confd)
return mergemanydict([confd_cfg, cfg])
def read_cc_from_cmdline(cmdline=None):
# this should support reading cloud-config information from
# the kernel command line. It is intended to support content of the
# format:
# cc: <yaml content here> [end_cc]
# this would include:
# cc: ssh_import_id: [smoser, kirkland]\\n
# cc: ssh_import_id: [smoser, bob]\\nruncmd: [ [ ls, -l ], echo hi ] end_cc
# cc:ssh_import_id: [smoser] end_cc cc:runcmd: [ [ ls, -l ] ] end_cc
if cmdline is None:
cmdline = get_cmdline()
tag_begin = "cc:"
tag_end = "end_cc"
begin_l = len(tag_begin)
end_l = len(tag_end)
clen = len(cmdline)
tokens = []
begin = cmdline.find(tag_begin)
while begin >= 0:
end = cmdline.find(tag_end, begin + begin_l)
if end < 0:
end = clen
tokens.append(cmdline[begin + begin_l:end].lstrip().replace("\\n",
"\n"))
begin = cmdline.find(tag_begin, end + end_l)
return '\n'.join(tokens)
def dos2unix(contents):
# find first end of line
pos = contents.find('\n')
if pos <= 0 or contents[pos - 1] != '\r':
return contents
return contents.replace('\r\n', '\n')
def get_hostname_fqdn(cfg, cloud):
# return the hostname and fqdn from 'cfg'. If not found in cfg,
# then fall back to data from cloud
if "fqdn" in cfg:
# user specified a fqdn. Default hostname then is based off that
fqdn = cfg['fqdn']
hostname = get_cfg_option_str(cfg, "hostname", fqdn.split('.')[0])
else:
if "hostname" in cfg and cfg['hostname'].find('.') > 0:
# user specified hostname, and it had '.' in it
# be nice to them. set fqdn and hostname from that
fqdn = cfg['hostname']
hostname = cfg['hostname'][:fqdn.find('.')]
else:
# no fqdn set, get fqdn from cloud.
# get hostname from cfg if available otherwise cloud
fqdn = cloud.get_hostname(fqdn=True)
if "hostname" in cfg:
hostname = cfg['hostname']
else:
hostname = cloud.get_hostname()
return (hostname, fqdn)
def get_fqdn_from_hosts(hostname, filename="/etc/hosts"):
"""
For each host a single line should be present with
the following information:
IP_address canonical_hostname [aliases...]
Fields of the entry are separated by any number of blanks and/or tab
characters. Text from a "#" character until the end of the line is a
comment, and is ignored. Host names may contain only alphanumeric
characters, minus signs ("-"), and periods ("."). They must begin with
an alphabetic character and end with an alphanumeric character.
Optional aliases provide for name changes, alternate spellings, shorter
hostnames, or generic hostnames (for example, localhost).
"""
fqdn = None
try:
for line in load_file(filename).splitlines():
hashpos = line.find("#")
if hashpos >= 0:
line = line[0:hashpos]
line = line.strip()
if not line:
continue
# If there there is less than 3 entries
# (IP_address, canonical_hostname, alias)
# then ignore this line
toks = line.split()
if len(toks) < 3:
continue
if hostname in toks[2:]:
fqdn = toks[1]
break
except IOError:
pass
return fqdn
def get_cmdline_url(names=('cloud-config-url', 'url'),
starts="#cloud-config", cmdline=None):
if cmdline is None:
cmdline = get_cmdline()
data = keyval_str_to_dict(cmdline)
url = None
key = None
for key in names:
if key in data:
url = data[key]
break
if not url:
return (None, None, None)
resp = read_file_or_url(url)
if resp.contents.startswith(starts) and resp.ok():
return (key, url, str(resp))
return (key, url, None)
def is_resolvable(name):
"""determine if a url is resolvable, return a boolean
This also attempts to be resilent against dns redirection.
Note, that normal nsswitch resolution is used here. So in order
to avoid any utilization of 'search' entries in /etc/resolv.conf
we have to append '.'.
The top level 'invalid' domain is invalid per RFC. And example.com
should also not exist. The random entry will be resolved inside
the search list.
"""
global _DNS_REDIRECT_IP # pylint: disable=W0603
if _DNS_REDIRECT_IP is None:
badips = set()
badnames = ("does-not-exist.example.com.", "example.invalid.",
rand_str())
badresults = {}
for iname in badnames:
try:
result = socket.getaddrinfo(iname, None, 0, 0,
socket.SOCK_STREAM, socket.AI_CANONNAME)
badresults[iname] = []
for (_fam, _stype, _proto, cname, sockaddr) in result:
badresults[iname].append("%s: %s" % (cname, sockaddr[0]))
badips.add(sockaddr[0])
except (socket.gaierror, socket.error):
pass
_DNS_REDIRECT_IP = badips
if badresults:
LOG.debug("detected dns redirection: %s" % badresults)
try:
result = socket.getaddrinfo(name, None)
# check first result's sockaddr field
addr = result[0][4][0]
if addr in _DNS_REDIRECT_IP:
return False
return True
except (socket.gaierror, socket.error):
return False
def get_hostname():
hostname = socket.gethostname()
return hostname
def is_resolvable_url(url):
"""determine if this url is resolvable (existing or ip)."""
return (is_resolvable(urlparse.urlparse(url).hostname))
def search_for_mirror(candidates):
"""
Search through a list of mirror urls for one that works
This needs to return quickly.
"""
for cand in candidates:
try:
# Allow either a proper URL or a bare hostname / IP
if is_resolvable_url(cand) or is_resolvable(cand):
return cand
except Exception:
pass
return None
def close_stdin():
"""
reopen stdin as /dev/null so even subprocesses or other os level things get
/dev/null as input.
if _CLOUD_INIT_SAVE_STDIN is set in environment to a non empty and true
value then input will not be closed (useful for debugging).
"""
if is_true(os.environ.get("_CLOUD_INIT_SAVE_STDIN")):
return
with open(os.devnull) as fp:
os.dup2(fp.fileno(), sys.stdin.fileno())
def find_devs_with(criteria=None, oformat='device',
tag=None, no_cache=False, path=None):
"""
find devices matching given criteria (via blkid)
criteria can be *one* of:
TYPE=<filesystem>
LABEL=<label>
UUID=<uuid>
"""
blk_id_cmd = ['blkid']
options = []
if criteria:
# Search for block devices with tokens named NAME that
# have the value 'value' and display any devices which are found.
# Common values for NAME include TYPE, LABEL, and UUID.
# If there are no devices specified on the command line,
# all block devices will be searched; otherwise,
# only search the devices specified by the user.
options.append("-t%s" % (criteria))
if tag:
# For each (specified) device, show only the tags that match tag.
options.append("-s%s" % (tag))
if no_cache:
# If you want to start with a clean cache
# (i.e. don't report devices previously scanned
# but not necessarily available at this time), specify /dev/null.
options.extend(["-c", "/dev/null"])
if oformat:
# Display blkid's output using the specified format.
# The format parameter may be:
# full, value, list, device, udev, export
options.append('-o%s' % (oformat))
if path:
options.append(path)
cmd = blk_id_cmd + options
# See man blkid for why 2 is added
(out, _err) = subp(cmd, rcs=[0, 2])
entries = []
for line in out.splitlines():
line = line.strip()
if line:
entries.append(line)
return entries
def peek_file(fname, max_bytes):
LOG.debug("Peeking at %s (max_bytes=%s)", fname, max_bytes)
with open(fname, 'rb') as ifh:
return ifh.read(max_bytes)
def uniq_list(in_list):
out_list = []
for i in in_list:
if i in out_list:
continue
else:
out_list.append(i)
return out_list
def load_file(fname, read_cb=None, quiet=False):
LOG.debug("Reading from %s (quiet=%s)", fname, quiet)
ofh = StringIO()
try:
with open(fname, 'rb') as ifh:
pipe_in_out(ifh, ofh, chunk_cb=read_cb)
except IOError as e:
if not quiet:
raise
if e.errno != errno.ENOENT:
raise
contents = ofh.getvalue()
LOG.debug("Read %s bytes from %s", len(contents), fname)
return contents
def get_cmdline():
if 'DEBUG_PROC_CMDLINE' in os.environ:
cmdline = os.environ["DEBUG_PROC_CMDLINE"]
else:
try:
cmdline = load_file("/proc/cmdline").strip()
except:
cmdline = ""
return cmdline
def pipe_in_out(in_fh, out_fh, chunk_size=1024, chunk_cb=None):
bytes_piped = 0
while True:
data = in_fh.read(chunk_size)
if data == '':
break
else:
out_fh.write(data)
bytes_piped += len(data)
if chunk_cb:
chunk_cb(bytes_piped)
out_fh.flush()
return bytes_piped
def chownbyid(fname, uid=None, gid=None):
if uid in [None, -1] and gid in [None, -1]:
# Nothing to do
return
LOG.debug("Changing the ownership of %s to %s:%s", fname, uid, gid)
os.chown(fname, uid, gid)
def chownbyname(fname, user=None, group=None):
uid = -1
gid = -1
try:
if user:
uid = pwd.getpwnam(user).pw_uid
if group:
gid = grp.getgrnam(group).gr_gid
except KeyError as e:
raise OSError("Unknown user or group: %s" % (e))
chownbyid(fname, uid, gid)
# Always returns well formated values
# cfg is expected to have an entry 'output' in it, which is a dictionary
# that includes entries for 'init', 'config', 'final' or 'all'
# init: /var/log/cloud.out
# config: [ ">> /var/log/cloud-config.out", /var/log/cloud-config.err ]
# final:
# output: "| logger -p"
# error: "> /dev/null"
# this returns the specific 'mode' entry, cleanly formatted, with value
def get_output_cfg(cfg, mode):
ret = [None, None]
if not cfg or not 'output' in cfg:
return ret
outcfg = cfg['output']
if mode in outcfg:
modecfg = outcfg[mode]
else:
if 'all' not in outcfg:
return ret
# if there is a 'all' item in the output list
# then it applies to all users of this (init, config, final)
modecfg = outcfg['all']
# if value is a string, it specifies stdout and stderr
if isinstance(modecfg, str):
ret = [modecfg, modecfg]
# if its a list, then we expect (stdout, stderr)
if isinstance(modecfg, list):
if len(modecfg) > 0:
ret[0] = modecfg[0]
if len(modecfg) > 1:
ret[1] = modecfg[1]
# if it is a dictionary, expect 'out' and 'error'
# items, which indicate out and error
if isinstance(modecfg, dict):
if 'output' in modecfg:
ret[0] = modecfg['output']
if 'error' in modecfg:
ret[1] = modecfg['error']
# if err's entry == "&1", then make it same as stdout
# as in shell syntax of "echo foo >/dev/null 2>&1"
if ret[1] == "&1":
ret[1] = ret[0]
swlist = [">>", ">", "|"]
for i in range(len(ret)):
if not ret[i]:
continue
val = ret[i].lstrip()
found = False
for s in swlist:
if val.startswith(s):
val = "%s %s" % (s, val[len(s):].strip())
found = True
break
if not found:
# default behavior is append
val = "%s %s" % (">>", val.strip())
ret[i] = val
return ret
def logexc(log, msg, *args):
# Setting this here allows this to change
# levels easily (not always error level)
# or even desirable to have that much junk
# coming out to a non-debug stream
if msg:
log.warn(msg, *args)
# Debug gets the full trace
log.debug(msg, exc_info=1, *args)
def hash_blob(blob, routine, mlen=None):
hasher = hashlib.new(routine)
hasher.update(blob)
digest = hasher.hexdigest()
# Don't get to long now
if mlen is not None:
return digest[0:mlen]
else:
return digest
def is_user(name):
try:
if pwd.getpwnam(name):
return True
except KeyError:
return False
def is_group(name):
try:
if grp.getgrnam(name):
return True
except KeyError:
return False
def rename(src, dest):
LOG.debug("Renaming %s to %s", src, dest)
# TODO(harlowja) use a se guard here??
os.rename(src, dest)
def ensure_dirs(dirlist, mode=0755):
for d in dirlist:
ensure_dir(d, mode)
def read_write_cmdline_url(target_fn):
if not os.path.exists(target_fn):
try:
(key, url, content) = get_cmdline_url()
except:
logexc(LOG, "Failed fetching command line url")
return
try:
if key and content:
write_file(target_fn, content, mode=0600)
LOG.debug(("Wrote to %s with contents of command line"
" url %s (len=%s)"), target_fn, url, len(content))
elif key and not content:
LOG.debug(("Command line key %s with url"
" %s had no contents"), key, url)
except:
logexc(LOG, "Failed writing url content to %s", target_fn)
def yaml_dumps(obj):
formatted = yaml.dump(obj,
line_break="\n",
indent=4,
explicit_start=True,
explicit_end=True,
default_flow_style=False)
return formatted
def ensure_dir(path, mode=None):
if not os.path.isdir(path):
# Make the dir and adjust the mode
with SeLinuxGuard(os.path.dirname(path), recursive=True):
os.makedirs(path)
chmod(path, mode)
else:
# Just adjust the mode
chmod(path, mode)
@contextlib.contextmanager
def unmounter(umount):
try:
yield umount
finally:
if umount:
umount_cmd = ["umount", '-l', umount]
subp(umount_cmd)
def mounts():
mounted = {}
try:
# Go through mounts to see what is already mounted
mount_locs = load_file("/proc/mounts").splitlines()
for mpline in mount_locs:
# Format at: man fstab
try:
(dev, mp, fstype, opts, _freq, _passno) = mpline.split()
except:
continue
# If the name of the mount point contains spaces these
# can be escaped as '\040', so undo that..
mp = mp.replace("\\040", " ")
mounted[dev] = {
'fstype': fstype,
'mountpoint': mp,
'opts': opts,
}
LOG.debug("Fetched %s mounts from %s", mounted, "/proc/mounts")
except (IOError, OSError):
logexc(LOG, "Failed fetching mount points from /proc/mounts")
return mounted
def mount_cb(device, callback, data=None, rw=False, mtype=None, sync=True):
"""
Mount the device, call method 'callback' passing the directory
in which it was mounted, then unmount. Return whatever 'callback'
returned. If data != None, also pass data to callback.
"""
mounted = mounts()
with tempdir() as tmpd:
umount = False
if device in mounted:
mountpoint = mounted[device]['mountpoint']
else:
try:
mountcmd = ['mount']
mountopts = []
if rw:
mountopts.append('rw')
else:
mountopts.append('ro')
if sync:
# This seems like the safe approach to do
# (ie where this is on by default)
mountopts.append("sync")
if mountopts:
mountcmd.extend(["-o", ",".join(mountopts)])
if mtype:
mountcmd.extend(['-t', mtype])
mountcmd.append(device)
mountcmd.append(tmpd)
subp(mountcmd)
umount = tmpd # This forces it to be unmounted (when set)
mountpoint = tmpd
except (IOError, OSError) as exc:
raise MountFailedError(("Failed mounting %s "
"to %s due to: %s") %
(device, tmpd, exc))
# Be nice and ensure it ends with a slash
if not mountpoint.endswith("/"):
mountpoint += "/"
with unmounter(umount):
if data is None:
ret = callback(mountpoint)
else:
ret = callback(mountpoint, data)
return ret
def get_builtin_cfg():
# Deep copy so that others can't modify
return obj_copy.deepcopy(CFG_BUILTIN)
def sym_link(source, link):
LOG.debug("Creating symbolic link from %r => %r" % (link, source))
os.symlink(source, link)
def del_file(path):
LOG.debug("Attempting to remove %s", path)
try:
os.unlink(path)
except OSError as e:
if e.errno != errno.ENOENT:
raise e
def copy(src, dest):
LOG.debug("Copying %s to %s", src, dest)
shutil.copy(src, dest)
def time_rfc2822():
try:
ts = time.strftime("%a, %d %b %Y %H:%M:%S %z", time.gmtime())
except:
ts = "??"
return ts
def uptime():
uptime_str = '??'
try:
contents = load_file("/proc/uptime").strip()
if contents:
uptime_str = contents.split()[0]
except:
logexc(LOG, "Unable to read uptime from /proc/uptime")
return uptime_str
def append_file(path, content):
write_file(path, content, omode="ab", mode=None)
def ensure_file(path, mode=0644):
write_file(path, content='', omode="ab", mode=mode)
def safe_int(possible_int):
try:
return int(possible_int)
except (ValueError, TypeError):
return None
def chmod(path, mode):
real_mode = safe_int(mode)
if path and real_mode:
with SeLinuxGuard(path):
os.chmod(path, real_mode)
def write_file(filename, content, mode=0644, omode="wb"):
"""
Writes a file with the given content and sets the file mode as specified.
Resotres the SELinux context if possible.
@param filename: The full path of the file to write.
@param content: The content to write to the file.
@param mode: The filesystem mode to set on the file.
@param omode: The open mode used when opening the file (r, rb, a, etc.)
"""
ensure_dir(os.path.dirname(filename))
LOG.debug("Writing to %s - %s: [%s] %s bytes",
filename, omode, mode, len(content))
with SeLinuxGuard(path=filename):
with open(filename, omode) as fh:
fh.write(content)
fh.flush()
chmod(filename, mode)
def delete_dir_contents(dirname):
"""
Deletes all contents of a directory without deleting the directory itself.
@param dirname: The directory whose contents should be deleted.
"""
for node in os.listdir(dirname):
node_fullpath = os.path.join(dirname, node)
if os.path.isdir(node_fullpath):
del_dir(node_fullpath)
else:
del_file(node_fullpath)
def subp(args, data=None, rcs=None, env=None, capture=True, shell=False,
close_stdin=False, pipe_cat=False, logstring=False):
if data and close_stdin:
raise ValueError('Incompatible parameters: data and close_stdin')
if rcs is None:
rcs = [0]
try:
if not logstring:
LOG.debug(("Running command %s with allowed return codes %s"
" (shell=%s, capture=%s)"), args, rcs, shell, capture)
else:
LOG.debug(("Running hidden command to protect sensitive "
"input/output logstring: %s"), logstring)
if not capture:
stdout = None
stderr = None
else:
stdout = subprocess.PIPE
stderr = subprocess.PIPE
stdin = subprocess.PIPE
# Some processes are less chatty when piped through cat, because they
# won't detect a terminal (yum being a prime example).
if pipe_cat:
cat = subprocess.Popen('cat', stdout=stdout, stderr=stderr,
stdin=subprocess.PIPE)
sp = subprocess.Popen(args, stdout=cat.stdin,
stderr=stderr, stdin=stdin,
env=env, shell=shell)
if close_stdin:
sp.stdin.close()
(_out, err) = sp.communicate(data)
(out, _err) = cat.communicate()
else:
sp = subprocess.Popen(args, stdout=stdout,
stderr=stderr, stdin=stdin,
env=env, shell=shell)
if close_stdin:
sp.stdin.close()
(out, err) = sp.communicate(data)
except OSError as e:
raise ProcessExecutionError(cmd=args, reason=e)
rc = sp.returncode # pylint: disable=E1101
if rc not in rcs:
raise ProcessExecutionError(stdout=out, stderr=err,
exit_code=rc,
cmd=args)
# Just ensure blank instead of none?? (iff capturing)
if not out and capture:
out = ''
if not err and capture:
err = ''
return (out, err)
def make_header(comment_char="#", base='created'):
ci_ver = version.version_string()
header = str(comment_char)
header += " %s by cloud-init v. %s" % (base.title(), ci_ver)
header += " on %s" % time_rfc2822()
return header
def abs_join(*paths):
return os.path.abspath(os.path.join(*paths))
# shellify, takes a list of commands
# for each entry in the list
# if it is an array, shell protect it (with single ticks)
# if it is a string, do nothing
def shellify(cmdlist, add_header=True):
content = ''
if add_header:
content += "#!/bin/sh\n"
escaped = "%s%s%s%s" % ("'", '\\', "'", "'")
cmds_made = 0
for args in cmdlist:
# If the item is a list, wrap all items in single tick.
# If its not, then just write it directly.
if isinstance(args, list):
fixed = []
for f in args:
fixed.append("'%s'" % (str(f).replace("'", escaped)))
content = "%s%s\n" % (content, ' '.join(fixed))
cmds_made += 1
elif isinstance(args, (str, basestring)):
content = "%s%s\n" % (content, args)
cmds_made += 1
else:
raise RuntimeError(("Unable to shellify type %s"
" which is not a list or string")
% (type_utils.obj_name(args)))
LOG.debug("Shellified %s commands.", cmds_made)
return content
def is_container():
"""
Checks to see if this code running in a container of some sort
"""
for helper in CONTAINER_TESTS:
try:
# try to run a helper program. if it returns true/zero
# then we're inside a container. otherwise, no
subp([helper])
return True
except (IOError, OSError):
pass
# this code is largely from the logic in
# ubuntu's /etc/init/container-detect.conf
try:
# Detect old-style libvirt
# Detect OpenVZ containers
pid1env = get_proc_env(1)
if "container" in pid1env:
return True
if "LIBVIRT_LXC_UUID" in pid1env:
return True
except (IOError, OSError):
pass
# Detect OpenVZ containers
if os.path.isdir("/proc/vz") and not os.path.isdir("/proc/bc"):
return True
try:
# Detect Vserver containers
lines = load_file("/proc/self/status").splitlines()
for line in lines:
if line.startswith("VxID:"):
(_key, val) = line.strip().split(":", 1)
if val != "0":
return True
except (IOError, OSError):
pass
return False
def get_proc_env(pid):
"""
Return the environment in a dict that a given process id was started with.
"""
env = {}
fn = os.path.join("/proc/", str(pid), "environ")
try:
contents = load_file(fn)
toks = contents.split("\x00")
for tok in toks:
if tok == "":
continue
(name, val) = tok.split("=", 1)
if name:
env[name] = val
except (IOError, OSError):
pass
return env
def keyval_str_to_dict(kvstring):
ret = {}
for tok in kvstring.split():
try:
(key, val) = tok.split("=", 1)
except ValueError:
key = tok
val = True
ret[key] = val
return ret
def is_partition(device):
if device.startswith("/dev/"):
device = device[5:]
return os.path.isfile("/sys/class/block/%s/partition" % device)
def expand_package_list(version_fmt, pkgs):
# we will accept tuples, lists of tuples, or just plain lists
if not isinstance(pkgs, list):
pkgs = [pkgs]
pkglist = []
for pkg in pkgs:
if isinstance(pkg, basestring):
pkglist.append(pkg)
continue
if isinstance(pkg, (tuple, list)):
if len(pkg) < 1 or len(pkg) > 2:
raise RuntimeError("Invalid package & version tuple.")
if len(pkg) == 2 and pkg[1]:
pkglist.append(version_fmt % tuple(pkg))
continue
pkglist.append(pkg[0])
else:
raise RuntimeError("Invalid package type.")
return pkglist
def parse_mount_info(path, mountinfo_lines, log=LOG):
"""Return the mount information for PATH given the lines from
/proc/$$/mountinfo."""
path_elements = [e for e in path.split('/') if e]
devpth = None
fs_type = None
match_mount_point = None
match_mount_point_elements = None
for i, line in enumerate(mountinfo_lines):
parts = line.split()
# Completely fail if there is anything in any line that is
# unexpected, as continuing to parse past a bad line could
# cause an incorrect result to be returned, so it's better
# return nothing than an incorrect result.
# The minimum number of elements in a valid line is 10.
if len(parts) < 10:
log.debug("Line %d has two few columns (%d): %s",
i + 1, len(parts), line)
return None
mount_point = parts[4]
mount_point_elements = [e for e in mount_point.split('/') if e]
# Ignore mounts deeper than the path in question.
if len(mount_point_elements) > len(path_elements):
continue
# Ignore mounts where the common path is not the same.
l = min(len(mount_point_elements), len(path_elements))
if mount_point_elements[0:l] != path_elements[0:l]:
continue
# Ignore mount points higher than an already seen mount
# point.
if (match_mount_point_elements is not None and
len(match_mount_point_elements) > len(mount_point_elements)):
continue
# Find the '-' which terminates a list of optional columns to
# find the filesystem type and the path to the device. See
# man 5 proc for the format of this file.
try:
i = parts.index('-')
except ValueError:
log.debug("Did not find column named '-' in line %d: %s",
i + 1, line)
return None
# Get the path to the device.
try:
fs_type = parts[i + 1]
devpth = parts[i + 2]
except IndexError:
log.debug("Too few columns after '-' column in line %d: %s",
i + 1, line)
return None
match_mount_point = mount_point
match_mount_point_elements = mount_point_elements
if devpth and fs_type and match_mount_point:
return (devpth, fs_type, match_mount_point)
else:
return None
def get_mount_info(path, log=LOG):
# Use /proc/$$/mountinfo to find the device where path is mounted.
# This is done because with a btrfs filesystem using os.stat(path)
# does not return the ID of the device.
#
# Here, / has a device of 18 (decimal).
#
# $ stat /
# File: '/'
# Size: 234 Blocks: 0 IO Block: 4096 directory
# Device: 12h/18d Inode: 256 Links: 1
# Access: (0755/drwxr-xr-x) Uid: ( 0/ root) Gid: ( 0/ root)
# Access: 2013-01-13 07:31:04.358011255 +0000
# Modify: 2013-01-13 18:48:25.930011255 +0000
# Change: 2013-01-13 18:48:25.930011255 +0000
# Birth: -
#
# Find where / is mounted:
#
# $ mount | grep ' / '
# /dev/vda1 on / type btrfs (rw,subvol=@,compress=lzo)
#
# And the device ID for /dev/vda1 is not 18:
#
# $ ls -l /dev/vda1
# brw-rw---- 1 root disk 253, 1 Jan 13 08:29 /dev/vda1
#
# So use /proc/$$/mountinfo to find the device underlying the
# input path.
mountinfo_path = '/proc/%s/mountinfo' % os.getpid()
lines = load_file(mountinfo_path).splitlines()
return parse_mount_info(path, lines, log)
| gpl-3.0 | 131,001,434,945,240,060 | 29.119486 | 79 | 0.565012 | false |
cedricporter/funcat | funcat/helper.py | 1 | 1619 | # -*- coding: utf-8 -*-
#
from __future__ import print_function
import datetime
import numpy as np
from .context import ExecutionContext, set_current_security, set_current_date, symbol
from .utils import getsourcelines, FormulaException, get_int_date
def suppress_numpy_warn(func):
def wrapper(*args, **kwargs):
try:
old_settings = np.seterr(all='ignore')
return func(*args, **kwargs)
finally:
np.seterr(**old_settings) # reset to default
return wrapper
def choose(order_book_id, func, callback):
set_current_security(order_book_id)
try:
if func():
date = ExecutionContext.get_current_date()
callback(date, order_book_id, symbol(order_book_id))
except FormulaException as e:
pass
@suppress_numpy_warn
def select(func, start_date="2016-10-01", end_date=None, callback=print):
print(getsourcelines(func))
start_date = get_int_date(start_date)
if end_date is None:
end_date = datetime.date.today()
data_backend = ExecutionContext.get_data_backend()
order_book_id_list = data_backend.get_order_book_id_list()
trading_dates = data_backend.get_trading_dates(start=start_date, end=end_date)
for idx, date in enumerate(reversed(trading_dates)):
if end_date and date > get_int_date(end_date):
continue
if date < get_int_date(start_date):
break
set_current_date(str(date))
print("[{}]".format(date))
for order_book_id in order_book_id_list:
choose(order_book_id, func, callback)
print("")
| apache-2.0 | 2,940,868,850,329,783,300 | 29.54717 | 85 | 0.639901 | false |
seecr/meresco-sequentialstore | test/__init__.py | 1 | 1093 | ## begin license ##
#
# "Meresco SequentialStore" contains components facilitating efficient sequentially ordered storing and retrieval.
#
# Copyright (C) 2014 Seecr (Seek You Too B.V.) http://seecr.nl
# Copyright (C) 2014 Stichting Bibliotheek.nl (BNL) http://www.bibliotheek.nl
#
# This file is part of "Meresco SequentialStore"
#
# "Meresco SequentialStore" is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# "Meresco SequentialStore" is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with "Meresco SequentialStore"; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
## end license ##
| gpl-2.0 | 2,045,231,501,596,955,000 | 44.541667 | 114 | 0.761208 | false |
qunying/gps | share/library/save_on_compile.py | 1 | 1405 | """
Automatically saves the contents of the Messages window when a
compilation has finished.
The output is saved in a file called "messages.txt" (or some other name set in
the preferences) in the root project's object directory. If there is no object
directory for the root project, then the file is saved in the directory of the
project file itself.
"""
from GPS import Preference, Project, Console
from os.path import dirname, join
from gps_utils import hook
file_name_pref = Preference("Plugins/save_on_compile/file_name").create(
"File Name", "string",
"Name of the file you want to save the messages into",
"messages.txt")
@hook('compilation_finished')
def on_compilation_finished(category, target_name, mode_name, status):
obj_dirs = Project.root().object_dirs(False)
path = obj_dirs[0] if obj_dirs else dirname(Project.root().file().path)
base = file_name_pref.get()
if not base:
Console().write(
"plugin save_on_compile.py: no file name is specified in the"
" preferences\n")
else:
try:
full = join(path, base)
with open(full, "w") as f:
f.write(Console().get_text())
Console().write("Output saved in %s\n" % (full, ))
except:
Console().write(
"plugin save_on_compile.py: error saving in '%s'\n" % (
full, ))
| gpl-3.0 | -4,895,249,109,400,763,000 | 32.452381 | 78 | 0.637722 | false |
RoboPi-CSEDU/rupai | Test/test1.py | 1 | 1299 | from os import environ, path
from pocketsphinx.pocketsphinx import *
from sphinxbase.sphinxbase import *
#import pyaudio
MODELDIR = "model"
DATADIR = "data"
# Create a decoder with certain model
config = Decoder.default_config()
config.set_string('-hmm', path.join(MODELDIR, 'en-us'))
config.set_string('-lm', path.join(MODELDIR, 'small.lm'))
config.set_string('-dict', path.join(MODELDIR, 'small.dic'))
config.set_string('-mllr', path.join(MODELDIR, 'mllr_matrix'))
config.set_string('-logfn', '/dev/null')
#decoder = Decoder(config)
# Decode streaming data.
decoder = Decoder(config)
#decoder.set_kws('keyphrase', path.join(MODELDIR, 'phrase'))
#decoder.set_search('keyphrase')
decoder.start_utt()
stream = open(path.join(DATADIR, 'left1.wav'), 'rb')
#p = pyaudio.PyAudio()
#stream = p.open(format=pyaudio.paInt16, channels=1, rate=16000, input=True, frames_per_buffer=1024)
#stream.start_stream()
#buf = []
for i in range(100):
buf = stream.read(1024)
if buf:
decoder.process_raw(buf, False, False)
else:
break
decoder.end_utt()
hypothesis = decoder.hyp()
logmath = decoder.get_logmath()
print ('Best hypothesis: ', hypothesis.hypstr)
#if hypothesis.hypstr=='LEFT':
# print("LEFT found")
#print ('Best hypothesis segments: ', [seg.word for seg in decoder.seg()])
| mit | 365,164,771,618,399,900 | 22.618182 | 100 | 0.706697 | false |
CodeForPhilly/chime | src/penn_chime/view/st_display.py | 1 | 14906 | """Streamlit display."""
import os
import json
from logging import INFO, basicConfig, getLogger
import pandas as pd
import i18n
from sys import stdout
from ..constants import (
CHANGE_DATE,
DOCS_URL,
EPSILON,
FLOAT_INPUT_MIN,
FLOAT_INPUT_STEP,
VERSION,
)
from ..model.parameters import Parameters, Disposition
from ..utils import (
dataframe_to_base64,
excel_to_base64,
)
from .spreadsheet import spreadsheet
basicConfig(
level=INFO,
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
stream=stdout,
)
logger = getLogger(__name__)
hide_menu_style = """
<style>
#MainMenu {visibility: hidden;}
</style>
"""
########
# Text #
########
def display_header(st, m, p):
infected_population_warning_str = (
i18n.t("presentation-infected-population-warning")
if m.infected > p.population
else ""
)
st.markdown(
i18n.t("presentation-header"),
unsafe_allow_html=True,
)
st.markdown(i18n.t("presentation-notice"))
st.markdown(i18n.t("presentation-developed-by").format(
docs_url=DOCS_URL))
st.markdown(
i18n.t("presentation-estimated-number-of-infection")
.format(
total_infections=m.infected,
current_hosp=p.current_hospitalized,
hosp_rate=p.hospitalized.rate,
S=p.population,
market_share=p.market_share,
recovery_days=p.infectious_days,
r_naught=m.r_naught,
doubling_time=p.doubling_time,
daily_growth=m.daily_growth_rate * 100.0,
infected_population_warning_str=infected_population_warning_str,
mitigation_str=(
i18n.t("presentation-mitigation-rt-less-then-1")
if m.r_t < 1
else i18n.t("presentation-mitigation-rt-more-then-equal-1")
).format(
relative_contact_rate=p.relative_contact_rate,
doubling_time_t=abs(m.doubling_time_t),
r_t=m.r_t,
daily_growth_t=m.daily_growth_rate_t * 100.0,
),
)
)
return None
class Input:
"""Helper to separate Streamlit input definition from creation/rendering"""
def __init__(self, st_obj, label, value, kwargs):
self.st_obj = st_obj
self.label = label
self.value = value
self.kwargs = kwargs
def __call__(self):
return self.st_obj(self.label, value=self.value, **self.kwargs)
class NumberInput(Input):
def __init__(
self,
st_obj,
label,
min_value=None,
max_value=None,
value=None,
step=None,
format=None,
key=None,
):
kwargs = dict(
min_value=min_value,
max_value=max_value,
step=step,
format=format,
key=key,
)
super().__init__(st_obj.number_input, label, value, kwargs)
class DateInput(Input):
def __init__(self, st_obj, label, value=None, key=None):
kwargs = dict(key=key)
super().__init__(st_obj.date_input, label, value, kwargs)
class PercentInput(NumberInput):
def __init__(
self,
st_obj,
label,
min_value=0.0,
max_value=100.0,
value=None,
step=FLOAT_INPUT_STEP,
format="%f",
key=None,
):
super().__init__(
st_obj,
label,
min_value,
max_value,
value * 100.0,
step,
format,
key,
)
def __call__(self):
return super().__call__() / 100.0
class CheckboxInput(Input):
def __init__(self, st_obj, label, value=None, key=None):
kwargs = dict(key=key)
super().__init__(st_obj.checkbox, label, value, kwargs)
def display_sidebar(st, d: Parameters) -> Parameters:
"""
Initializes the UI in the sidebar. These function calls create input elements, and bind the values they are set to
to the appropriate variables. It's similar to Ember or Angular, if you are familiar with those frameworks.
"""
st_obj = st.sidebar
# used_widget_key = st.get_last_used_widget_key ( )
current_hospitalized_input = NumberInput(
st_obj,
i18n.t("presentation-current-hospitalized"),
min_value=0,
value=d.current_hospitalized,
step=1,
format="%i",
)
n_days_input = NumberInput(
st_obj,
i18n.t("presentation-n-days"),
min_value=1,
max_value=30,
value=d.n_days,
step=1,
format="%i",
)
doubling_time_input = NumberInput(
st_obj,
i18n.t("presentation-doubling-time"),
min_value=0.5,
value=d.doubling_time,
step=0.25,
format="%f",
)
current_date_input = DateInput(
st_obj, i18n.t("presentation-current-date"), value=d.current_date,
)
date_first_hospitalized_input = DateInput(
st_obj, i18n.t("presentation-date-first-hospitalized"),
value=d.date_first_hospitalized,
)
mitigation_date_input = DateInput(
st_obj, i18n.t("presentation-mitigation-date"),
value=d.mitigation_date
)
relative_contact_pct_input = PercentInput(
st_obj,
i18n.t("presentation-relative-contact-rate"),
min_value=0.0,
max_value=100.0,
value=d.relative_contact_rate,
step=1.0,
)
hospitalized_pct_input = PercentInput(
st_obj,
i18n.t("presentation-hospitalized-rate"),
value=d.hospitalized.rate,
min_value=FLOAT_INPUT_MIN,
max_value=100.0
)
icu_pct_input = PercentInput(
st_obj,
i18n.t("presentation-icu-rate"),
min_value=0.0,
value=d.icu.rate,
step=0.05
)
ventilated_pct_input = PercentInput(
st_obj, i18n.t("presentation-ventilated-rate"), value=d.ventilated.rate,
)
hospitalized_days_input = NumberInput(
st_obj,
i18n.t("presentation-hospitalized-days"),
min_value=1,
value=d.hospitalized.days,
step=1,
format="%i",
)
icu_days_input = NumberInput(
st_obj,
i18n.t("presentation-icu-days"),
min_value=1,
value=d.icu.days,
step=1,
format="%i",
)
ventilated_days_input = NumberInput(
st_obj,
i18n.t("presentation-ventilated-days"),
min_value=1,
value=d.ventilated.days,
step=1,
format="%i",
)
market_share_pct_input = PercentInput(
st_obj,
i18n.t("presentation-market-share"),
min_value=0.5,
value=d.market_share,
)
population_input = NumberInput(
st_obj,
i18n.t("presentation-population"),
min_value=1,
value=(d.population),
step=1,
format="%i",
)
infectious_days_input = NumberInput(
st_obj,
i18n.t("presentation-infectious-days"),
min_value=1,
value=d.infectious_days,
step=1,
format="%i",
)
max_y_axis_set_input = CheckboxInput(
st_obj, i18n.t("presentation-max-y-axis-set")
)
max_y_axis_input = NumberInput(
st_obj, i18n.t("presentation-max-y-axis"), value=500, format="%i", step=25
)
# Build in desired order
st.sidebar.markdown(
"""**CHIME [{version}](https://github.com/CodeForPhilly/chime/releases/tag/{version}) ({change_date})**""".format(
change_date=CHANGE_DATE,
version=VERSION,
)
)
st.sidebar.markdown(
"### {hospital_parameters} [ℹ]({docs_url}/what-is-chime/parameters#hospital-parameters)".format(
docs_url=DOCS_URL,
hospital_parameters=i18n.t("presentation-hospital-parameters")
)
)
population = population_input()
market_share = market_share_pct_input()
# known_infected = known_infected_input()
current_hospitalized = current_hospitalized_input()
st.sidebar.markdown(
"### {spread_and_contact_parameters} [ℹ]({docs_url}/what-is-chime/parameters#spread-and-contact-parameters)".format(
docs_url=DOCS_URL,
spread_and_contact_parameters=i18n.t("presentation-spread-and-contact-parameters")
)
)
if st.sidebar.checkbox(
i18n.t("presentation-first-hospitalized-check")
):
date_first_hospitalized = date_first_hospitalized_input()
doubling_time = None
else:
doubling_time = doubling_time_input()
date_first_hospitalized = None
if st.sidebar.checkbox(
i18n.t("presentation-social-distancing-implemented"),
value=(d.relative_contact_rate > EPSILON)
):
mitigation_date = mitigation_date_input()
relative_contact_rate = relative_contact_pct_input()
else:
mitigation_date = None
relative_contact_rate = EPSILON
st.sidebar.markdown(
"### {severity_parameters} [ℹ]({docs_url}/what-is-chime/parameters#severity-parameters)".format(
docs_url=DOCS_URL,
severity_parameters=i18n.t("presentation-severity-parameters")
)
)
hospitalized_rate = hospitalized_pct_input()
icu_rate = icu_pct_input()
ventilated_rate = ventilated_pct_input()
infectious_days = infectious_days_input()
hospitalized_days = hospitalized_days_input()
icu_days = icu_days_input()
ventilated_days = ventilated_days_input()
st.sidebar.markdown(
"### {display_parameters} [ℹ]({docs_url}/what-is-chime/parameters#display-parameters)".format(
docs_url=DOCS_URL,
display_parameters=i18n.t("presentation-display-parameters")
)
)
n_days = n_days_input()
max_y_axis_set = max_y_axis_set_input()
max_y_axis = None
if max_y_axis_set:
max_y_axis = max_y_axis_input()
current_date = current_date_input()
use_log_scale = st.sidebar.checkbox(label=i18n.t("presentation-logarithmic-scale"), value=d.use_log_scale)
# Subscribe implementation
subscribe(st_obj)
return Parameters(
current_hospitalized=current_hospitalized,
current_date=current_date,
date_first_hospitalized=date_first_hospitalized,
doubling_time=doubling_time,
hospitalized=Disposition.create(
rate=hospitalized_rate,
days=hospitalized_days),
icu=Disposition.create(
rate=icu_rate,
days=icu_days),
infectious_days=infectious_days,
market_share=market_share,
max_y_axis=max_y_axis,
mitigation_date=mitigation_date,
n_days=n_days,
population=population,
recovered=d.recovered,
relative_contact_rate=relative_contact_rate,
ventilated=Disposition.create(
rate=ventilated_rate,
days=ventilated_days),
use_log_scale=use_log_scale
)
# Read the environment variables and create json key object to use with ServiceAccountCredentials
def readGoogleApiSecrets():
client_secret = {}
os.getenv
type = os.getenv ('GAPI_CRED_TYPE').strip()
print (type)
client_secret['type'] = type,
client_secret['project_id'] = os.getenv ('GAPI_CRED_PROJECT_ID'),
client_secret['private_key_id'] = os.getenv ('GAPI_CRED_PRIVATE_KEY_ID'),
client_secret['private_key'] = os.getenv ('GAPI_CRED_PRIVATE_KEY'),
client_secret['client_email'] = os.getenv ('GAPI_CRED_CLIENT_EMAIL'),
client_secret['client_id'] = os.getenv ('GAPI_CRED_CLIENT_ID'),
client_secret['auth_uri'] = os.getenv ('GAPI_CRED_AUTH_URI'),
client_secret['token_uri'] = os.getenv ('GAPI_CRED_TOKEN_URI'),
client_secret['auth_provider_x509_cert_url'] = os.getenv ('GAPI_CRED_AUTH_PROVIDER_X509_CERT_URL'),
client_secret['client_x509_cert_url'] = os.getenv ('GAPI_CRED_CLIENT_X509_CERT_URI'),
json_data = json.dumps (client_secret)
print(json_data)
return json_data
def readGoogleApiSecretsDict():
type = os.getenv ('GAPI_CRED_TYPE')
project_id = os.getenv ('GAPI_CRED_PROJECT_ID')
private_key_id = os.getenv ('GAPI_CRED_PRIVATE_KEY_ID')
private_key = os.getenv ('GAPI_CRED_PRIVATE_KEY')
client_email = os.getenv ('GAPI_CRED_CLIENT_EMAIL')
client_id = os.getenv ('GAPI_CRED_CLIENT_ID')
auth_uri = os.getenv ('GAPI_CRED_AUTH_URI')
token_uri = os.getenv ('GAPI_CRED_TOKEN_URI')
auth_provider_x509_cert_url = os.getenv ('GAPI_CRED_AUTH_PROVIDER_X509_CERT_URL')
client_x509_cert_url = os.getenv ('GAPI_CRED_CLIENT_X509_CERT_URI')
secret = {
'type' : type,
'project_id' : project_id,
'private_key_id' : private_key_id,
'private_key':private_key,
'client_email': client_email,
'client_id': client_id,
'auth_uri': auth_uri,
'token_uri': token_uri,
'auth_provider_x509_cert_url':auth_provider_x509_cert_url,
'client_x509_cert_url':client_x509_cert_url
}
return secret
def subscribe(st_obj):
st_obj.subheader (i18n.t("presentation-subscribe"))
email = st_obj.text_input (label=i18n.t("presentation-enter-email"), value="", key="na_lower_1")
name = st_obj.text_input (label=i18n.t("presentation-enter-name"), value="", key="na_upper_1")
affiliation = st_obj.text_input (label=i18n.t("presentation-enter-affiliation"), value="", key="na_upper_2")
if st_obj.button (label=i18n.t("presentation-submit"), key="ta_submit_1"):
row = [email, name, affiliation]
send_subscription_to_google_sheet_secret_json(st_obj, row)
def send_subscription_to_google_sheet_secret_json(st_obj, row):
json_secret = "/mnt/google-api-creds/client_secret.json"
#print(json_secret)
spr = spreadsheet (st_obj, json_secret)
spr.writeToSheet("CHIME Form Submissions", row)
def send_subscription_to_google_sheet_secret_dict(st_obj, row):
json_secret = readGoogleApiSecretsDict()
#print(json_secret)
spr = spreadsheet(st_obj, json_secret)
spr.writeToSheet("CHIME Form Submissions", row)
def display_footer(st):
st.subheader(i18n.t("presentation-references-acknowledgements"))
st.markdown(
i18n.t("presentation-references-acknowledgements-text")
)
st.markdown(i18n.t("presentation-copyright"))
def display_download_link(st, p, filename: str, df: pd.DataFrame):
csv = dataframe_to_base64(df.rename(p.labels, axis=1))
st.markdown(
i18n.t("presentation-download").format(
csv=csv, filename=filename
),
unsafe_allow_html=True,
)
def display_excel_download_link(st, filename: str, src: str):
excel = excel_to_base64(src)
st.markdown(
i18n.t("presentation-excel-download").format(
excel=excel, filename=filename
),
unsafe_allow_html=True,
)
| mit | 632,211,188,996,764,700 | 29.972973 | 124 | 0.603101 | false |
frc1418/2015-robot | robot/components/drive.py | 1 | 4164 | import wpilib
from networktables import NetworkTable
class Drive(object):
'''
The sole interaction between the robot and its driving system
occurs here. Anything that wants to drive the robot must go
through this class.
'''
def __init__(self, robotDrive, gyro, backInfrared):
'''
Constructor.
:param robotDrive: a `wpilib.RobotDrive` object
'''
self.isTheRobotBackwards = False
# set defaults here
self.x = 0
self.y = 0
self.rotation = 0
self.gyro = gyro
self.angle_constant = .040
self.gyro_enabled = True
self.robotDrive = robotDrive
# Strafe stuff
self.backInfrared = backInfrared
sd = NetworkTable.getTable('SmartDashboard')
self.strafe_back_speed = sd.getAutoUpdateValue('strafe_back', .23)
self.strafe_fwd_speed = sd.getAutoUpdateValue('strafe_fwd', -.23)
# Top range: 50 is slow
# Low range: 10 is too much acceleration
self.strafe_adj = sd.getAutoUpdateValue('strafe_adj', 35)
#
# Verb functions -- these functions do NOT talk to motors directly. This
# allows multiple callers in the loop to call our functions without
# conflicts.
#
def move(self, y, x, rotation, squaredInputs=False):
'''
Causes the robot to move
:param x: The speed that the robot should drive in the X direction. 1 is right [-1.0..1.0]
:param y: The speed that the robot should drive in the Y direction. -1 is forward. [-1.0..1.0]
:param rotation: The rate of rotation for the robot that is completely independent of the translation. 1 is rotate to the right [-1.0..1.0]
:param squaredInputs: If True, the x and y values will be squared, allowing for more gradual speed.
'''
if squaredInputs:
if x >= 0.0:
x = (x * x)
else:
x = -(x * x)
if y >= 0.0:
y = (y * y)
else:
y = -(y * y)
self.x = x
self.y = y
self.rotation = max(min(1.0, rotation), -1) / 2.0
def set_gyro_enabled(self, value):
'''Enables the gyro
:param value: Whether or not the gyro is enabled
:type value: Boolean
'''
self.gyro_enabled = value
def return_gyro_angle(self):
''' Returns the gyro angle'''
return self.gyro.getAngle()
def reset_gyro_angle(self):
'''Resets the gyro angle'''
self.gyro.reset()
def set_angle_constant(self, constant):
'''Sets the constant that is used to determine the robot turning speed'''
self.angle_constant = constant
def angle_rotation(self, target_angle):
'''
Adjusts the robot so that it points at a particular angle. Returns True
if the robot is near the target angle, False otherwise
:param target_angle: Angle to point at, in degrees
:returns: True if near angle, False otherwise
'''
if not self.gyro_enabled:
return False
angleOffset = target_angle - self.return_gyro_angle()
if angleOffset < -1 or angleOffset > 1:
self.rotation = angleOffset * self.angle_constant
self.rotation = max(min(0.3, self.rotation), -0.3)
return False
return True
def set_direction(self, direction):
'''Used to reverse direction'''
self.isTheRobotBackwards = bool(direction)
def switch_direction(self):
'''when called the robot will reverse front/back'''
self.isTheRobotBackwards = not self.isTheRobotBackwards
def wall_goto(self):
'''back up until we are 16 cm away from the wall. Fake PID will move us closer and further to the wall'''
y = (self.backInfrared.getDistance() - 16.0)/self.strafe_adj.value
y = max(min(self.strafe_back_speed.value, y), self.strafe_fwd_speed.value)
self.y = y
return y
def wall_strafe(self, speed):
'''strafe against the wall'''
self.wall_goto()
self.x = speed
self.angle_rotation(0)
#
# Actually tells the motors to do something
#
def doit(self):
''' actually makes the robot drive'''
if(self.isTheRobotBackwards):
self.robotDrive.mecanumDrive_Cartesian(-self.x, -self.y, self.rotation , 0)
else:
self.robotDrive.mecanumDrive_Cartesian(self.x, self.y, self.rotation, 0)
# print('x=%s, y=%s, r=%s ' % (self.x, self.y, self.rotation))
# by default, the robot shouldn't move
self.x = 0
self.y = 0
self.rotation = 0
| apache-2.0 | 730,859,563,289,783,600 | 25.522293 | 143 | 0.670029 | false |
IntersectAustralia/asvo-tao | core/sageimport_mpi_BSP/settingReader.py | 1 | 2465 | import xml.etree.ElementTree as ET
import logging
## Helps reading XML setting file into a Hash Table of Running options and Tuples array which describes the SAGE fields and their data types
def ParseParams(FilePath):
## Init Return value
CurrentSAGEStruct=[]
RunningOptions=dict()
###############################################################################
###### Parse XML and load it as tree
tree = ET.ElementTree(file=FilePath)
SettingsNode = tree.getroot()
################################################################################
#### The first Node contain the sage fields and their data type mapping
#### Load them into tuple list (ordered list- The order is important)
sageFieldsNode=SettingsNode[0]
for sagefield in sageFieldsNode:
ExportInDB=True
if sagefield.attrib.has_key('DBExport') ==True:
ExportInDB=(sagefield.attrib['DBExport']=="1")
if sagefield.attrib.has_key('DBFieldName') ==False:
CurrentSAGEStruct.append([sagefield.text,sagefield.attrib['Type'],sagefield.text,ExportInDB])
else:
CurrentSAGEStruct.append([sagefield.text,sagefield.attrib['Type'],sagefield.attrib['DBFieldName'],ExportInDB])
##################################################################################
## Load PostGres information
## Running Options and PostgreSQL DB information will take the form of ["Header"."Child"]
pgNode=SettingsNode[1]
RunningOptions[pgNode.tag+':TreeTablePrefix']= pgNode.findall('TreeTablePrefix')[0].text
RunningOptions[pgNode.tag+':NewDBName']= pgNode.findall('NewDBName')[0].text
RunningOptions[pgNode.tag+':NewDBAlias']= pgNode.findall('NewDBAlias')[0].text
RunningOptions[pgNode.tag+':ServersCount']= pgNode.findall('ServersCount')[0].text
serversList=pgNode.findall('serverInfo')
ServerIndex=0
for pgfield in serversList:
for pgserverinfo in pgfield:
RunningOptions[pgNode.tag+':'+pgfield.tag+str(ServerIndex)+":"+pgserverinfo.tag]= pgserverinfo.text
ServerIndex=ServerIndex+1
##########################################################################
RunningSettingsNode=SettingsNode[2]
for settingfield in RunningSettingsNode:
RunningOptions[RunningSettingsNode.tag+':'+settingfield.tag]= settingfield.text
return [CurrentSAGEStruct,RunningOptions] | gpl-3.0 | -96,981,286,046,738,220 | 47.352941 | 140 | 0.604868 | false |
littmus/kutime_web | kutime/models.py | 1 | 4889 | #-*- coding:utf-8 -*-
from django.db import models
class College(models.Model):
number = models.CharField(max_length=4)
name = models.CharField(max_length=50)
CHOICES_CAMPUS = (
('A', u'안암'),
('S', u'세종'),
('G', u'대학원'),
('E', u'기타'),
)
campus = models.CharField(max_length=1, choices=CHOICES_CAMPUS, null=True)
CHOICES_TYPE = (
('M', u'학부 전공'),
('E', u'학부 교양/교직/기타'),
('G', u'대학원 전공'),
)
type = models.CharField(max_length=1, choices=CHOICES_TYPE)
class Meta:
app_label = 'kutime'
unique_together = (('number', 'campus'))
def __unicode__(self):
return self.name
class Department(models.Model):
col = models.ForeignKey(College)
number = models.CharField(max_length=4)
name = models.CharField(max_length=50)
class Meta:
app_label = 'kutime'
def __unicode__(self):
return u'%s - %s' % (self.col.name, self.name)
class DayAndPeriod(object):
day = None
period_start = None
period_end = None
def __init__(self, dayAndPeriod=None):
if dayAndPeriod is not None:
try:
day, period = dayAndPeriod.split('(')
self.day = day
period = period[:-1]
if '-' in period:
self.period_start, self.period_end = map(int, period.split('-'))
else:
self.period_start = self.period_end = int(period)
except Exception as e:
print str(e)
self.day = None
self.period_start = None
self.period_end = None
def __unicode__(self):
if self.period_start == self.period_end:
return u'%s(%d)' % (self.day, self.period_start)
else:
return u'%s(%d-%d)' % (self.day, self.period_start, self.period_end)
class DayAndPeriodField(models.CharField):
description = "A field for day and period pair"
__metaclass__ = models.SubfieldBase
def __init__(self, *args, **kwargs):
self.name = "DayAndPeriodField"
super(DayAndPeriodField, self).__init__(*args, **kwargs)
def to_python(self, value):
if value is None:
return ''
if isinstance(value, DayAndPeriod):
return value
if isinstance(value, list):
return ','.join([unicode(v) for v in value])
return value
def get_db_prep_value(self, value, connection=None, prepared=False):
if isinstance(value, basestring):
return value
elif isinstance(value, list):
return ','.join([unicode(v) for v in value])
elif isinstance(value, DayAndPeriod):
return unicode(value)
def value_to_string(self, obj):
value = self._get_val_from_obj(obj)
return self.get_db_prep_value(value)
class Lecture(models.Model):
year = models.IntegerField()
CHOICES_SEMESTER = (
('1R', u'1학기'),
('1S', u'여름학기'),
('2R', u'2학기'),
('2W', u'겨울학기'),
('SC', u'국제하계대학'),
)
semester = models.CharField(max_length=2, choices=CHOICES_SEMESTER)
col = models.ForeignKey(College)
dept = models.ForeignKey(Department)
number = models.CharField(max_length=7)
placement = models.CharField(max_length=2)
comp_div = models.CharField(max_length=50)
title = models.CharField(max_length=200)
professor = models.CharField(max_length=200)
credit = models.IntegerField()
time = models.IntegerField()
dayAndPeriod = DayAndPeriodField(max_length=500, null=True)
classroom = models.CharField(max_length=50, null=True)
isEnglish = models.BooleanField(default=False)
isRelative = models.BooleanField(default=True)
isLimitStudent = models.BooleanField(default=True)
isWaiting = models.BooleanField(default=True)
isExchange = models.BooleanField(default=True)
isSelfAttendCheck = models.BooleanField(default=False)
isNoSupervision = models.BooleanField(default=False)
note = models.TextField(null=True)
class Meta:
app_label = 'kutime'
unique_together = (('number', 'placement'))
def link_lecture_plan(self):
url = 'http://infodepot.korea.ac.kr/lecture1/lecsubjectPlanView.jsp?%(year)d&term=%(term)s&grad_cd=%(cols)s&dept_cd=%(dept)s&cour_cd=%(lec_num)s&cour_cls=%(placement)s' % {
'year': self.year,
'term': self.semester,
'cols': self.col.number,
'dept': self.dept.number,
'lec_num': self.number,
'placement': self.placement,
}
return url
def __unicode__(self):
return u'%s - %s' % (self.title, self.professor)
| mit | -4,243,619,782,773,681,700 | 29.00625 | 180 | 0.576547 | false |
zhzhzoo/pdfrw | tests/pagewrite.py | 1 | 2283 | #!/usr/bin/env python
import sys
import os
import traceback
import time
import gc
import hashlib
#gc.disable()
sys.path.insert(0, '../../PyPDF2/')
import PyPDF2
import find_pdfrw
import pdfrw
from PyPDF2 import PdfFileReader, PdfFileWriter
import find_pdfrw
from pdfrw import PdfReader, PdfWriter, PdfParseError
allfiles = (x.split('#',1)[0] for x in open('data/allpdfs.txt').read().splitlines())
allfiles = [x for x in allfiles if x]
badfiles = []
goodfiles = []
times = []
sys.setrecursionlimit(20000)
outdir = 'testout'
if not os.path.exists(outdir):
os.mkdir(outdir)
if 0:
reader, writer = PyPDF2.PdfFileReader, PyPDF2.PdfFileWriter
else:
reader, writer = pdfrw.PdfReader, pdfrw.PdfWriter
pdferr = pdfrw.PdfParseError
def test_pdf(pdfname):
outfn = os.path.join(outdir, hashlib.md5(pdfname).hexdigest() + '.pdf')
pdf_in = reader(open(pdfname))
pdf_out = writer()
for pg_num in range(pdf_in.numPages):
pdf_out.addPage(pdf_in.getPage(pg_num))
out_stream = open(outfn, "wb")
pdf_out.write(out_stream)
out_stream.close()
try:
for fname in allfiles:
#print >> sys.stderr, "File name", fname
print "File name", fname
sys.stdout.flush()
start = time.time()
try:
test_pdf(fname)
except Exception, s:
sys.stderr.flush()
ok = False
if isinstance(s, PdfParseError):
print '[ERROR]', s
else:
print traceback.format_exc()[-2000:]
#raise
else:
sys.stderr.flush()
ok = True
elapsed = time.time() - start
print ok and "[OK]" or "[FAIL]"
print
(badfiles, goodfiles)[ok].append(fname)
times.append((elapsed, fname))
except KeyboardInterrupt:
raise
pass
print "Total = %s, good = %s, bad = %s" % (len(times), len(goodfiles), len(badfiles))
times.sort()
times.reverse()
f = open('log.txt', 'a')
print >> f, '\n\n\n\n\n\n***************************************************************************\n\n\n'
for fname in goodfiles:
print >> f, 'good', fname
print >> f
for fname in badfiles:
print >> f, 'bad', fname
print >> f
for stuff in times:
print >> f, '%0.2f %s' % stuff
f.close()
| mit | -889,301,502,289,259,000 | 22.295918 | 107 | 0.587385 | false |
mattesCZ/mtbmap | map/altitude.py | 1 | 12426 | # -*- coding: utf-8 -*-
# Global imports
from numpy import *
import zipfile
# Django imports
from django.utils.translation import ugettext as _
# Local imports
from .printing import svg_string_to_png
from django.conf import settings
from routing.mathfunctions import haversine
NONE_HEIGHT = -32768
def height(point):
"""
Get height of point with coordinates: (lat,lon)
"""
hgt = ProfileNode(point[0], point[1]).srtm_height()
if hgt < 0:
return NONE_HEIGHT
else:
return hgt
def hgt_file_key(lat, lon):
"""
Compute height file key for given coordinates.
Format is (N|S).nn.(W|E).nnn
"""
ret_value = ''
if lat < 0:
lat = abs(lat) + 1
ret_value += 'S'
else:
ret_value += 'N'
ret_value += _zero_prefix(int(math.floor(lat)), 2)
if lon < 0:
lon = abs(lon) + 1
ret_value += 'W'
else:
ret_value += 'E'
ret_value += _zero_prefix(int(math.floor(lon)), 3)
return ret_value
def _zero_prefix(integer, length=3):
"""
Prepend zeros to have correct length.
"""
value = str(integer)
return '0'*(length - len(value)) + value
class HgtFile:
def __init__(self, node):
self.key = hgt_file_key(node.lat, node.lon)
zip_path = settings.SRTM_DATA
zip_file = zipfile.ZipFile(zip_path + self.key + '.hgt.zip', 'r')
zip_file_name = zip_file.namelist()[0]
hgt_string = zip_file.read(zip_file_name)
zip_file.close()
self.file = flipud(((fromstring(string=hgt_string, dtype='int16')).byteswap(False)).reshape(1201, 1201))
def height(self, lat, lon):
"""
Get height of corresponding pixel value in height file.
"""
return self.file[self.coord2array(lat)][self.coord2array(lon)]
@staticmethod
def coord2array(coord):
"""
Procedure which maps given latitude or longitude coordination to
hgtArray rows or columns.
"""
dec_part = coord - math.floor(coord)
return int(round(1200*dec_part))
class AltitudeProfile:
def __init__(self, points):
self.input_nodes = [ProfileNode(point[0], point[1]) for point in points]
self.sumdist = self.compute_distances(self.input_nodes)
self.nodes = self._insert_nodes(self.input_nodes)
self.status = self._initialize_hgt_files()
self.status -= self._compute_heights()
def svg_profile(self):
"""
Draws svg file from given nodes.
"""
if self.status != 0:
return NONE_HEIGHT
svg = ''
# write SVG headers
svg += '<?xml version="1.0" encoding="UTF-8"?>\n'
svg += '<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.0//EN"'
svg += ' "http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/svg10.dtd">\n'
svg += '<svg width="1010" height="300" viewBox="0 0 1010 300" xmlns="http://www.w3.org/2000/svg"'
svg += ' xmlns:xlink="http://www.w3.org/1999/xlink">\n'
# draw altitude fields
svg += ' <rect x="0" y="0" width="1010" height="300" fill="white" />\n'
svg += ' <rect x="35" y="30" width="950" height="246" fill="none" stroke="black" stroke-width="1" />\n'
max_height = -10000
min_height = 10000
# find min/max height
for node in self.nodes:
if node.height > max_height:
max_height = node.height
if node.height < min_height:
min_height = node.height
# avoids division by zero
if max_height == min_height:
max_height += 1
# constants for mapping real distances and heights into the picture
norm_y = float(max_height - min_height) / 200
norm_x = self.sumdist / 950
# begin drawing polyline
svg += ' <path fill="none" stroke="black" stroke-width="2" stroke-linecap="round"'
svg += ' stroke-linejoin="round" d="M '
x = 35
difference = 0
for node in self.nodes:
# xf is the shift on x-axis, diff is the remainder after rounding
xf = (node.dist/norm_x) + difference
difference = xf - round(xf)
x = x + round(xf)
y = 255 - round((node.height-min_height) / norm_y)
svg += str(int(x)) + " " + str(int(y)) + " L "
svg += str(int(x)) + " " + str(int(y))
max_y = int(255 - round(((max_height-min_height) / norm_y)))
# finish drawing polyline
svg += '"/>\n'
# print height lines
svg += ' <text fill="black" font-family="sans" font-size="12" text-anchor="end" x="30" y="256">' + str(min_height) + '</text>\n'
svg += ' <line stroke = "red" stroke-dasharray="2,2" x1="35" y1="255" x2="985" y2="255"/>\n'
svg += ' <text fill="black" font-family="sans" font-size="12" text-anchor="end" x="30"'
svg += ' y="' + str(max_y + 4) + '">' + str(max_height) + '</text>\n'
svg += ' <line stroke = "red" stroke-dasharray="2,2"'
svg += ' x1="35" y1="' + str(max_y) + '" x2="985" y2="' + str(max_y) + '"/>\n'
svg += ' <text fill="black" font-family="sans" font-size="12" text-anchor="middle" x="985"'
svg += ' y="288">' + str(round(self.sumdist, 1)) + ' km</text>\n'
# assign 'h' max_height floored to hundreds
h = (max_height/100) * 100
# avoid label collisions
if (max_height-h) < (max_height-min_height)/20.0:
h -= 100
while h > (min_height + (max_height-min_height)/10): # condition avoids label collisions
h_coord = int(255-round((h-min_height)/norm_y))
svg += ' <line stroke = "black" stroke-dasharray="2,2"'
svg += ' x1="35" y1="' + str(h_coord) + '" x2="985" y2="' + str(h_coord) + '"/>\n'
svg += ' <text fill="black" font-family="sans" font-size="12" text-anchor="end" x="30"'
svg += ' y="' + str(h_coord + 4) + '">' + str(h) + '</text>\n'
h -= 100
# print distance markers, +/- 5 markers
if self.sumdist > 25:
step = int(round(self.sumdist/50)*10)
elif self.sumdist > 2.5:
step = int(round(self.sumdist/5))
else:
step = 0.2
dist = step
svg += ' <text fill="black" font-family="sans" font-size="12" text-anchor="middle" x="35" y="288">0</text>\n'
while dist < self.sumdist - self.sumdist/20: # condition 'self.sumdist/20' avoids label collision
svg += ' <line stroke ="black" x1="' + str(round(dist/norm_x) + 35) + '" y1="276"'
svg += ' x2="' + str(round(dist/norm_x) + 35) + '" y2="269"/>\n'
svg += ' <text fill="black" font-family="sans" font-size="12" text-anchor="middle" x="' + str(round(dist/norm_x) + 35)
svg += '" y="288">' + str(dist) + '</text>\n'
dist += step
# print ascending and descending
ascdesc = self.ascending()
svg += ' <text fill="black" font-family="sans" font-size="12" text-anchor="middle" x="550" y="20">%s: %i %s: %i</text>\n' % (
_('Ascending'), ascdesc[0],
_('Descending'), ascdesc[1])
svg += ' <text fill="black" font-family="sans" font-size="12" x="2" y="25">%s (m)</text>\n' % _('Height')
# print SVG end element
svg += '</svg>'
return svg
def png_profile(self):
"""
Create PNG image from SVG file.
"""
if self.status != 0:
return NONE_HEIGHT
svg = self.svg_profile()
return svg_string_to_png(svg)
@staticmethod
def compute_distances(nodes):
"""
Compute distance to previous node and sum of distances.
"""
sumdist = 0
nodes[0].dist = 0
for i in range(len(nodes)-1):
dist = haversine(nodes[i].lon, nodes[i].lat, nodes[i+1].lon, nodes[i+1].lat)
sumdist += dist
nodes[i+1].dist = dist
return sumdist
def _insert_nodes(self, nodes):
"""
Adds some nodes if the distance between the given nodes is too long.
"""
# Threshold defines which nodes should be divided.
# If the overall distance is less than 50 km, threshold is 100m, because of
# the granularity of height data.
if self.sumdist < 50:
threshold = 0.1
else:
threshold = self.sumdist/500
analyzed_nodes = [nodes[0]]
previous = nodes[0]
for node in nodes[1:]:
# if distance between current and previous node is greater than threshold
if node.dist > threshold:
# steps is the number of added nodes
steps = int(math.floor(node.dist/threshold))
dlat = (node.lat - previous.lat)/steps
dlon = (node.lon - previous.lon)/steps
# add nodes
for step in range(steps):
newlat = analyzed_nodes[-1].lat + dlat
newlon = analyzed_nodes[-1].lon + dlon
new_node = ProfileNode(newlat, newlon)
new_node.dist = haversine(analyzed_nodes[-1].lon, analyzed_nodes[-1].lat, newlon, newlat)
analyzed_nodes.append(new_node)
index = len(analyzed_nodes) - 1
node.dist = node.dist - analyzed_nodes[index].dist
analyzed_nodes.append(node)
previous = node
# return new list of nodes
return analyzed_nodes
def _initialize_hgt_files(self):
"""
Open all height files needed just once.
"""
hgt_files = {}
for node in self.nodes:
file_key = hgt_file_key(node.lat, node.lon)
if not (file_key in hgt_files):
try:
hgt_files[file_key] = HgtFile(node)
except IOError:
return NONE_HEIGHT
node.hgt_file = hgt_files[file_key]
return 0
def _compute_heights(self):
"""
Compute height for all nodes. Missing data is interpolated based on
nearest neighbor height data.
"""
for i in range(len(self.nodes)):
node = self.nodes[i]
node.height = node.srtm_height()
if (node.height == NONE_HEIGHT) and (i > 1):
node.height = self.nodes[i - 1].height
# Parsing missing height data
if node.height == NONE_HEIGHT:
j = 1
while (self.nodes[j].height == NONE_HEIGHT) and j < len(self.nodes) - 1:
j += 1
if j == len(self.nodes) - 1:
return NONE_HEIGHT
while j > 0:
self.nodes[j - 1].height = self.nodes[j].height
j -= 1
if self.nodes[0].height == NONE_HEIGHT:
# First node has missing height, find first node with height and
# copy this value to all previous nodes.
j = 1
while (self.nodes[j].height == NONE_HEIGHT) and j < len(self.nodes) - 1:
j += 1
if j == len(self.nodes) - 1:
return NONE_HEIGHT
while j > 0:
self.nodes[j - 1].height = self.nodes[j].height
j -= 1
return 0
def ascending(self):
"""
Counts total ascending and descending.
"""
asc = 0
desc = 0
for i in range(len(self.nodes) - 1):
dif = self.nodes[i + 1].height - self.nodes[i].height
if dif > 0:
asc = asc + dif
else:
desc = desc - dif
return [asc, desc]
class ProfileNode:
def __init__(self, latitude, longitude):
self.lat = latitude
self.lon = longitude
self.dist = 0
self.height = None
self.hgt_file = None
def __unicode__(self):
return '%s, %s, %sm' % (self.lat, self.lon, self.height)
def srtm_height(self):
"""
Returns height of a point in SRTM file.
None value is NONE_HEIGHT
"""
if self.hgt_file is None:
try:
self.hgt_file = HgtFile(self)
except IOError:
# File not found
return NONE_HEIGHT
return self.hgt_file.height(self.lat, self.lon)
| gpl-3.0 | 3,056,943,141,313,523,700 | 37.116564 | 139 | 0.524787 | false |
keras-team/autokeras | autokeras/preprocessors/encoders.py | 1 | 3609 | # Copyright 2020 The AutoKeras Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import tensorflow as tf
from autokeras.engine import preprocessor
class Encoder(preprocessor.TargetPreprocessor):
"""Transform labels to encodings.
# Arguments
labels: A list of labels of any type. The labels to be encoded.
"""
def __init__(self, labels, **kwargs):
super().__init__(**kwargs)
self.labels = [
label.decode("utf-8") if isinstance(label, bytes) else str(label)
for label in labels
]
def get_config(self):
return {"labels": self.labels}
def fit(self, dataset):
return
def transform(self, dataset):
"""Transform labels to integer encodings.
# Arguments
dataset: tf.data.Dataset. The dataset to be transformed.
# Returns
tf.data.Dataset. The transformed dataset.
"""
keys_tensor = tf.constant(self.labels)
vals_tensor = tf.constant(list(range(len(self.labels))))
table = tf.lookup.StaticHashTable(
tf.lookup.KeyValueTensorInitializer(keys_tensor, vals_tensor), -1
)
return dataset.map(lambda x: table.lookup(tf.reshape(x, [-1])))
class OneHotEncoder(Encoder):
def transform(self, dataset):
"""Transform labels to one-hot encodings.
# Arguments
dataset: tf.data.Dataset. The dataset to be transformed.
# Returns
tf.data.Dataset. The transformed dataset.
"""
dataset = super().transform(dataset)
eye = tf.eye(len(self.labels))
dataset = dataset.map(lambda x: tf.nn.embedding_lookup(eye, x))
return dataset
def postprocess(self, data):
"""Transform probabilities back to labels.
# Arguments
data: numpy.ndarray. The output probabilities of the classification head.
# Returns
numpy.ndarray. The original labels.
"""
return np.array(
list(
map(
lambda x: self.labels[x],
np.argmax(np.array(data), axis=1),
)
)
).reshape(-1, 1)
class LabelEncoder(Encoder):
"""Transform the labels to integer encodings."""
def transform(self, dataset):
"""Transform labels to integer encodings.
# Arguments
dataset: tf.data.Dataset. The dataset to be transformed.
# Returns
tf.data.Dataset. The transformed dataset.
"""
dataset = super().transform(dataset)
dataset = dataset.map(lambda x: tf.expand_dims(x, axis=-1))
return dataset
def postprocess(self, data):
"""Transform probabilities back to labels.
# Arguments
data: numpy.ndarray. The output probabilities of the classification head.
# Returns
numpy.ndarray. The original labels.
"""
return np.array(
list(map(lambda x: self.labels[int(round(x[0]))], np.array(data)))
).reshape(-1, 1)
| apache-2.0 | -3,504,195,265,757,688,300 | 29.075 | 85 | 0.612081 | false |
osteth/project-firewatch | projectfirewatch/scripts/cli.py | 1 | 7675 | # coding: utf-8
from flask import request, url_for
from flask_api import FlaskAPI, status, exceptions
#FlaskAPI docs avilable at http://www.flaskapi.org/
from flask import Flask, render_template
from flask_googlemaps import GoogleMaps
from flask_googlemaps import Map, icons
import json, csv, click, wget, os, time, requests
port = 5000
debug = True
reload = True
path = os.path.realpath(__file__).replace(__file__, '')
app = FlaskAPI(__name__, template_folder="templates")
# you can set key as config
app.config['GOOGLEMAPS_KEY'] = "AIzaSyByCF9JlHWGthilogp3Q-Y1qiNaqRtZ6ZQ"
# you can also pass key here
GoogleMaps(app, key="AIzaSyByCF9JlHWGthilogp3Q-Y1qiNaqRtZ6ZQ")
#begin CLI Portion
@click.group()
def cli(*args, **kwargs):
"""Command line utility to easily convert .csv data over to .json data.
This utility was built for the NASA space apps challenge and is defaulted
to be used with cron to pull in data from
https://earthdata.nasa.gov/earth-observation-data/near-real-time/firms/active-fire-data
convert it to json and load it into a database to overcome the lack of
a useable api for this data.
Usage: python3 csv-json.py convert -i <input file path> -o <output file path>"""
pass
@click.command(help='Retrieves MODIS satelite CSV data and coverts data from csv to json.')
@click.option('--input', '-i', default=path + 'MODIS_C6_Global_24h.csv', help='--input , -i Sets the file that is to be converted')
@click.option('--output', '-o', default=path + 'MODIS_C6_Global_24h.json', help='--output, -o, Sets the name of the output.')
def update(input, output):
try:
os.remove(path + 'MODIS_C6_Global_24h.json')
except OSError:
pass
MODISurl = 'https://firms.modaps.eosdis.nasa.gov/active_fire/c6/text/MODIS_C6_Global_24h.csv'
filename = wget.download(MODISurl, path)
csvfile = open(input, 'r')
jsonfile = open(output, 'w')
reader = csv.DictReader(csvfile)
for row in reader:
json.dump(row, jsonfile)
jsonfile.write('\n')
try:
os.remove(path + 'MODIS_C6_Global_24h.csv')
except OSError:
pass
return filename
@click.command(help='Start/Stop the mapping and API server.')
def start():
app.run(host='0.0.0.0', port=port, debug=debug, use_reloader=reload)
return 'Server Started'
cli.add_command(update)
cli.add_command(start)
#End CLI portion
#begin Auxiliary Functions
def LLR():
table = []
llr_table = []
count = 0
with open('MODIS_C6_Global_24h.json', 'r') as data_file:
for line in data_file:
try:
j = line.split('|')[-1]
table.append(json.loads(j))
except ValueError:
print("Bad Json File!")
continue
for row in table:
lon = float(row['longitude'])
lat = float(row['latitude'])
scan = row['scan']
track = row['track']
radius = (float(scan) * float(track) / 2) * 750 #kilometers
radius = round(radius, 2) #round to two decimal places
stroke_color = "FF0000"
fill_color = "FF0000"
if count < 3240 and lat < upper and lat > lower and lon > left and lon < right:
llr_table.append([lat,lon,radius])
count = count + 1
return llr_table
def get_ip():
'''finds the useres IP address and returns it.'''
ipdata = requests.get('http://jsonip.com/')
ipresp = ipdata.json()
ip = ipresp.get('ip')
return ip
def geoip(ip):
'''retireves and reports users geoip information'''
resp = requests.get('http://freegeoip.net/json/' + ip)
data = resp.json()
return(data)
def geoip_coords(ip):
'''retrieves and reports users geoip infromations limited down to
location coordinates only'''
resp = requests.get('http://freegeoip.net/json/' + ip)
data = resp.json()
lat = data.get('latitude')
lng = data.get('longitude')
return(lat,lng)
#End Auxilary Functions
#Begin API
@app.route("/api/", methods=['GET', 'POST'])
def Project_Firewatch():
'''
Returns all Project Firewatch data in JSON format. See Documentation for filering options. https://github.com/osteth/project-firewatch
'''
SatDataTable = []
with open('MODIS_C6_Global_24h.json', 'r') as SatData:
for line in SatData:
try:
j = line.split('|')[-1]
SatDataTable.append(json.loads(j))
except ValueError:
print("Bad Json File!")
continue
return {"Satellite Data": SatDataTable}
#End API
#Bgin Map
@app.route('/')
def fullmap():
global lat, lng, left, right, upper, lower
ip = request.remote_addr
lat, lng = geoip_coords(ip)
upper = lat + 25.0
right = lng + 25.0
lower = lat - 25.0
left = lng - 25.0
firedata = LLR()
fullmap = Map(
identifier="fullmap",
varname="fullmap",
style=(
"height:100%;"
"width:100%;"
"top:0;"
"left:0;"
"position:absolute;"
"z-index:200;"
),
lat= lat,
lng= lng,
markers=[
{
'icon': '//maps.google.com/mapfiles/ms/icons/green-dot.png',
'lat': lat,
'lng': lng,
'infobox': "<center><h2>Your Location is " + str(lat) + ", " + str(lng) + ".</br>"
"Sign-up for Personalized "
"Wildfire Email Notifications.</h2>"
"<button type=\"submit\" class=\"signupbtn\">Sign Up</button></center>"
},
{
'icon': '//maps.google.com/mapfiles/ms/icons/blue-dot.png',
'lat': 34.716323,
'lng': -86.594007,
'infobox': "Sensor: 2, Temp: 86, humidity: 46% ALERT: False"
},
{
'icon': '//maps.google.com/mapfiles/ms/icons/blue-dot.png',
'lat': 34.715508,
'lng': -86.598972,
'infobox': "Sensor: 3, Temp: 86, humidity: 46% ALERT: False"
},
{
'icon': '//maps.google.com/mapfiles/ms/icons/yellow-dot.png',
'lat': 34.714402,
'lng': -86.599079,
'infobox': "Sensor: 4, Temp: 124, humidity: 8% ALERT: Upcoming Alert Probable"
},
{
'icon': '//maps.google.com/mapfiles/ms/icons/red-dot.png',
'lat': 34.713745,
'lng': -86.597834,
'infobox': "Sensor: 5, Temp: overload, humidity: 0% ALERT: TRUE"
},
{
'icon': '//maps.google.com/mapfiles/ms/icons/blue-dot.png',
'lat': 334.713767,
'lng': -86.596396,
'infobox': "Sensor: 6, Temp: 86, humidity: 46% ALERT: False"
},
{
'icon': '//maps.google.com/mapfiles/ms/icons/blue-dot.png',
'lat': 34.713811,
'lng': -86.594352,
'infobox': "Sensor: 7, Temp: 86, humidity: 46% ALERT: False"
}
],
rectangle = {
'stroke_color': '#4286f4',
'stroke_opacity': 1,
'stroke_weight': 25,
'fill_color': '#4286f4',
'fill_opacity': 1,
'bounds': {
'north': upper,
'south': lower,
'east': right,
'west': left
}
},
circles=firedata,
maptype="TERRAIN",
zoom="7"
)
return render_template('example_fullmap.html', fullmap=fullmap)
#End Map
if __name__ == "__main__":
cli()
| mit | -6,832,263,758,543,335,000 | 30.199187 | 139 | 0.554919 | false |
osks/pylyskom | tests/test_requests.py | 1 | 7959 | # -*- coding: utf-8 -*-
import pytest
from pylyskom.protocol import MAX_TEXT_SIZE
from pylyskom.datatypes import (
AuxItemInput, PrivBits, ConfType, ExtendedConfType, LocalTextNo, InfoOld, CookedMiscInfo,
MIRecipient)
from pylyskom import requests, komauxitems
def test_all_requests_has_response_type():
for k in requests.Requests.__dict__:
if k.startswith("__"):
continue
call_no = getattr(requests.Requests, k)
assert call_no in requests.response_dict
def test_request_constructor():
with pytest.raises(TypeError):
requests.Request()
def test_ReqLogout():
r = requests.ReqLogout()
assert r.to_string() == b"1\n"
def test_ReqChangeConference():
r = requests.ReqChangeConference(14506)
assert r.to_string() == b"2 14506\n"
def test_ReqChangeConference_constructor():
r1 = requests.ReqChangeConference(14506)
assert r1.to_string() == b"2 14506\n"
assert r1.conf_no == 14506
r2 = requests.ReqChangeConference(conf_no=14506)
assert r2.to_string() == b"2 14506\n"
assert r2.conf_no == 14506
with pytest.raises(TypeError):
requests.ReqChangeConference()
with pytest.raises(TypeError):
requests.ReqChangeConference(14506, conf_no=14506)
with pytest.raises(TypeError):
requests.ReqChangeConference(foo=14506)
def test_ReqChangeName():
r = requests.ReqChangeName(14506, u'R\xe4ksm\xf6rg\xe5s'.encode('latin1'))
assert r.to_string() == b"3 14506 10HR\xe4ksm\xf6rg\xe5s\n"
def test_ReqChangeName_constructor():
r1 = requests.ReqChangeName(14506, b"foo")
assert r1.to_string() == b"3 14506 3Hfoo\n"
assert r1.conf_no == 14506
r2 = requests.ReqChangeName(conf_no=14506, new_name=b"foo")
assert r2.to_string() == b"3 14506 3Hfoo\n"
assert r2.conf_no == 14506
r3 = requests.ReqChangeName(14506, new_name=b"foo")
assert r3.to_string() == b"3 14506 3Hfoo\n"
assert r3.conf_no == 14506
with pytest.raises(TypeError):
requests.ReqChangeName()
with pytest.raises(TypeError):
requests.ReqChangeName(14506, conf_no=14506)
with pytest.raises(TypeError):
requests.ReqChangeName("foo", conf_no=14506)
with pytest.raises(TypeError):
requests.ReqChangeName(14506, "foo", conf_no=14506)
with pytest.raises(TypeError):
requests.ReqChangeName(14506, "foo", new_name="foo")
with pytest.raises(TypeError):
requests.ReqChangeName(14506, "foo", conf_no=14506, new_name="foo")
def test_ReqChangeWhatIAmDoing():
r = requests.ReqChangeWhatIAmDoing(b'what')
assert r.to_string() == b"4 4Hwhat\n"
def test_ReqSetPrivBits():
priv = PrivBits()
r = requests.ReqSetPrivBits(14506, priv)
assert r.to_string() == b"7 14506 0000000000000000\n"
def test_ReqSetPasswd():
r = requests.ReqSetPasswd(14506, b"oldpwd", b"newpassword")
assert r.to_string() == b"8 14506 6Holdpwd 11Hnewpassword\n"
def test_ReqDeleteConf():
r = requests.ReqDeleteConf(14506)
assert r.to_string() == b"11 14506\n"
def test_ReqSubMember():
r = requests.ReqSubMember(14506, 14507)
assert r.to_string() == b"15 14506 14507\n"
def test_ReqSetPresentation():
r = requests.ReqSetPresentation(14506, 4711)
assert r.to_string() == b"16 14506 4711\n"
def test_ReqSetEtcMoTD():
r = requests.ReqSetEtcMoTD(14506, 4711)
assert r.to_string() == b"17 14506 4711\n"
def test_ReqSetSupervisor():
r = requests.ReqSetSupervisor(6, 14506)
assert r.to_string() == b"18 6 14506\n"
def test_ReqSetPermittedSubmitters():
r = requests.ReqSetPermittedSubmitters(123, 456)
assert r.to_string() == b"19 123 456\n"
def test_ReqSetSuperConf():
r = requests.ReqSetSuperConf(14506, 6)
assert r.to_string() == b"20 14506 6\n"
def test_ReqSetConfType():
conf_type = ConfType([1, 1, 1, 1])
r1 = requests.ReqSetConfType(14506, conf_type)
assert r1.to_string() == b"21 14506 11110000\n"
ext_conf_type = ExtendedConfType()
r2 = requests.ReqSetConfType(14506, ext_conf_type)
assert r2.to_string() == b"21 14506 00000000\n"
def test_ReqGetText():
r = requests.ReqGetText(123, 17, 65535)
assert r.to_string() == b"25 123 17 65535\n"
def test_ReqGetText_with_defaults():
r = requests.ReqGetText(123)
assert r.to_string() == b"25 123 0 %d\n" % MAX_TEXT_SIZE
def test_ReqGetText_with_default_endchar():
r = requests.ReqGetText(123, 0)
assert r.to_string() == b"25 123 0 %d\n" % MAX_TEXT_SIZE
def test_ReqGetText_with_default_start_char():
r = requests.ReqGetText(123, start_char=17)
assert r.to_string() == b"25 123 17 %d\n" % MAX_TEXT_SIZE
def test_ReqMarkAsRead():
r = requests.ReqMarkAsRead(14506, [])
assert r.to_string() == b"27 14506 0 { }\n"
r = requests.ReqMarkAsRead(14506, [LocalTextNo(17), LocalTextNo(4711)])
assert r.to_string() == b"27 14506 2 { 17 4711 }\n"
r = requests.ReqMarkAsRead(14506, [17, 4711])
assert r.to_string() == b"27 14506 2 { 17 4711 }\n"
def test_ReqAddRecipient():
r = requests.ReqAddRecipient(1, 5)
assert r.to_string() == b"30 1 5 0\n"
r = requests.ReqAddRecipient(1, 5, 0)
assert r.to_string() == b"30 1 5 0\n"
r = requests.ReqAddRecipient(1, 5, 1)
assert r.to_string() == b"30 1 5 1\n"
def test_ReqMarkText():
r = requests.ReqMarkText(14506, 12345)
assert r.to_string() == b"72 14506 12345\n"
def test_ReqAcceptAsync():
r1 = requests.ReqAcceptAsync([])
assert r1.to_string() == b"80 0 { }\n"
assert repr(r1) == "ReqAcceptAsync(request_list=[])"
r2 = requests.ReqAcceptAsync([1, 3])
assert r2.to_string() == b"80 2 { 1 3 }\n"
assert repr(r2) == "ReqAcceptAsync(request_list=[1, 3])"
#def test_ReqLookupZName_handles_unicode_string():
# name = u'bj\xf6rn'
# r = requests.ReqLookupZName(name, 1, 0)
# assert r.to_string() == b'76 5Hbj\xf6rn 1 0\n'
#
#def test_ReqSendMessage_handles_unicode_string():
# msg = u'hej bj\xf6rn'
# r = requests.ReqSendMessage(123, msg)
# assert r.to_string() == b'53 123 9Hhej bj\xf6rn\n'
#
#def test_ReqLogin_handles_unicode_string():
# pwd = u'xyz123bj\xf6rn'
# r = requests.ReqLogin(123, pwd)
# assert r.to_string() == '62 123 11Hxyz123bj\xf6rn 1\n'
#
#def test_ReqSetClientVersion_handles_unicode_string():
# name = u'bj\xf6rn'
# version = u'123.bj\xf6rn'
# r = requests.ReqSetClientVersion(name, version)
# assert r.to_string() == '69 5Hbj\xf6rn 9H123.bj\xf6rn\n'
def test_ReqSetInfo():
info_old = InfoOld(version=10901, conf_pres_conf=1, pers_pres_conf=2,
motd_conf=3, kom_news_conf=4, motd_of_lyskom=1080)
r = requests.ReqSetInfo(info_old)
assert r.to_string() == b"79 10901 1 2 3 4 1080\n"
def test_ReqCreateText():
misc_info = CookedMiscInfo()
misc_info.recipient_list.append(MIRecipient(recpt=14506))
aux_items = [ AuxItemInput(tag=komauxitems.AI_CREATING_SOFTWARE,
data=b"test") ]
r = requests.ReqCreateText(b'en text', misc_info, aux_items)
assert r.to_string() == b"86 7Hen text 1 { 0 14506 } 1 { 15 00000000 0 4Htest }\n"
def test_ReqCreateText_empty():
misc_info = CookedMiscInfo()
aux_items = []
r = requests.ReqCreateText(b'', misc_info, aux_items)
assert r.to_string() == b"86 0H 0 { } 0 { }\n"
def test_ReqCreateAnonymousText():
misc_info = CookedMiscInfo()
misc_info.recipient_list.append(MIRecipient(recpt=14506))
aux_items = [ AuxItemInput(tag=komauxitems.AI_CREATING_SOFTWARE,
data=b"test") ]
r = requests.ReqCreateAnonymousText('hemligt', misc_info, aux_items)
assert r.to_string() == b"87 7Hhemligt 1 { 0 14506 } 1 { 15 00000000 0 4Htest }\n"
def test_ReqCreateAnonymousText_empty():
misc_info = CookedMiscInfo()
aux_items = []
r = requests.ReqCreateAnonymousText(b'hemligt', misc_info, aux_items)
assert r.to_string() == b"87 7Hhemligt 0 { } 0 { }\n"
| gpl-2.0 | 380,927,317,214,981,250 | 34.061674 | 93 | 0.660008 | false |
emonty/ironic | ironic/virt/baremetal/driver.py | 1 | 19972 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# coding=utf-8
#
# Copyright (c) 2012 NTT DOCOMO, INC
# Copyright (c) 2011 University of Southern California / ISI
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A driver for Bare-metal platform.
"""
from oslo.config import cfg
from nova.compute import power_state
from nova import context as nova_context
from nova import exception
from nova.openstack.common import excutils
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova import paths
from nova.virt.baremetal import baremetal_states
from nova.virt.baremetal import db
from nova.virt import driver
from nova.virt import firewall
from nova.virt.libvirt import imagecache
opts = [
cfg.BoolOpt('inject_password',
default=True,
help='Whether baremetal compute injects password or not'),
cfg.StrOpt('injected_network_template',
default=paths.basedir_def('nova/virt/'
'baremetal/interfaces.template'),
help='Template file for injected network'),
cfg.StrOpt('vif_driver',
default='nova.virt.baremetal.vif_driver.BareMetalVIFDriver',
help='Baremetal VIF driver.'),
cfg.StrOpt('volume_driver',
default='nova.virt.baremetal.volume_driver.LibvirtVolumeDriver',
help='Baremetal volume driver.'),
cfg.ListOpt('instance_type_extra_specs',
default=[],
help='a list of additional capabilities corresponding to '
'instance_type_extra_specs for this compute '
'host to advertise. Valid entries are name=value, pairs '
'For example, "key1:val1, key2:val2"'),
cfg.StrOpt('driver',
default='nova.virt.baremetal.pxe.PXE',
help='Baremetal driver back-end (pxe or tilera)'),
cfg.StrOpt('power_manager',
default='nova.virt.baremetal.ipmi.IPMI',
help='Baremetal power management method'),
cfg.StrOpt('tftp_root',
default='/tftpboot',
help='Baremetal compute node\'s tftp root path'),
]
LOG = logging.getLogger(__name__)
baremetal_group = cfg.OptGroup(name='baremetal',
title='Baremetal Options')
CONF = cfg.CONF
CONF.register_group(baremetal_group)
CONF.register_opts(opts, baremetal_group)
CONF.import_opt('host', 'nova.netconf')
DEFAULT_FIREWALL_DRIVER = "%s.%s" % (
firewall.__name__,
firewall.NoopFirewallDriver.__name__)
def _get_baremetal_node_by_instance_uuid(instance_uuid):
ctx = nova_context.get_admin_context()
node = db.bm_node_get_by_instance_uuid(ctx, instance_uuid)
if node['service_host'] != CONF.host:
LOG.error(_("Request for baremetal node %s "
"sent to wrong service host") % instance_uuid)
raise exception.InstanceNotFound(instance_id=instance_uuid)
return node
def _update_state(context, node, instance, state):
"""Update the node state in baremetal DB
If instance is not supplied, reset the instance_uuid field for this node.
"""
values = {'task_state': state}
if not instance:
values['instance_uuid'] = None
values['instance_name'] = None
db.bm_node_update(context, node['id'], values)
def get_power_manager(**kwargs):
cls = importutils.import_class(CONF.baremetal.power_manager)
return cls(**kwargs)
class BareMetalDriver(driver.ComputeDriver):
"""BareMetal hypervisor driver."""
capabilities = {
"has_imagecache": True,
}
def __init__(self, virtapi, read_only=False):
super(BareMetalDriver, self).__init__(virtapi)
self.driver = importutils.import_object(
CONF.baremetal.driver, virtapi)
self.vif_driver = importutils.import_object(
CONF.baremetal.vif_driver)
self.firewall_driver = firewall.load_driver(
default=DEFAULT_FIREWALL_DRIVER)
self.volume_driver = importutils.import_object(
CONF.baremetal.volume_driver, virtapi)
self.image_cache_manager = imagecache.ImageCacheManager()
extra_specs = {}
extra_specs["baremetal_driver"] = CONF.baremetal.driver
for pair in CONF.baremetal.instance_type_extra_specs:
keyval = pair.split(':', 1)
keyval[0] = keyval[0].strip()
keyval[1] = keyval[1].strip()
extra_specs[keyval[0]] = keyval[1]
if 'cpu_arch' not in extra_specs:
LOG.warning(
_('cpu_arch is not found in instance_type_extra_specs'))
extra_specs['cpu_arch'] = ''
self.extra_specs = extra_specs
self.supported_instances = [
(extra_specs['cpu_arch'], 'baremetal', 'baremetal'),
]
@classmethod
def instance(cls):
if not hasattr(cls, '_instance'):
cls._instance = cls()
return cls._instance
def init_host(self, host):
return
def get_hypervisor_type(self):
return 'baremetal'
def get_hypervisor_version(self):
# TODO(deva): define the version properly elsewhere
return 1
def legacy_nwinfo(self):
return True
def list_instances(self):
l = []
context = nova_context.get_admin_context()
for node in db.bm_node_get_associated(context, service_host=CONF.host):
l.append(node['instance_name'])
return l
def _require_node(self, instance):
"""Get a node's uuid out of a manager instance dict.
The compute manager is meant to know the node uuid, so missing uuid
a significant issue - it may mean we've been passed someone elses data.
"""
node_uuid = instance.get('node')
if not node_uuid:
raise exception.NovaException(_(
"Baremetal node id not supplied to driver for %r")
% instance['uuid'])
return node_uuid
def _attach_block_devices(self, instance, block_device_info):
block_device_mapping = driver.\
block_device_info_get_mapping(block_device_info)
for vol in block_device_mapping:
connection_info = vol['connection_info']
mountpoint = vol['mount_device']
self.attach_volume(
connection_info, instance['name'], mountpoint)
def _detach_block_devices(self, instance, block_device_info):
block_device_mapping = driver.\
block_device_info_get_mapping(block_device_info)
for vol in block_device_mapping:
connection_info = vol['connection_info']
mountpoint = vol['mount_device']
self.detach_volume(
connection_info, instance['name'], mountpoint)
def _start_firewall(self, instance, network_info):
self.firewall_driver.setup_basic_filtering(
instance, network_info)
self.firewall_driver.prepare_instance_filter(
instance, network_info)
self.firewall_driver.apply_instance_filter(
instance, network_info)
def _stop_firewall(self, instance, network_info):
self.firewall_driver.unfilter_instance(
instance, network_info)
def macs_for_instance(self, instance):
context = nova_context.get_admin_context()
node_uuid = self._require_node(instance)
node = db.bm_node_get_by_node_uuid(context, node_uuid)
ifaces = db.bm_interface_get_all_by_bm_node_id(context, node['id'])
return set(iface['address'] for iface in ifaces)
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None):
node_uuid = self._require_node(instance)
# NOTE(deva): this db method will raise an exception if the node is
# already in use. We call it here to ensure no one else
# allocates this node before we begin provisioning it.
node = db.bm_node_associate_and_update(context, node_uuid,
{'instance_uuid': instance['uuid'],
'instance_name': instance['hostname'],
'task_state': baremetal_states.BUILDING})
try:
self._plug_vifs(instance, network_info, context=context)
self._attach_block_devices(instance, block_device_info)
self._start_firewall(instance, network_info)
self.driver.cache_images(
context, node, instance,
admin_password=admin_password,
image_meta=image_meta,
injected_files=injected_files,
network_info=network_info,
)
self.driver.activate_bootloader(context, node, instance)
self.power_on(instance, node)
self.driver.activate_node(context, node, instance)
_update_state(context, node, instance, baremetal_states.ACTIVE)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_("Error deploying instance %(instance)s "
"on baremetal node %(node)s.") %
{'instance': instance['uuid'],
'node': node['uuid']})
# Do not set instance=None yet. This prevents another
# spawn() while we are cleaning up.
_update_state(context, node, instance, baremetal_states.ERROR)
self.driver.deactivate_node(context, node, instance)
self.power_off(instance, node)
self.driver.deactivate_bootloader(context, node, instance)
self.driver.destroy_images(context, node, instance)
self._detach_block_devices(instance, block_device_info)
self._stop_firewall(instance, network_info)
self._unplug_vifs(instance, network_info)
_update_state(context, node, None, baremetal_states.DELETED)
def reboot(self, context, instance, network_info, reboot_type,
block_device_info=None, bad_volumes_callback=None):
node = _get_baremetal_node_by_instance_uuid(instance['uuid'])
ctx = nova_context.get_admin_context()
pm = get_power_manager(node=node, instance=instance)
state = pm.reboot_node()
if pm.state != baremetal_states.ACTIVE:
raise exception.InstanceRebootFailure(_(
"Baremetal power manager failed to restart node "
"for instance %r") % instance['uuid'])
_update_state(ctx, node, instance, state)
def destroy(self, instance, network_info, block_device_info=None):
context = nova_context.get_admin_context()
try:
node = _get_baremetal_node_by_instance_uuid(instance['uuid'])
except exception.InstanceNotFound:
LOG.warning(_("Destroy called on non-existing instance %s")
% instance['uuid'])
return
try:
self.driver.deactivate_node(context, node, instance)
self.power_off(instance, node)
self.driver.deactivate_bootloader(context, node, instance)
self.driver.destroy_images(context, node, instance)
self._detach_block_devices(instance, block_device_info)
self._stop_firewall(instance, network_info)
self._unplug_vifs(instance, network_info)
_update_state(context, node, None, baremetal_states.DELETED)
except Exception as e:
with excutils.save_and_reraise_exception():
try:
LOG.error(_("Error from baremetal driver "
"during destroy: %s") % e)
_update_state(context, node, instance,
baremetal_states.ERROR)
except Exception:
LOG.error(_("Error while recording destroy failure in "
"baremetal database: %s") % e)
def power_off(self, instance, node=None):
"""Power off the specified instance."""
if not node:
node = _get_baremetal_node_by_instance_uuid(instance['uuid'])
pm = get_power_manager(node=node, instance=instance)
pm.deactivate_node()
if pm.state != baremetal_states.DELETED:
raise exception.InstancePowerOffFailure(_(
"Baremetal power manager failed to stop node "
"for instance %r") % instance['uuid'])
pm.stop_console()
def power_on(self, instance, node=None):
"""Power on the specified instance."""
if not node:
node = _get_baremetal_node_by_instance_uuid(instance['uuid'])
pm = get_power_manager(node=node, instance=instance)
pm.activate_node()
if pm.state != baremetal_states.ACTIVE:
raise exception.InstancePowerOnFailure(_(
"Baremetal power manager failed to start node "
"for instance %r") % instance['uuid'])
pm.start_console()
def get_volume_connector(self, instance):
return self.volume_driver.get_volume_connector(instance)
def attach_volume(self, connection_info, instance, mountpoint):
return self.volume_driver.attach_volume(connection_info,
instance, mountpoint)
def detach_volume(self, connection_info, instance_name, mountpoint):
return self.volume_driver.detach_volume(connection_info,
instance_name, mountpoint)
def get_info(self, instance):
# NOTE(deva): compute/manager.py expects to get NotFound exception
# so we convert from InstanceNotFound
inst_uuid = instance.get('uuid')
node = _get_baremetal_node_by_instance_uuid(inst_uuid)
pm = get_power_manager(node=node, instance=instance)
ps = power_state.SHUTDOWN
if pm.is_power_on():
ps = power_state.RUNNING
return {'state': ps,
'max_mem': node['memory_mb'],
'mem': node['memory_mb'],
'num_cpu': node['cpus'],
'cpu_time': 0}
def refresh_security_group_rules(self, security_group_id):
self.firewall_driver.refresh_security_group_rules(security_group_id)
return True
def refresh_security_group_members(self, security_group_id):
self.firewall_driver.refresh_security_group_members(security_group_id)
return True
def refresh_provider_fw_rules(self):
self.firewall_driver.refresh_provider_fw_rules()
def _node_resource(self, node):
vcpus_used = 0
memory_mb_used = 0
local_gb_used = 0
vcpus = node['cpus']
memory_mb = node['memory_mb']
local_gb = node['local_gb']
if node['instance_uuid']:
vcpus_used = node['cpus']
memory_mb_used = node['memory_mb']
local_gb_used = node['local_gb']
dic = {'vcpus': vcpus,
'memory_mb': memory_mb,
'local_gb': local_gb,
'vcpus_used': vcpus_used,
'memory_mb_used': memory_mb_used,
'local_gb_used': local_gb_used,
'hypervisor_type': self.get_hypervisor_type(),
'hypervisor_version': self.get_hypervisor_version(),
'hypervisor_hostname': str(node['uuid']),
'cpu_info': 'baremetal cpu',
}
return dic
def refresh_instance_security_rules(self, instance):
self.firewall_driver.refresh_instance_security_rules(instance)
def get_available_resource(self, nodename):
context = nova_context.get_admin_context()
resource = {}
try:
node = db.bm_node_get_by_node_uuid(context, nodename)
resource = self._node_resource(node)
except exception.NodeNotFoundByUUID:
pass
return resource
def ensure_filtering_rules_for_instance(self, instance_ref, network_info):
self.firewall_driver.setup_basic_filtering(instance_ref, network_info)
self.firewall_driver.prepare_instance_filter(instance_ref,
network_info)
def unfilter_instance(self, instance_ref, network_info):
self.firewall_driver.unfilter_instance(instance_ref,
network_info=network_info)
def get_host_stats(self, refresh=False):
caps = []
context = nova_context.get_admin_context()
nodes = db.bm_node_get_all(context,
service_host=CONF.host)
for node in nodes:
res = self._node_resource(node)
nodename = str(node['uuid'])
data = {}
data['vcpus'] = res['vcpus']
data['vcpus_used'] = res['vcpus_used']
data['cpu_info'] = res['cpu_info']
data['disk_total'] = res['local_gb']
data['disk_used'] = res['local_gb_used']
data['disk_available'] = res['local_gb'] - res['local_gb_used']
data['host_memory_total'] = res['memory_mb']
data['host_memory_free'] = res['memory_mb'] - res['memory_mb_used']
data['hypervisor_type'] = res['hypervisor_type']
data['hypervisor_version'] = res['hypervisor_version']
data['hypervisor_hostname'] = nodename
data['supported_instances'] = self.supported_instances
data.update(self.extra_specs)
data['host'] = CONF.host
data['node'] = nodename
# TODO(NTTdocomo): put node's extra specs here
caps.append(data)
return caps
def plug_vifs(self, instance, network_info):
"""Plugin VIFs into networks."""
self._plug_vifs(instance, network_info)
def _plug_vifs(self, instance, network_info, context=None):
if not context:
context = nova_context.get_admin_context()
node = _get_baremetal_node_by_instance_uuid(instance['uuid'])
if node:
pifs = db.bm_interface_get_all_by_bm_node_id(context, node['id'])
for pif in pifs:
if pif['vif_uuid']:
db.bm_interface_set_vif_uuid(context, pif['id'], None)
for (network, mapping) in network_info:
self.vif_driver.plug(instance, (network, mapping))
def _unplug_vifs(self, instance, network_info):
for (network, mapping) in network_info:
self.vif_driver.unplug(instance, (network, mapping))
def manage_image_cache(self, context, all_instances):
"""Manage the local cache of images."""
self.image_cache_manager.verify_base_images(context, all_instances)
def get_console_output(self, instance):
node = _get_baremetal_node_by_instance_uuid(instance['uuid'])
return self.driver.get_console_output(node, instance)
def get_available_nodes(self):
context = nova_context.get_admin_context()
return [str(n['uuid']) for n in
db.bm_node_get_all(context, service_host=CONF.host)]
| apache-2.0 | 904,373,623,955,433,700 | 39.842536 | 79 | 0.593881 | false |
OpenSeaMap/depth_api | src/utils.py | 1 | 1615 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# --------------------------------------------------------------------------------------------------
# OpenSeaMap API - Web API for OpenSeaMap services.
#
# Written in 2012 by Dominik Fässler [email protected]
#
# To the extent possible under law, the author(s) have dedicated all copyright
# and related and neighboring rights to this software to the public domain
# worldwide. This software is distributed without any warranty.
#
# You should have received a copy of the CC0 Public Domain Dedication along
# with this software. If not, see <http://creativecommons.org/publicdomain/zero/1.0/>.
# --------------------------------------------------------------------------------------------------
# --------------------------------------------------------------------------------------------------
# Imports external
# --------------------------------------------------------------------------------------------------
import random
# --------------------------------------------------------------------------------------------------
class Utils():
'''
Provides utility functions for this project.
'''
# ----------------------------------------------------------------------------------------------
@staticmethod
def generateRandomStringContainingLettersAndDigits(length):
chars = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'
maxCharsIndex = len(chars) - 1
result = ''
for i in range(0, length):
result = result + chars[random.randint(0, maxCharsIndex)]
return result
| cc0-1.0 | 1,834,044,831,373,146,600 | 38.365854 | 100 | 0.444238 | false |
deathbeam/awesomedotrc | qutebrowser/.config/qutebrowser/config.py | 1 | 8032 | # Enable plugins
c.content.plugins = True
# Adjust font and font size
c.fonts.completion.category = "bold 16pt monospace"
c.fonts.completion.entry = "16pt monospace"
c.fonts.debug_console = "16pt monospace"
c.fonts.downloads = "16pt monospace"
c.fonts.hints = "16pt monospace"
c.fonts.keyhint = "16pt monospace"
c.fonts.messages.error = "16pt monospace"
c.fonts.messages.info = "16pt monospace"
c.fonts.messages.warning = "16pt monospace"
c.fonts.monospace = "Terminus, monospace"
c.fonts.prompts = "16pt sans-serif"
c.fonts.statusbar = "16pt monospace"
c.fonts.tabs = "16pt monospace"
# Tabs
c.tabs.padding = {"top": 5, "bottom": 5, "left": 5, "right": 5}
# Play videos with mpv
config.bind('e', 'spawn mpv {url}')
config.bind('E', 'hint links spawn mpv {hint-url}')
# base16-qutebrowser (https://github.com/theova/base16-qutebrowser)
# Base16 qutebrowser template by theova
# Solarized Dark scheme by Ethan Schoonover (modified by aramisgithub)
base00 = "#002b36"
base01 = "#073642"
base02 = "#586e75"
base03 = "#657b83"
base04 = "#839496"
base05 = "#93a1a1"
base06 = "#eee8d5"
base07 = "#fdf6e3"
base08 = "#dc322f"
base09 = "#cb4b16"
base0A = "#b58900"
base0B = "#859900"
base0C = "#2aa198"
base0D = "#268bd2"
base0E = "#6c71c4"
base0F = "#d33682"
# set qutebrowser colors
# Text color of the completion widget. May be a single color to use for
# all columns or a list of three colors, one for each column.
c.colors.completion.fg = base05
# Background color of the completion widget for odd rows.
c.colors.completion.odd.bg = base00
# Background color of the completion widget for even rows.
c.colors.completion.even.bg = base00
# Foreground color of completion widget category headers.
c.colors.completion.category.fg = base0A
# Background color of the completion widget category headers.
c.colors.completion.category.bg = base00
# Top border color of the completion widget category headers.
c.colors.completion.category.border.top = base00
# Bottom border color of the completion widget category headers.
c.colors.completion.category.border.bottom = base00
# Foreground color of the selected completion item.
c.colors.completion.item.selected.fg = base01
# Background color of the selected completion item.
c.colors.completion.item.selected.bg = base0A
# Top border color of the completion widget category headers.
c.colors.completion.item.selected.border.top = base0A
# Bottom border color of the selected completion item.
c.colors.completion.item.selected.border.bottom = base0A
# Foreground color of the matched text in the completion.
c.colors.completion.match.fg = base0B
# Color of the scrollbar handle in the completion view.
c.colors.completion.scrollbar.fg = base05
# Color of the scrollbar in the completion view.
c.colors.completion.scrollbar.bg = base00
# Background color for the download bar.
c.colors.downloads.bar.bg = base00
# Color gradient start for download text.
c.colors.downloads.start.fg = base00
# Color gradient start for download backgrounds.
c.colors.downloads.start.bg = base0D
# Color gradient end for download text.
c.colors.downloads.stop.fg = base00
# Color gradient stop for download backgrounds.
c.colors.downloads.stop.bg = base0C
# Foreground color for downloads with errors.
c.colors.downloads.error.fg = base08
# Font color for hints.
c.colors.hints.fg = base00
# Background color for hints. Note that you can use a `rgba(...)` value
# for transparency.
c.colors.hints.bg = base0A
# Font color for the matched part of hints.
c.colors.hints.match.fg = base05
# Text color for the keyhint widget.
c.colors.keyhint.fg = base05
# Highlight color for keys to complete the current keychain.
c.colors.keyhint.suffix.fg = base05
# Background color of the keyhint widget.
c.colors.keyhint.bg = base00
# Foreground color of an error message.
c.colors.messages.error.fg = base00
# Background color of an error message.
c.colors.messages.error.bg = base08
# Border color of an error message.
c.colors.messages.error.border = base08
# Foreground color of a warning message.
c.colors.messages.warning.fg = base00
# Background color of a warning message.
c.colors.messages.warning.bg = base0E
# Border color of a warning message.
c.colors.messages.warning.border = base0E
# Foreground color of an info message.
c.colors.messages.info.fg = base05
# Background color of an info message.
c.colors.messages.info.bg = base00
# Border color of an info message.
c.colors.messages.info.border = base00
# Foreground color for prompts.
c.colors.prompts.fg = base05
# Border used around UI elements in prompts.
c.colors.prompts.border = base00
# Background color for prompts.
c.colors.prompts.bg = base00
# Background color for the selected item in filename prompts.
c.colors.prompts.selected.bg = base0A
# Foreground color of the statusbar.
c.colors.statusbar.normal.fg = base0B
# Background color of the statusbar.
c.colors.statusbar.normal.bg = base00
# Foreground color of the statusbar in insert mode.
c.colors.statusbar.insert.fg = base00
# Background color of the statusbar in insert mode.
c.colors.statusbar.insert.bg = base0D
# Foreground color of the statusbar in passthrough mode.
c.colors.statusbar.passthrough.fg = base00
# Background color of the statusbar in passthrough mode.
c.colors.statusbar.passthrough.bg = base0C
# Foreground color of the statusbar in private browsing mode.
c.colors.statusbar.private.fg = base00
# Background color of the statusbar in private browsing mode.
c.colors.statusbar.private.bg = base03
# Foreground color of the statusbar in command mode.
c.colors.statusbar.command.fg = base05
# Background color of the statusbar in command mode.
c.colors.statusbar.command.bg = base00
# Foreground color of the statusbar in private browsing + command mode.
c.colors.statusbar.command.private.fg = base05
# Background color of the statusbar in private browsing + command mode.
c.colors.statusbar.command.private.bg = base00
# Foreground color of the statusbar in caret mode.
c.colors.statusbar.caret.fg = base00
# Background color of the statusbar in caret mode.
c.colors.statusbar.caret.bg = base0E
# Foreground color of the statusbar in caret mode with a selection.
c.colors.statusbar.caret.selection.fg = base00
# Background color of the statusbar in caret mode with a selection.
c.colors.statusbar.caret.selection.bg = base0D
# Background color of the progress bar.
c.colors.statusbar.progress.bg = base0D
# Default foreground color of the URL in the statusbar.
c.colors.statusbar.url.fg = base05
# Foreground color of the URL in the statusbar on error.
c.colors.statusbar.url.error.fg = base08
# Foreground color of the URL in the statusbar for hovered links.
c.colors.statusbar.url.hover.fg = base05
# Foreground color of the URL in the statusbar on successful load
# (http).
c.colors.statusbar.url.success.http.fg = base0C
# Foreground color of the URL in the statusbar on successful load
# (https).
c.colors.statusbar.url.success.https.fg = base0B
# Foreground color of the URL in the statusbar when there's a warning.
c.colors.statusbar.url.warn.fg = base0E
# Background color of the tab bar.
c.colors.tabs.bar.bg = base00
# Color gradient start for the tab indicator.
c.colors.tabs.indicator.start = base0D
# Color gradient end for the tab indicator.
c.colors.tabs.indicator.stop = base0C
# Color for the tab indicator on errors.
c.colors.tabs.indicator.error = base08
# Foreground color of unselected odd tabs.
c.colors.tabs.odd.fg = base05
# Background color of unselected odd tabs.
c.colors.tabs.odd.bg = base00
# Foreground color of unselected even tabs.
c.colors.tabs.even.fg = base05
# Background color of unselected even tabs.
c.colors.tabs.even.bg = base00
# Foreground color of selected odd tabs.
c.colors.tabs.selected.odd.fg = base00
# Background color of selected odd tabs.
c.colors.tabs.selected.odd.bg = base05
# Foreground color of selected even tabs.
c.colors.tabs.selected.even.fg = base00
# Background color of selected even tabs.
c.colors.tabs.selected.even.bg = base05
| mit | -1,389,548,130,739,206,100 | 28.421245 | 71 | 0.766434 | false |
leo-the-manic/django-combinedform | testproject/testapp/tests.py | 1 | 21039 | """Tests for the CombinedForm utilitiy class."""
import datetime
import unittest
import unittest.mock
import django.db.models
import django.forms
import django.test
import django.utils.timezone
import combinedform
class CombinedFormTest(unittest.TestCase):
"""Tests for the CombinedForm utility class."""
def test_keys(self):
"""Test that the `keys` method works as expected."""
class MyCombinedForm(combinedform.CombinedForm):
form1 = combinedform.Subform(unittest.mock.MagicMock)
inst = MyCombinedForm()
self.assertEqual(list(inst.keys()), ['form1'])
def test_subform_arguments(self):
"""subform__arg will get sent to the right subform."""
subform_mock = unittest.mock.MagicMock()
class MyCombinedForm(combinedform.CombinedForm):
form1 = combinedform.Subform(subform_mock)
MyCombinedForm(form1__foo='bar')
subform_mock.assert_called_with(foo='bar')
def test_subform_arguments_not_sent_elsewhere(self):
"""A subform argument doesn't get sent to an unintended subform."""
subform_a = unittest.mock.MagicMock()
subform_b = unittest.mock.MagicMock()
class Combined(combinedform.CombinedForm):
form1 = combinedform.Subform(subform_a)
form2 = combinedform.Subform(subform_b)
Combined(form2__foo='bar')
subform_a.assert_called_with()
def test_global_args(self):
"""Arguments get sent to all subforms."""
subform_a = unittest.mock.MagicMock()
subform_b = unittest.mock.MagicMock()
class Combined(combinedform.CombinedForm):
form1 = combinedform.Subform(subform_a)
form2 = combinedform.Subform(subform_b)
Combined(foo='bar')
subform_a.assert_called_with(foo='bar')
subform_b.assert_called_with(foo='bar')
def test_errors(self):
"""errors collects subform errors."""
subform_a = unittest.mock.MagicMock()
subform_a().errors = {'foo_field': 'Not enough bars'}
subform_b = unittest.mock.MagicMock()
subform_b().errors = {'bar_field': 'Not enough foos'}
class Combined(combinedform.CombinedForm):
form1 = combinedform.Subform(subform_a)
form2 = combinedform.Subform(subform_b)
c = Combined()
expected_errors = {'form1': {'foo_field': 'Not enough bars'},
'form2': {'bar_field': 'Not enough foos'}}
self.assertEqual(c.errors, expected_errors)
def test_errors_go_to_subform(self):
"""combinedform errors can be assigned to subform fields."""
def my_validator(form):
raise combinedform.FieldValidationError('my_form',
{'my_field': ['foo']})
class MyForm(django.forms.Form):
my_field = django.forms.CharField(required=False)
class MyCombinedForm(combinedform.CombinedForm):
my_form = combinedform.Subform(MyForm)
validators = (my_validator,)
# ensure the subform alone is valid
subform = MyForm({})
self.assertTrue(subform.is_valid())
form = MyCombinedForm({})
self.assertFalse(form.is_valid())
self.assertEqual(['foo'], form.my_form.errors['my_field'])
def test_combinedform_validators_called(self):
"""Validators for the completed formset get called."""
validate_stuff = unittest.mock.MagicMock()
class Form(combinedform.CombinedForm):
validators = [validate_stuff]
inst = Form()
inst.forms_valid()
validate_stuff.assert_called_with(inst)
validator1 = unittest.mock.MagicMock()
validator2 = unittest.mock.MagicMock()
class Form2(combinedform.CombinedForm):
validators = [validator1, validator2]
inst = Form2()
inst.forms_valid()
validator1.assert_called_with(inst)
validator2.assert_called_with(inst)
def test_forms_valid_when_no_validators(self):
"""When there are no validators, forms_valid() is True."""
class Form(combinedform.CombinedForm):
pass
inst = Form()
self.assertTrue(inst.forms_valid())
def test_validator_raises_means_forms_invalid(self):
"""When a validator raises ValidationError, forms_valid() is False."""
error = django.forms.ValidationError("Invalid")
validator = unittest.mock.MagicMock(side_effect=error)
class Combined(combinedform.CombinedForm):
validators = [validator]
inst = Combined()
self.assertFalse(inst.forms_valid())
def test_validator_exceptions_added_to_errorlist(self):
"""When a validator raises ValidationError, its message is stored."""
error = django.forms.ValidationError("Invalid")
validator = unittest.mock.MagicMock(side_effect=error)
class Combined(combinedform.CombinedForm):
validators = [validator]
inst = Combined()
inst.forms_valid()
self.assertEqual(inst.non_field_errors, ['Invalid'])
def test_iterator_returns_keys(self):
"""The iterator yields the subform names."""
form_a = unittest.mock.MagicMock()
class Combined(combinedform.CombinedForm):
form1 = combinedform.Subform(form_a)
iter_vals = list(iter(Combined()))
self.assertEqual(iter_vals, ['form1'])
form_b = unittest.mock.MagicMock()
class Combined2(combinedform.CombinedForm):
form1 = combinedform.Subform(form_a)
form2 = combinedform.Subform(form_b)
iter_vals2 = list(iter(Combined2()))
self.assertEqual(iter_vals2, ['form1', 'form2'])
def test_subforms_valid(self):
"""subforms_valid() is True if all subforms are valid."""
subform_a = unittest.mock.MagicMock()
subform_a.is_valid.return_value = True
class Combined(combinedform.CombinedForm):
form1 = combinedform.Subform(subform_a)
self.assertTrue(Combined().subforms_valid())
def test_invalid_subform_subforms_invalid(self):
"""subforms_valid() is False if a subform is invalid."""
subform_a = unittest.mock.MagicMock()
subform_a().is_valid.return_value = False
class Combined(combinedform.CombinedForm):
form1 = combinedform.Subform(subform_a)
self.assertFalse(Combined().subforms_valid())
def test_is_valid_true_when_all_valid(self):
"""is_valid() is True if subforms and CombinedForm are both valid."""
class Combined(combinedform.CombinedForm):
validators = [unittest.mock.MagicMock()]
form1 = combinedform.Subform(unittest.mock.MagicMock())
self.assertTrue(Combined().is_valid())
def test_is_valid_false_on_bad_validator(self):
"""is_valid() is False if CombinedForm validator is false."""
error = django.forms.ValidationError('a')
class Combined(combinedform.CombinedForm):
validators = [
unittest.mock.MagicMock(side_effect=error)]
form1 = combinedform.Subform(unittest.mock.MagicMock())
self.assertFalse(Combined().is_valid())
def test_is_valid_false_on_bad_subform(self):
"""is_valid() is False if a subform's is_valid() is False."""
subform = unittest.mock.MagicMock()
subform().is_valid.return_value = False
class Combined(combinedform.CombinedForm):
validators = [unittest.mock.MagicMock()]
form1 = combinedform.Subform(subform)
self.assertFalse(Combined().is_valid())
def test_is_valid_true_for_empty_inst(self):
"""A CombinedForm with no validators or subforms is valid."""
class Combined(combinedform.CombinedForm):
pass
self.assertTrue(Combined().is_valid())
def test_non_field_errors_gets_subform_errors(self):
"""non_field_errors gets all nonfield errors from subforms."""
subform = unittest.mock.MagicMock()
subform().non_field_errors.return_value = ['foo']
class Combined(combinedform.CombinedForm):
form1 = combinedform.Subform(subform)
self.assertEqual(Combined().non_field_errors, ['foo'])
def test_provides_combined_cleaned_data(self):
"""Provides a combined cleaned data attribute."""
RadioSelect = django.forms.RadioSelect
class YesNoForm(django.forms.Form):
val = django.forms.TypedChoiceField(((True, 'Yes'), (False, 'No')),
coerce=lambda v: v == 'Yes',
widget=RadioSelect)
class MyForm(combinedform.CombinedForm):
yesno = combinedform.Subform(YesNoForm, prefix='yesno')
f = MyForm({'yesno-val': 'Yes'})
self.assertTrue(f.is_valid(), f.errors)
self.assertEqual({'yesno': {'val': True}}, f.cleaned_data)
class TimeForm(django.forms.Form):
time = django.forms.DateTimeField()
class MyForm2(combinedform.CombinedForm):
event = combinedform.Subform(TimeForm, prefix='event')
f = MyForm2({'event-time': '4/5/2010 3:30'})
self.assertTrue(f.is_valid(), f.errors)
expected_time = datetime.datetime(year=2010, month=4, day=5, hour=3,
minute=30)
# django attaches a tz so attach to expected data, too
tz = django.utils.timezone
expected_time = tz.make_aware(expected_time, tz.get_default_timezone())
expected_data = {
'event': {'time': expected_time},
}
self.assertEqual(expected_data, f.cleaned_data)
def test_cleaneddata_without_prefix(self):
"""cleaned_data operates on prefix-less subforms."""
class MyForm(django.forms.Form):
my_field = django.forms.CharField()
class MyCombined(combinedform.CombinedForm):
form = combinedform.Subform(MyForm)
combined = MyCombined({'my_field': 'foo'})
assert combined.is_valid()
self.assertEqual({'form': {'my_field': 'foo'}}, combined.cleaned_data)
def test_initial_distributes_to_subforms(self):
"""The 'initial' kwarg of __init__ takes a nested dict."""
class Form1(django.forms.Form):
foo = django.forms.CharField()
class Form2(django.forms.Form):
foo = django.forms.CharField()
class Formset(combinedform.CombinedForm):
form1 = combinedform.Subform(Form1)
form2 = combinedform.Subform(Form2)
initial_data = {
'form1': {'foo': 'form1 foo'},
'form2': {'foo': 'form2 foo'},
}
fset = Formset(initial=initial_data)
self.assertEqual({'foo': 'form1 foo'}, fset.form1.initial)
self.assertEqual({'foo': 'form2 foo'}, fset.form2.initial)
def test_non_field_errors_gets_formsets(self):
"""non_field_errors can handle formsets."""
formset = unittest.mock.MagicMock()
del formset().non_field_errors
formset().non_form_errors.return_value = ['foo']
class Combined(combinedform.CombinedForm):
form1 = combinedform.Subform(formset)
self.assertEqual(Combined().non_field_errors, ['foo'])
def test_validator_args_errormessage(self):
"""A validator with the wrong signature gets a helpful message."""
validator = lambda: None
class Combined(combinedform.CombinedForm):
validators = [validator]
try:
Combined().is_valid()
except TypeError as e:
self.assertIn('Does your validator', str(e))
def test_validator_raising_typeerror_left_alone(self):
"""A validator which raises a TypeError doesn't get swallowed."""
def validator(form):
raise TypeError("Foo")
class Combined(combinedform.CombinedForm):
validators = [validator]
try:
Combined().is_valid()
except TypeError as e:
self.assertNotIn("Does your validator", str(e))
def test_empty_formset_doesnt_propgate_empty_errors(self):
"""A formset with no errors returns an empty error result."""
class MyModel(django.db.models.Model):
a = django.db.models.CharField(max_length=10)
b = django.db.models.CharField(max_length=10)
class MyForm(django.forms.ModelForm):
class Meta:
model = MyModel
fields = ('b',)
MyFormSet = django.forms.formsets.formset_factory(MyForm)
data = {'form-0-b': '1', 'form-1-b': '2', 'form-1-b': '3',
'form-INITIAL_FORMS': '0', 'form-TOTAL_FORMS': '3',
'form-MAX_NUM_FORMS': '1000'}
class MyCombinedForm(combinedform.CombinedForm):
myform = combinedform.Subform(MyFormSet)
print(MyCombinedForm().myform)
combined = MyCombinedForm(data)
self.assertEqual(combined.errors, {})
self.assertEqual(combined.non_field_errors, [])
class OrderByDependencyTest(unittest.TestCase):
"""Tests for the order_by_dependency function.
In order to make the test output less ridiculous to read, a "stringify"
method is used to refer to models just by their unqiue digit.
.. warning::
Django's ORM has some kind of global namespace problem, and doesn't
clean that namespace between test runs, so the models can't all be
named Model1, Model2 etc. between test cases.
"""
def stringify(self, seq):
"""Convert lists of models named ModelN to just strings of Ns.
N is an integer. Leading zeroes are removed from N.
"""
def reduce_to_number(s):
"""Reduce a string "a0b1" to "1".
Removes alphabetic characters as well as leading zeroes.
>>> reduce_to_number("a1b2c3")
"123"
>>> reduce_to_number("q0b2x0")
"20"
"""
digits = (c for c in s if c.isdigit())
num_str = ''.join(digits)
return num_str.lstrip('0') # remove leading zeroes
return " ".join(reduce_to_number(str(e)) for e in seq)
def test_basic_foreignkey(self):
"""Properly orders a basic foreign key relationship."""
class Model1(django.db.models.Model):
pass
class Model2(django.db.models.Model):
m1 = django.db.models.ForeignKey(Model1)
result = combinedform.order_by_dependency([Model2, Model1])
self.assertEqual(self.stringify(result), "1 2")
class Model3(django.db.models.Model):
pass
class Model4(django.db.models.Model):
m3 = django.db.models.ForeignKey(Model3)
result = combinedform.order_by_dependency([Model3, Model4])
self.assertEqual(self.stringify(result), "3 4")
def test_second_level_fks(self):
"""Test a set of foreign key relations two levels deep."""
"""Visual of the model relationships in this test:
m4
\
m2 m3
\ /
m1
"""
class Model01(django.db.models.Model):
pass
class Model02(django.db.models.Model):
m1 = django.db.models.ForeignKey(Model01)
class Model03(django.db.models.Model):
m1 = django.db.models.ForeignKey(Model01)
class Model04(django.db.models.Model):
m2 = django.db.models.ForeignKey(Model02)
result = combinedform.order_by_dependency(
[Model03, Model02, Model04, Model01])
# there are two equivalent possibilities
# also convert to strings because it's way easier to read the output
result = self.stringify(result)
self.assertIn(result, ["1 2 3 4", "1 2 4 3"])
def test_ignores_externals(self):
"""The ordering doesn't account for models not given as arguments."""
"""Visual of the model relationships in this test:
m1 m2
\ /
m3
/
m4
"""
class Model001(django.db.models.Model):
pass
class Model002(django.db.models.Model):
pass
class Model003(django.db.models.Model):
m1 = django.db.models.ForeignKey(Model001)
m2 = django.db.models.ForeignKey(Model002)
class Model004(django.db.models.Model):
m3 = django.db.models.ForeignKey(Model003)
# add extra models to artifically add depth to Model3 that's not
# relevant for the subgraph we're interested in; test if it is properly
# ignored
class ModelA(django.db.models.Model):
m3 = django.db.models.ForeignKey(Model003)
class ModelB(django.db.models.Model):
ma = django.db.models.ForeignKey(ModelA)
class ModelC(django.db.models.Model):
mb = django.db.models.ForeignKey(ModelB)
result = combinedform.order_by_dependency(
[Model003, Model002, Model004, Model001])
result = self.stringify(result)
self.assertIn(result, ["1 2 3 4", "2 1 3 4"])
class CombinedFormIntegrationTest(django.test.TestCase):
"""Test the features of CombinedForm which use the database."""
def test_dependency_saving(self):
"""Test models are saved in a safe order and properly linked."""
class ModelFoo(django.db.models.Model):
description = django.db.models.CharField(max_length=20)
class ModelBar(django.db.models.Model):
name = django.db.models.CharField(max_length=20)
foo = django.db.models.ForeignKey(ModelFoo)
class ModelBuzz(django.db.models.Model):
title = django.db.models.CharField(max_length=20)
bar = django.db.models.ForeignKey(ModelBar)
class FooForm(django.forms.ModelForm):
class Meta:
model = ModelFoo
fields = ('description',)
class BarForm(django.forms.ModelForm):
class Meta:
model = ModelBar
fields = ('name',)
class BuzzForm(django.forms.ModelForm):
class Meta:
model = ModelBuzz
fields = ('title',)
fset_factory = django.forms.models.inlineformset_factory
BuzzFormset = fset_factory(ModelBar, ModelBuzz, form=BuzzForm,
can_delete=False)
class TheForm(combinedform.CombinedForm):
# models are given backwards just to ensure it doesn't accidentally
# save in the intended order
buzz = combinedform.Subform(BuzzFormset)
bar = combinedform.Subform(BarForm)
foo = combinedform.Subform(FooForm)
formdata = {'description': 'an', 'name': 'i',
'modelbuzz_set-0-title': 'yo',
'modelbuzz_set-1-title': 'dawg',
'modelbuzz_set-TOTAL_FORMS': 3,
'modelbuzz_set-INITIAL_FORMS': 0,
'modelbuzz_set-MAX_NUM_FORMS': 1000, }
inst = TheForm(formdata)
self.assertTrue(inst.is_valid())
saved = inst.save(commit=False) # can't do a real save on above models
buzz = saved['buzz'][0]
self.assertIsInstance(buzz, ModelBuzz)
self.assertEqual(buzz.title, 'yo')
self.assertTrue(isinstance(buzz.bar, ModelBar))
self.assertEqual(buzz.bar.name, 'i')
self.assertTrue(isinstance(buzz.bar.foo, ModelFoo))
self.assertEqual(buzz.bar.foo.description, 'an')
class MainFormTest(unittest.TestCase):
"""Tests for ``main_form`` attribute of py:class:`CombinedForm`."""
def mockmodelform(self):
"""Make a mock ModelForm."""
form = unittest.mock.MagicMock(spec=django.forms.ModelForm)
model = unittest.mock.MagicMock(spec=django.db.models.Model)
form.return_value.save.return_value = model
return form
def test_save_obeys_main_form(self):
"""The save() method will proxy the value of 'main_form' if set."""
MyModelForm = self.mockmodelform()
class MyCombinedForm(combinedform.CombinedForm):
form_a = combinedform.Subform(MyModelForm)
form_b = combinedform.Subform(self.mockmodelform())
main_form = 'form_a'
form = MyCombinedForm()
self.assertEqual(form.save(), MyModelForm().save())
def test_save_returns_map_with_no_main(self):
"""If main_class is not set, save() returns a map."""
class MyCombinedForm(combinedform.CombinedForm):
form_a = combinedform.Subform(self.mockmodelform())
form_b = combinedform.Subform(self.mockmodelform())
form = MyCombinedForm()
self.assertEqual(set(form.save().keys()), set(['form_a', 'form_b']))
| lgpl-3.0 | 8,430,362,887,954,290,000 | 33.890547 | 79 | 0.609202 | false |
be-cloud-be/horizon-addons | horizon/school_course_description/controllers/main.py | 1 | 2421 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2015 be-cloud.be
# Jerome Sonnet <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
from openerp import http
from openerp.http import request
from openerp import tools
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
class school_course_documentation(http.Controller):
@http.route(['/course_doc/<model("school.course"):course>'], type='http', auth='public', website=True)
def course_doc(self, course, redirect=None, **post):
values = {
'course' : course,
'doc' : course.documentation_id,
}
return request.website.render("school_course_description.school_course", values)
@http.route(['/course_doc/edit/<model("school.course"):course>'], type='http', auth='user', website=True)
def course_doc_edit(self, course, redirect=None, **post):
draft_doc = course.env['school.course_documentation'].search([['course_id', '=', course.id],['state','=','draft']])
if not draft_doc:
active_doc = course.env['school.course_documentation'].search([['course_id', '=', course.id],['state','=','published']])
if active_doc:
draft_doc = active_doc.copy()
else:
draft_doc = course.env['school.course_documentation'].create({'course_id' : course.id})
values = {
'course' : course,
'doc' : draft_doc,
}
return request.website.render("school_course_description.school_course_edit", values) | agpl-3.0 | 6,456,291,296,646,337,000 | 43.814815 | 132 | 0.601075 | false |
erikrose/oedipus | oedipus/tests/test_results.py | 1 | 6546 | import collections
import fudge
from nose.tools import eq_, assert_raises
from oedipus import S
from oedipus.tests import Biscuit, SphinxMockingTestCase, BaseSphinxMeta
from oedipus.results import ObjectResults, DictResults, TupleResults
class ResultsTestCase(SphinxMockingTestCase):
"""Tests for various result formatters"""
@fudge.patch('sphinxapi.SphinxClient')
def test_objects(self, sphinx_client):
"""Test constructing and iterating over object-style results."""
self.mock_sphinx(sphinx_client)
results = list(S(Biscuit)) # S.__iter__ and DictResults.__iter__
eq_(results[0].color, 'red')
eq_(results[1].color, 'blue')
@fudge.patch('sphinxapi.SphinxClient')
def test_dicts_all_fields(self, sphinx_client):
"""Test constructing and iterating over dict-style results returning all model fields."""
self.mock_sphinx(sphinx_client)
results = list(S(Biscuit).values_dict())
eq_(results, [{'color': 'red', 'id': 123},
{'color': 'blue', 'id': 124}])
@fudge.patch('sphinxapi.SphinxClient')
def test_dicts_without_id(self, sphinx_client):
"""Test dict-style results with explicit fields excluding ID."""
self.mock_sphinx(sphinx_client)
results = list(S(Biscuit).values_dict('color'))
eq_(results, [{'color': 'red'},
{'color': 'blue'}])
@fudge.patch('sphinxapi.SphinxClient')
def test_dicts_overriding(self, sphinx_client):
"""Calls to ``values_dict()`` should override previous ones."""
self.mock_sphinx(sphinx_client)
results = list(S(Biscuit).values_dict('color').values_dict('id'))
eq_(results, [{'id': 123},
{'id': 124}])
@fudge.patch('sphinxapi.SphinxClient')
def test_tuples(self, sphinx_client):
"""Test constructing and iterating over tuple-style results returning all model fields."""
self.mock_sphinx(sphinx_client)
results = list(S(Biscuit).values('id', 'color'))
eq_(results, [(123, 'red'), (124, 'blue')])
@fudge.patch('sphinxapi.SphinxClient')
def test_tuples_without_id(self, sphinx_client):
"""Test tuple-style results that don't return ID."""
self.mock_sphinx(sphinx_client)
results = list(S(Biscuit).values('color'))
eq_(results, [('red',), ('blue',)])
@fudge.patch('sphinxapi.SphinxClient')
def test_tuples_overriding(self, sphinx_client):
"""Calls to ``values()`` should override previous ones."""
self.mock_sphinx(sphinx_client)
results = list(S(Biscuit).values('color').values('id'))
eq_(results, [(123,), (124,)])
@fudge.patch('sphinxapi.SphinxClient')
def test_tuples_no_fields(self, sphinx_client):
"""An empty values() call should raise ``TypeError``."""
s = S(Biscuit)
assert_raises(TypeError, s.values)
@fudge.patch('sphinxapi.SphinxClient')
def test_id_field(self, sphinx_client):
"""Assert that fetching results gets its object ID from the attr named by SphinxMeta.field_id, if present."""
(sphinx_client.expects_call().returns_fake()
.is_a_stub()
.expects('RunQueries').returns(
[{'status': 0,
'total': 2,
'matches':
[{'attrs': {'thing_id': 123},
'id': 3,
'weight': 11111},
{'attrs': {'thing_id': 124},
'id': 4,
'weight': 10000}]}]))
class FunnyIdBiscuit(Biscuit):
class SphinxMeta(BaseSphinxMeta):
id_field = 'thing_id'
results = list(S(FunnyIdBiscuit).values('color'))
eq_(results, [('red',), ('blue',)])
@fudge.patch('sphinxapi.SphinxClient')
def test_object_ids(self, sphinx_client):
"""Test object_ids() method."""
self.mock_sphinx(sphinx_client)
results = S(Biscuit).object_ids()
# Note: The results are dependent on mock_sphinx. It should
# return a list of ids.
eq_(results, [123, 124])
@fudge.patch('sphinxapi.SphinxClient')
def test_object_ids_with_id_field(self, sphinx_client):
"""Test object_ids() method with field_id."""
(sphinx_client.expects_call().returns_fake()
.is_a_stub()
.expects('RunQueries').returns(
[{'status': 0,
'total': 2,
'matches':
[{'attrs': {'thing_id': 123},
'id': 3,
'weight': 11111},
{'attrs': {'thing_id': 124},
'id': 4,
'weight': 10000}]}]))
class FunnyIdBiscuit(Biscuit):
class SphinxMeta(BaseSphinxMeta):
id_field = 'thing_id'
results = S(FunnyIdBiscuit).object_ids()
eq_(results, [123, 124])
def test_object_content_for_fields():
TestResult = collections.namedtuple('TestResult', ['field1', 'field2'])
content = ObjectResults.content_for_fields(
TestResult('1', '2'),
['field1', 'field2'],
['field1', 'field2'])
eq_(content, ('1', '2'))
# Test the case where fields != highlight_fields.
content = ObjectResults.content_for_fields(
TestResult('4', '5'),
['field1', 'field2'],
['field1'])
eq_(content, ('4',))
def test_tuple_content_for_fields():
content = TupleResults.content_for_fields(
('1', '2'),
['field1', 'field2'],
['field1', 'field2'])
eq_(content, ('1', '2'))
# Test the case where fields != highlight_fields.
content = TupleResults.content_for_fields(
('1', '2'),
['field1', 'field2'],
['field1'])
eq_(content, ('1',))
def test_dict_content_for_fields():
content = DictResults.content_for_fields(
{'field1': '1', 'field2': '2'},
['field1', 'field2'],
['field1', 'field2'])
eq_(content, ('1', '2'))
# Test the case where fields != highlight_fields.
content = DictResults.content_for_fields(
{'field1': '1', 'field2': '2'},
['field1', 'field2'],
['field1'])
eq_(content, ('1',))
| bsd-3-clause | -3,127,027,413,743,744,000 | 36.405714 | 117 | 0.539413 | false |
Inboxen/queue | tests/test_misc.py | 1 | 3639 | ##
# Copyright (C) 2014 Jessica Tallon & Matt Molyneaux
#
# This file is part of Inboxen.
#
# Inboxen is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Inboxen is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Inboxen. If not, see <http://www.gnu.org/licenses/>.
##
from datetime import datetime
from django import test
from django.contrib.auth import get_user_model
from django.core import mail
from celery import chain
from pytz import utc
from inboxen import models
from queue import tasks
class StatsTestCase(test.TestCase):
"""Test flag tasks"""
# only testing that it doesn't raise an exception atm
fixtures = ['inboxen_testdata.json']
def test_no_exceptions(self):
tasks.statistics.delay()
class FlagTestCase(test.TestCase):
"""Test flag tasks"""
# only testing that it doesn't raise an exception atm
# TODO: actually test
fixtures = ['inboxen_testdata.json']
def setUp(self):
super(FlagTestCase, self).setUp()
self.user = get_user_model().objects.get(username="isdabizda")
self.emails = [email.id for email in models.Email.objects.filter(inbox__user=self.user)[:10]]
def test_flags_from_unified(self):
tasks.deal_with_flags.delay(self.emails, user_id=self.user.id)
def test_flags_from_single_inbox(self):
inbox = models.Inbox.objects.filter(email__id=self.emails[0]).only("id").get()
tasks.deal_with_flags.delay(self.emails, user_id=self.user.id, inbox_id=inbox.id)
class SearchTestCase(test.TestCase):
fixtures = ['inboxen_testdata.json']
def test_search(self):
result = tasks.search.delay(1, "bizz").get()
self.assertItemsEqual(result.keys(), ["emails", "inboxes"])
@test.utils.override_settings(
EMAIL_BACKEND='django.core.mail.backends.locmem.EmailBackend',
ADMINS=(("Travis", "[email protected]"),),
)
class RequestReportTestCase(test.TestCase):
fixtures = ['inboxen_testdata.json']
def setUp(self):
self.user = get_user_model().objects.get(username="isdabizda")
self.user.userprofile # autocreate a profile
now = datetime.now(utc)
models.Request.objects.create(amount=200, date=now, succeeded=True, requester=self.user)
self.waiting = models.Request.objects.create(amount=200, date=now, requester=self.user)
def test_fetch(self):
results = tasks.requests_fetch.delay().get()
self.assertEqual(len(results), 1)
self.assertItemsEqual(
results[0],
("id", "amount", "date", "requester__username", "requester__userprofile__pool_amount"),
)
self.assertEqual(results[0]["id"], self.waiting.id)
def test_report(self):
chain(tasks.requests_fetch.s(), tasks.requests_report.s()).delay()
self.assertEqual(len(mail.outbox), 1)
self.assertIn("Amount: 200", mail.outbox[0].body)
self.assertIn("User: %s" % (self.user.username), mail.outbox[0].body)
self.assertIn("Date:", mail.outbox[0].body)
self.assertIn("Current: %s" % (self.user.userprofile.pool_amount,), mail.outbox[0].body)
| agpl-3.0 | -8,369,771,110,336,177,000 | 35.757576 | 103 | 0.673537 | false |
nextgis/qgis.lesis2sqlite | src/dialog.py | 1 | 10596 | # -*- coding: utf-8 -*-
#******************************************************************************
#
# lesis2sqlite
# ---------------------------------------------------------
# This plugin convert lesis GIS working dir structure to sqlite data base
#
# Author: Alexander Lisovenko, [email protected]
# *****************************************************************************
# Copyright (c) 2015-2016. NextGIS, [email protected]
#
# This source is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 2 of the License, or (at your option)
# any later version.
#
# This code is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# A copy of the GNU General Public License is available on the World Wide Web
# at <http://www.gnu.org/licenses/>. You can also obtain it by writing
# to the Free Software Foundation, 51 Franklin Street, Suite 500 Boston,
# MA 02110-1335 USA.
#
#******************************************************************************
import os
from PyQt4 import QtCore
from PyQt4 import QtGui
from qgis_plugin_base import Plugin
from worker import Worker
class InputValue(QtCore.QObject):
def __init__(self, vName, vType, defaultValue):
QtCore.QObject.__init__(self)
self._name = vName
self._type = vType
self._defaultValue = defaultValue
self._settingsKey = "%s/%s" % (Plugin().getPluginName(), self._name)
def getValue(self):
value = QtCore.QSettings().value(
self._settingsKey,
self._defaultValue,
type=self._type)
return value
def setValue(self, value):
QtCore.QSettings().setValue(self._settingsKey, value)
class IVFile(QtGui.QWidget, InputValue):
def __init__(self, name, title, chooserCaption, parent=None):
InputValue.__init__(self, name, unicode, u"")
QtGui.QWidget.__init__(self, parent)
self._chooserCaption = chooserCaption
self._layout = QtGui.QHBoxLayout(self)
self.__lable = QtGui.QLabel(title)
self.__lable.setFixedWidth(80)
self.__lable.setWordWrap(True)
self._layout.addWidget(self.__lable)
self._inputValue = QtGui.QLineEdit(self.getValue())
self._inputValue.editingFinished.connect(self.saveValue)
self._layout.addWidget(self._inputValue)
self.__button = QtGui.QPushButton(QtCore.QCoreApplication.translate("lesis2sqlite", "Browse"))
self.__button.clicked.connect(self.chooseFile)
self._layout.addWidget(self.__button)
def chooseFile(self):
chooserDir = QtCore.QDir.homePath()
v = self.getValue()
if (v != u"") and os.path.exists(v):
chooserDir = os.path.dirname(self.getValue())
fName = QtGui.QFileDialog.getOpenFileName(self, self._chooserCaption, chooserDir)
self._inputValue.setText(fName)
self.saveValue()
def saveValue(self):
v = self._inputValue.text()
self.setValue(v)
class IVNewFile(IVFile):
def chooseFile(self):
chooserDir = QtCore.QDir.homePath()
v = self.getValue()
if (v != u"") and os.path.exists(v):
chooserDir = os.path.dirname(self.getValue())
fName = QtGui.QFileDialog.getSaveFileName(self, self._chooserCaption, chooserDir)
self._inputValue.setText(fName)
self.saveValue()
class IVDir(IVFile):
def chooseFile(self):
chooserDir = QtCore.QDir.homePath()
v = self.getValue()
if (v != u"") and os.path.exists(v):
chooserDir = os.path.dirname(self.getValue())
fName = QtGui.QFileDialog.getExistingDirectory(self, self._chooserCaption, chooserDir)
self._inputValue.setText(fName)
self.saveValue()
class Lesis2SQLiteDialog(QtGui.QDialog):
layerSrcCreated = QtCore.pyqtSignal(unicode)
def __init__(self, parent=None):
QtGui.QDialog.__init__(self, parent)
self.setWindowTitle(Plugin().getPluginName())
self.__mainLayout = QtGui.QVBoxLayout(self)
self.__layout = QtGui.QGridLayout(self)
self.__mainLayout.addLayout(self.__layout)
self.__layout.addWidget(QtGui.QLabel(self.tr("Lesis base directory") + ":"), 0, 0)
self.__layout.addWidget(QtGui.QLabel(self.tr("Parcels shape-file") + ":"), 1, 0)
self.__layout.addWidget(QtGui.QLabel(self.tr("SQLite database") + ":"), 2, 0)
settings = QtCore.QSettings()
self.lesisBaseDir = QtGui.QLineEdit(self)
self.lesisBaseDir.setText(
settings.value("%s/lesisBaseDir" % (Plugin().getPluginName(), ), u"", type = unicode)
)
self.shape = QtGui.QLineEdit(self)
self.shape.setText(
settings.value("%s/shape" % (Plugin().getPluginName(), ), u"", type = unicode)
)
self.sqliteDB = QtGui.QLineEdit(self)
self.sqliteDB.setText(
settings.value("%s/sqliteDB" % (Plugin().getPluginName(), ), u"", type = unicode)
)
self.__layout.addWidget(self.lesisBaseDir, 0, 1)
self.__layout.addWidget(self.shape, 1, 1)
self.__layout.addWidget(self.sqliteDB, 2, 1)
self.btnLesisBaseDir = QtGui.QPushButton(self.tr("Browse"), self)
self.btnLesisBaseDir.clicked.connect(self.chooseLesisBaseDir)
self.btnShape = QtGui.QPushButton(self.tr("Browse"), self)
self.btnShape.clicked.connect(self.chooseShapeFile)
self.btnSQLiteDB = QtGui.QPushButton(self.tr("Browse"), self)
self.btnSQLiteDB.clicked.connect(self.choosesqliteDB)
self.__layout.addWidget(self.btnLesisBaseDir, 0, 2)
self.__layout.addWidget(self.btnShape, 1, 2)
self.__layout.addWidget(self.btnSQLiteDB, 2, 2)
# self.lesisBaseDir = IVDir("lesis_base_dir", self.tr("Lesis base dir"), self.tr("Select Lesis base dir"), self)
# self.__layout.addWidget(self.lesisBaseDir)
# self.shape = IVFile("lesis_shape", self.tr("Videls shape 1"), self.tr("Select Videls shape"), self)
# self.__layout.addWidget (self.shape)
# self.sqliteDB = IVNewFile("sqlite_db", self.tr("SQLite db 1"), self.tr("Select output sqlite DB"), self)
# self.__layout.addWidget(self.sqliteDB)
self.__mainLayout.addItem(QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding))
self.__statusLable = QtGui.QLabel(self)
self.__mainLayout.addWidget(self.__statusLable)
self.__bbox = QtGui.QDialogButtonBox(QtGui.QDialogButtonBox.Ok)
self.__bbox.setSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Fixed)
self.__bbox.accepted.connect(self.accept)
self.__mainLayout.addWidget(self.__bbox)
self.worker = None
self.thread = None
def chooseShapeFile(self):
chooserDir = QtCore.QDir.homePath()
currentFilename = self.shape.text()
if os.path.exists(currentFilename):
chooserDir = os.path.dirname(currentFilename)
fName = QtGui.QFileDialog.getOpenFileName(self, self.tr("Select Parcels shape"), chooserDir)
if fName != u"":
self.shape.setText(fName)
def chooseLesisBaseDir(self):
chooserDir = QtCore.QDir.homePath()
currentDirname = self.lesisBaseDir.text()
if os.path.exists(currentDirname):
chooserDir = os.path.dirname(currentDirname)
fName = QtGui.QFileDialog.getExistingDirectory(self, self.tr("Select Lesis base dir"), chooserDir)
if fName != u"":
self.lesisBaseDir.setText(fName)
def choosesqliteDB(self):
chooserDir = QtCore.QDir.homePath()
currentFilename = self.sqliteDB.text()
if os.path.exists(currentFilename):
chooserDir = os.path.dirname(currentFilename)
fName = QtGui.QFileDialog.getSaveFileName(self, self.tr("Select output sqlite DB"), chooserDir)
if fName != u"":
self.sqliteDB.setText(fName)
def validate(self):
exceptions = []
if not os.path.exists(self.lesisBaseDir.text()):
exceptions.append(self.tr("Specified Lesis base dir not found!"))
if not os.path.exists(self.shape.text()):
exceptions.append(self.tr("Specified shape file not found!"))
if self.sqliteDB.text() == u"":
exceptions.append(self.tr("Sqlite db file not specified!"))
if len(exceptions) > 0:
QtGui.QMessageBox.critical(self, self.tr("Validate error"), "\n".join(exceptions))
return False
return True
def accept(self):
if not self.validate():
return
settings = QtCore.QSettings()
settings.setValue("%s/lesisBaseDir" % (Plugin().getPluginName(), ), self.lesisBaseDir.text())
settings.setValue("%s/shape" % (Plugin().getPluginName(), ), self.shape.text())
settings.setValue("%s/sqliteDB" % (Plugin().getPluginName(), ), self.sqliteDB.text())
self.__bbox.button(QtGui.QDialogButtonBox.Ok).setEnabled(False)
worker = Worker(
self.lesisBaseDir.text(),
self.shape.text(),
self.sqliteDB.text()
)
thread = QtCore.QThread(self)
worker.moveToThread(thread)
worker.statusChanged.connect(self.changeProgressStatus)
worker.error.connect(self.workerErorrProcess)
thread.started.connect(worker.run)
worker.stoped.connect(self.addLayer)
worker.stoped.connect(thread.quit)
worker.stoped.connect(worker.deleteLater)
worker.stoped.connect(thread.deleteLater)
worker.stoped.connect(self.close)
thread.start()
self.thread = thread
self.worker = worker
def reject(self):
Plugin().plPrint("reject")
if self.worker is not None:
self.worker.interupt()
return QtGui.QDialog.reject(self)
def workerErorrProcess(self, msg):
QtGui.QMessageBox.critical(self, "Export error", msg)
def changeProgressStatus(self, status):
self.__statusLable.setText(status)
# Plugin().plPrint("change status on '%s'" % status)
def addLayer(self):
self.layerSrcCreated.emit(self.sqliteDB.text())
| gpl-2.0 | -8,985,815,751,092,968,000 | 35.163823 | 120 | 0.620895 | false |
ael-code/libreant | setup.py | 1 | 4963 | import os
import sys
import msgfmt
from setuptools import setup
from setuptools.command.install_lib import install_lib as _install_lib
from setuptools.command.develop import develop as _develop
from distutils.command.build import build as _build
from setuptools.command.test import test as TestCommand
from distutils.cmd import Command
class compile_translations(Command):
description = 'compile message catalogs to .mo files'
user_options = [('force', 'f', "compile also not updated message catalogs")]
boolean_options = ['force']
def initialize_options(self):
self.force = False
def finalize_options(self):
pass
def run(self):
"""
Compile all message catalogs .mo files into .po files.
Skips not changed file based on source mtime.
"""
# thanks to deluge guys ;)
po_dir = os.path.join(os.path.dirname(__file__), 'webant', 'translations')
print('Compiling po files from "{}"...'.format(po_dir))
for lang in os.listdir(po_dir):
sys.stdout.write("\tCompiling {}... ".format(lang))
sys.stdout.flush()
curr_lang_path = os.path.join(po_dir, lang)
for path, dirs, filenames in os.walk(curr_lang_path):
for f in filenames:
if f.endswith('.po'):
src = os.path.join(path, f)
dst = os.path.join(path, f[:-3] + ".mo")
if not os.path.exists(dst) or self.force:
msgfmt.make(src, dst)
print("ok.")
else:
src_mtime = os.stat(src)[8]
dst_mtime = os.stat(dst)[8]
if src_mtime > dst_mtime:
msgfmt.make(src, dst)
print("ok.")
else:
print("already up to date.")
print('Finished compiling translation files.')
class build(_build):
sub_commands = [('compile_translations', None)] + _build.sub_commands
class install_lib(_install_lib):
def run(self):
self.run_command('compile_translations')
_install_lib.run(self)
class develop(_develop):
def run(self):
self.run_command('compile_translations')
_develop.run(self)
class NoseTestCommand(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
# Run nose ensuring that argv simulates running nosetests directly
import nose
nose.run_exit(argv=['nosetests'])
def read(fname):
with open(os.path.join(os.path.dirname(__file__), fname)) as buf:
return buf.read()
conf = dict(
name='libreant',
version='0.3',
description='{e,}book archive focused on small grass root archives, distributed search, low assumptions',
long_description=read('README.rst'),
author='insomnialab',
author_email='[email protected]',
url='https://github.com/insomnia-lab/libreant',
license='AGPL',
packages=['libreantdb',
'webant',
'webant.api',
'presets',
'archivant',
'users',
'utils',
'cli',
'conf'],
install_requires=[
'gevent',
'elasticsearch >=1, <2',
'flask-bootstrap',
'Flask-Babel',
'flask-script',
'Flask-Authbone >=0.2',
'Flask',
'opensearch',
'Fsdb',
'click',
'peewee',
'passlib >=1.6, <1.7' # version 1.7 will drop python2 suport
],
package_data = {
# If any package contains *.mo include them
# important! leave all the stars!
'webant': ['translations/*/*/*.mo']
},
include_package_data=True,
tests_require=['nose', 'coverage'],
zip_safe=False,
cmdclass={'build': build,
'test': NoseTestCommand,
'install_lib': install_lib,
'develop': develop,
'compile_translations': compile_translations},
entry_points={'console_scripts': [
'libreant=cli.libreant:libreant',
'agherant=cli.agherant:agherant',
'libreant-users=cli.libreant_users:libreant_users',
'libreant-db=cli.libreant_db:libreant_db'
]},
classifiers=[
"Framework :: Flask",
"License :: OSI Approved :: GNU Affero General Public License v3",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python :: 2",
"Development Status :: 4 - Beta"
])
if __name__ == '__main__':
setup(**conf)
| agpl-3.0 | 2,546,213,233,996,152,300 | 32.308725 | 113 | 0.532138 | false |
dinos66/termAnalysis | forTateDataset/termAnalysisDynamic.py | 1 | 80493 | # -*- coding: utf-8 -*-
'''
Create adjacency matrices and analyse terms dynamically
'''
print('Create dynamic adjacency matrices and ESOMs')
#--------------------------------------------
#run create_Info_Files.py before running this
#--------------------------------------------
import pickle, time, igraph, glob, os, somoclu, collections
import itertools, codecs, seaborn, math, pprint, random, re
from matplotlib import rc
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import interactive
from scipy.spatial import distance
from matplotlib.pyplot import cm
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import matplotlib.colors as colors
import seaborn as sns
import sklearn.cluster as clusterAlgs
from nltk.corpus import wordnet as wn
#--------------------------------------------
print(time.asctime( time.localtime(time.time()) ))
t = time.time()
edgeReadPath = './data/artworks_tmp/edges/dynamic'
adjMatWritePath = './data/artworks_tmp/adjacencyMats/dynamic'
distMatWritePath = './other_data/artworks_distanceMats'
potentMatWritePath = './other_data/artworks_potentialMats'
gravMatWritePath = './other_data/artworks_gravityMats'
umatrixWritePath = './other_data/artworks_UMX'
splitsMergesWritePath = './data/splitsMerges'
figWritePath = './data/artworks_figs'
greenwichFigWritePath = figWritePath+'/greenwichForGIF'
greenwichUmatrixWritePath = umatrixWritePath+'/greenwich'
gephiWritePath = './other_data/artworks_gephi'
statsWritePath = './data/artworks_stats'
if not os.path.exists(adjMatWritePath):
os.makedirs(adjMatWritePath)
if not os.path.exists(edgeReadPath):
os.makedirs(edgeReadPath)
if not os.path.exists(distMatWritePath):
os.makedirs(distMatWritePath)
if not os.path.exists(potentMatWritePath):
os.makedirs(potentMatWritePath)
os.makedirs(gravMatWritePath)
if not os.path.exists(umatrixWritePath):
os.makedirs(umatrixWritePath)
if not os.path.exists(figWritePath):
os.makedirs(figWritePath)
# if not os.path.exists(gephiWritePath):
# os.makedirs(gephiWritePath)
if not os.path.exists(greenwichFigWritePath):
os.makedirs(greenwichFigWritePath)
if not os.path.exists(greenwichUmatrixWritePath):
os.makedirs(greenwichUmatrixWritePath)
if not os.path.exists(splitsMergesWritePath):
os.makedirs(splitsMergesWritePath)
LVLs = ['lvl1']#,'lvl2','lvl3','lvlA'] #'lvl1','lvl2','lvl3','lvlA'
yearPeriods = ['1800s']#,'2000s'] #
heatmapFonts = [12,7,6,4]#12,7,6,
trueYearsIni = [1800,1964]#
termLabelDict = pickle.load(open('./data/artworks_verification_labels/WmatrixLabelDict.pck','rb'))
def recRank(mylist):#Perform the Reciprocal Rank Fusion for a list of rank values
finscore = []
mylist=[x+1 for x in mylist]
for rank in mylist:
finscore.append(1/(20+rank))
return sum(finscore)
def toroidDistance(myarray,width,height):
somDim2 = []
for idx,x in enumerate(myarray[:-1]):
newxa = myarray[idx+1:]
for nx in newxa:
somDim2.append(np.sqrt(min(abs(x[0] - nx[0]), width - abs(x[0] - nx[0]))**2 + min(abs(x[1] - nx[1]), height - abs(x[1]-nx[1]))**2))
SD = np.array(somDim2)
return distance.squareform(SD)
def toroidDistanceSingle(coords1,coords2,width,height):
return (np.sqrt(min(abs(coords1[0] - coords2[0]), width - abs(coords1[0] - coords2[0]))**2 + min(abs(coords1[1] - coords2[1]), height - abs(coords1[1]-coords2[1]))**2))
def toroidCoordinateFinder(coorx,distx,coory,disty,w,h):
if coorx+distx>=w:
ncx = coorx+distx-w
elif coorx+distx<0:
ncx = w+coorx+distx
else:
ncx = coorx+distx
if coory+disty>=h:
ncy = coory+disty-h
elif coory+disty<0:
ncy = h+coory+disty
else:
ncy = coory+disty
return (ncx,ncy)
def wordnetHypernymLemmaExtractor(cat):
tmpHypernym = []
# for sn in wn.synsets(cat, pos=wn.NOUN)[0]:
try:
sn = wn.synsets(cat, pos=wn.NOUN)[0]
for snh in sn.hypernyms():
tmpHypernym.extend(snh.lemma_names())
except:
try:
sn = wn.synsets(cat)[0]
for snh in sn.hypernyms():
tmpHypernym.extend(snh.lemma_names())
except:
pass
pass
return sorted(list(set(tmpHypernym)))
def wordnetClusterSimilarityComputer(clusterTerms):
allcombs = itertools.combinations(clusterTerms,2)
maxSimilarities = []
for tt in allcombs:
ss1 = wn.synsets(tt[0])
ss2 = wn.synsets(tt[1])
try:
maxSimilarities.append(max(s1.path_similarity(s2) for (s1, s2) in product(ss1, ss2)))
except:
pass
return maxSimilarities/len(list(allcombs))
for lIdx,lvl in enumerate(LVLs):
heatMapFont = heatmapFonts[lIdx]
for idy,years in enumerate(yearPeriods):
files = glob.glob(edgeReadPath+'/'+years+lvl+'_*.csv')
files.sort(key=lambda x: os.path.getmtime(x))
try:
dataDict = pickle.load(open('./data/artworks_tmp/edgeDictDynamic'+years+lvl+'.pck','rb'))
except:
dataDict = {'uniquePersistentTerms':[]}
termsYears = []
for filename in files:
periodIdx = filename[filename.index(lvl)+5:-4]
tmpTerms = []
dataDict[periodIdx] = {}
with codecs.open(filename, 'r','utf8') as f:
# print(filename)
adjList = []
next(f)
for line in f:
line = line.split(',')
tripletuple = line[:2]
tmpTerms.extend(tripletuple)
tripletuple.append(int(line[2].strip()))
adjList.append(tuple(tripletuple))
dataDict[periodIdx]['adjList'] = adjList
termsYears.append(list(set(tmpTerms)))
print('There are %s unique nodes for period %s' %(len(termsYears[-1]),periodIdx))
repetitiveTerms = collections.Counter(list(itertools.chain.from_iterable(termsYears)))
dataDict['allTerms'] = list(repetitiveTerms.keys())
dataDict['uniquePersistentTerms'] = [x for x,v in repetitiveTerms.items() if v == len(files)]
dataDict['uniquePersistentTerms'].sort()
pass
with open(statsWritePath+'/'+years+lvl+'_unique_persistent_terms.txt','w') as f:
for word in dataDict['uniquePersistentTerms']:
f.write(word+'\n')
statement = ('For %s in the %s there are %s unique persistent terms globally out of %s unique terms' %(lvl,years,len(dataDict['uniquePersistentTerms']),len(dataDict['allTerms'])))
print(statement)
'''set up SOM'''#--------------------------------------------------------------------
# n_columns, n_rows = 100, 60
# n_columns, n_rows = 150, 90
# n_columns, n_rows = 200, 120
# lablshift = 1
#------------------------------
if lvl == 'lvl1':
n_columns, n_rows = 20, 12
lablshift = .2
elif lvl == 'lvl2':
n_columns, n_rows = 40, 24
lablshift = .3
elif lvl == 'lvl3':
n_columns, n_rows = 50, 30
lablshift = .4
elif lvl == 'lvlA':
n_columns, n_rows = 60, 40
lablshift = .5 #------------
epochs2 = 3
som = somoclu.Somoclu(n_columns, n_rows, maptype="toroid", initialization="pca")
savefig = True
SOMdimensionsString = 'x'.join([str(x) for x in [n_columns,n_rows]])
print('SOM dimension is: %s' %SOMdimensionsString)
#--------------------------------------------------------------------------------
yearList = []
count = 0
termPrRanks, termAuthRanks, termHubRanks, termBetweenRanks = {}, {}, {}, {}
histoNormAggList = []
for filename in files:
periodIdx = filename[filename.index(lvl)+5:-4]
yearList.append(periodIdx)
print(periodIdx)
gUndirected=igraph.Graph.Full(0, directed = False)
gUndirected.es['weight'] = 1
'''ReRanking the nodes based on their reciprocal rank between timeslots'''
try:
gUndirected.add_vertices(dataDict['topTermsByPR'])
print('used top Terms By PageRank')
# print(dataDict['topTermsByPR'][:5])
except:
gUndirected.add_vertices(dataDict['uniquePersistentTerms'])
print('used alphabetically ranked terms')
pass
myEdges,myWeights = [], []
nodesWithEdges = []
WMXtermFrequencies = {termLabelDict[" ".join(re.findall("[a-z]+", x))]['code']:0 for x in dataDict['uniquePersistentTerms']}
for x in dataDict[periodIdx]['adjList']:
if x[0] in dataDict['uniquePersistentTerms'] and x[1] in dataDict['uniquePersistentTerms']:
myEdges.append((x[0],x[1]))
myWeights.append(x[2])
nodesWithEdges.extend(x[:2])
WMXtermFrequencies[termLabelDict[" ".join(re.findall("[a-z]+", x[0]))]['code']] += x[2]
WMXtermFrequencies[termLabelDict[" ".join(re.findall("[a-z]+", x[1]))]['code']] += x[2]
print('Full No of edges: %s and pruned No of edges %s' %(len(dataDict[periodIdx]['adjList']),len(myEdges)))
gUndirected.add_edges(myEdges)
gUndirected.es["weight"] = myWeights
dataDict[periodIdx]['graph'] = gUndirected
gUndirected.vs['label'] = gUndirected.vs['name']
# nodes = gUndirected.vs['name']
# print(nodes[:5])
#-----------------------------------------------------------------------------------------------
'''creating undirected adjacency mat'''#--------------------------------------------------------
#-----------------------------------------------------------------------------------------------
# # print('creating adjacency matrix')
# adjMat = gUndirected.get_adjacency(attribute='weight')
# adjMat = np.array(adjMat.data)
# # print('writing undirected adjacency matrix to file')
# with open(adjMatWritePath+'/AdjMat'+years+lvl+'_'+periodIdx+'.txt', 'w') as d:
# d.write('Term\t'+'\t'.join(nodes)+'\n')
# for s in nodes:
# distLine = [str(x) for x in adjMat[nodes.index(s)].tolist()]
# d.write(s+'\t'+'\t'.join(distLine)+'\n')
# # write adjMat with nums instead of labels
# with open(adjMatWritePath+'/nummedAdjMat'+years+lvl+'_'+periodIdx+'.txt', 'w') as d:
# d.write('Term\t'+'\t'.join([str(x) for x in range(len(nodes))])+'\n')
# # with open(figWritePath+'/nodeIdMapping'+years+lvl+'.tsv','w') as f:
# for idx,s in enumerate(nodes):
# distLine = [str(x) for x in adjMat[nodes.index(s)].tolist()]
# d.write(str(idx)+'\t'+'\t'.join(distLine)+'\n')
# # # f.write(str(idx)+'\t'+s+'\n')
#-----------------------------------------------------------------------------------------------
#--------------------------------------------------------------------------------
'''Extract centrality measures'''#-----------------------------------------------
#--------------------------------------------------------------------------------
# dataDict[periodIdx]['term'] = {'degree':{},'pageRank':{},'maxnormPageRank':{}, 'minnormPageRank':{}, 'authority':{}, 'hub':{}, 'betweenness':{}}
# pageRank = gUndirected.pagerank(weights = 'weight', directed=False)
# authority = gUndirected.authority_score(weights = 'weight') #HITS authority score
# hub = gUndirected.hub_score(weights = 'weight')#HITS hub score
# betweenness = gUndirected.betweenness(weights = 'weight', directed = False)
# # print('extracted pagerank')
# maxPR = max(pageRank)
# maxnormPageRank = [x/maxPR for x in pageRank]
# minPR = min(pageRank)
# minnormPageRank = [x/minPR for x in pageRank]
# maxminPr = max(minnormPageRank)
# minmaxPRdiff = maxPR-minPR
# minmaxnormPageRank = [1+3*((x-minPR)/minmaxPRdiff) for x in pageRank]
# for x in nodes:
# dataDict[periodIdx]['term']['pageRank'][x] = pageRank[nodes.index(x)]
# dataDict[periodIdx]['term']['maxnormPageRank'][x] = maxnormPageRank[nodes.index(x)]
# dataDict[periodIdx]['term']['minnormPageRank'][x] = minnormPageRank[nodes.index(x)]
# dataDict[periodIdx]['term']['degree'][x] = gUndirected.degree(x)
# dataDict[periodIdx]['term']['authority'][x] = authority[nodes.index(x)]
# dataDict[periodIdx]['term']['hub'][x] = hub[nodes.index(x)]
# dataDict[periodIdx]['term']['betweenness'][x] = betweenness[nodes.index(x)]
# tmpPRrank = sorted(dataDict[periodIdx]['term']['pageRank'], key=lambda k: [dataDict[periodIdx]['term']['pageRank'][k],dataDict[periodIdx]['term']['degree'][k],k],reverse =True)
# for x in nodes:
# if x not in termPrRanks:
# termPrRanks[x] = [tmpPRrank.index(x)]
# else:
# termPrRanks[x].append(tmpPRrank.index(x))
# tmpAuthrank = sorted(dataDict[periodIdx]['term']['authority'], key=lambda k: [dataDict[periodIdx]['term']['authority'][k],dataDict[periodIdx]['term']['degree'][k],k],reverse =True)
# for x in nodes:
# if x not in termAuthRanks:
# termAuthRanks[x] = [tmpAuthrank.index(x)]
# else:
# termAuthRanks[x].append(tmpAuthrank.index(x))
# tmpHubrank = sorted(dataDict[periodIdx]['term']['hub'], key=lambda k: [dataDict[periodIdx]['term']['hub'][k],dataDict[periodIdx]['term']['degree'][k],k],reverse =True)
# for x in nodes:
# if x not in termHubRanks:
# termHubRanks[x] = [tmpHubrank.index(x)]
# else:
# termHubRanks[x].append(tmpHubrank.index(x))
# tmpBetweenrank = sorted(dataDict[periodIdx]['term']['betweenness'], key=lambda k: [dataDict[periodIdx]['term']['betweenness'][k],dataDict[periodIdx]['term']['degree'][k],k],reverse =True)
# for x in nodes:
# if x not in termBetweenRanks:
# termBetweenRanks[x] = [tmpBetweenrank.index(x)]
# else:
# termBetweenRanks[x].append(tmpBetweenrank.index(x))
# -----------------------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------------------
'''make WMX code histograms'''#------------------------------------------------------------------
# labelFormat = 'code' #switch terms by Wmatrix code or label?
# allWMXhistLabels = sorted(list(set([termLabelDict[" ".join(re.findall("[a-z]+", x))][labelFormat] for x in nodes])))
# if not os.path.exists(figWritePath+'/histograms/non-normalized/'):
# os.makedirs(figWritePath+'/histograms/non-normalized/')
# #make histograms of WMX codes per timeslot------------------------------------------------------
# allWMXhistVals = [WMXtermFrequencies[x] for x in allWMXhistLabels]
# fig, ax = plt.subplots()
# ind = np.arange(len(allWMXhistVals))
# ax.bar(ind,allWMXhistVals,color = 'r')
# ax.set_ylabel('Frequency')
# ax.set_xlabel('WMX codes')
# ax.set_xticks(ind+0.5)
# ax.set_xticklabels(allWMXhistLabels,rotation = 90, fontsize=heatMapFont+2)
# plt.title('WMX category histogram per timeslot (Level '+lvl+' terms | 5 year period prior to '+str(int(periodIdx)*5+trueYearsIni[idy])+')')
# mng = plt.get_current_fig_manager()
# mng.window.state('zoomed')
# interactive(True)
# plt.show()
# fig.savefig(figWritePath+'/histograms/non-normalized/WMXcodeDistribution'+years+lvl+'_'+periodIdx+'.png',bbox_inches='tight')
# plt.close()
# interactive(False)
# # make normalized histograms of WMX codes per timeslot------------------------------------------------------
# if not os.path.exists(figWritePath+'/histograms/normalized'):
# os.makedirs(figWritePath+'/histograms/normalized')
# labelFormat = 'code' #switch terms by Wmatrix code or label?
# allWMXhistVals = [WMXtermFrequencies[x] for x in allWMXhistLabels]
# maxallWMXhistVals = max(allWMXhistVals)
# allWMXhistVals = [x/maxallWMXhistVals for x in allWMXhistVals]
# fig, ax = plt.subplots()
# ind = np.arange(len(allWMXhistVals))
# ax.bar(ind,allWMXhistVals,color = 'r')
# ax.set_ylabel('Frequency')
# ax.set_xlabel('WMX codes')
# ax.set_xticks(ind+0.45)
# ax.set_xticklabels(allWMXhistLabels,rotation = 90, fontsize=heatMapFont+2)
# plt.title('WMX category histogram per timeslot (Level '+lvl+' terms | 5 year period prior to '+str(int(periodIdx)*5+trueYearsIni[idy])+')')
# mng = plt.get_current_fig_manager()
# mng.window.state('zoomed')
# interactive(True)
# plt.show()
# fig.savefig(figWritePath+'/histograms/normalized/WMXcodeDistribution'+years+lvl+'_'+periodIdx+'.png',bbox_inches='tight')
# plt.close()
# interactive(False)
# histoNormAggList.append(allWMXhistVals)
#-----------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------
'''symmetrical distance matrix extraction'''
#-----------------------------------------------------------------------------------------------
# print('estimate symmetrical distance matrix')
# distMat = distance.pdist(adjMat, 'euclidean')
# distMat = distance.squareform(distMat)
# '''Write the symmetrical distance matrix to a file'''
# print('writing symmetrical distance matrix to file')
# with open(distMatWritePath+'/distMat'+years+lvl+'_'+periodIdx+'.tsv', 'w') as d:
# d.write('Term\t'+'\t'.join(nodes)+'\n')
# for s in nodes:
# distLine = [str(float(x)) for x in distMat[nodes.index(s)].tolist()]
# d.write(s+'\t'+'\t'.join(distLine)+'\n')
# '''plotting heatmap of symmetrical distance matrix'''
# print('plotting heatmap of symmetrical distance matrix')
# sns.set(style="darkgrid")
# fig, ax = plt.subplots()
# ax = sns.heatmap(distMat, square = True)#,xticklabels=2,ax=ax)
# # ax.set_xticks(range(0, len(nodes), 4))#, minor=False)
# ax.xaxis.tick_top()
# ax.set_yticklabels(list(reversed(nodes)), minor=False, fontsize = heatMapFont)
# plt.yticks(rotation=0)
# ax.set_xticklabels(nodes, minor=False, fontsize = heatMapFont, rotation = 90)
# plt.xlabel('euclidean distance matrix heatmap (Level '+lvl+' terms | 5 year period prior to '+str(int(periodIdx)*5+trueYearsIni[idy])+')')
# mng = plt.get_current_fig_manager()
# mng.window.state('zoomed')
# interactive(True)
# plt.show()
# fig.savefig(figWritePath+'/plain euclidean distance heatmaps/plainEucl_distMatHeatmap_'+years+lvl+'_'+periodIdx+'.png',bbox_inches='tight')
# plt.close()
# interactive(False)
#-----------------------------------------------------------------------------------------------
# ------------------------------------------------------------------------------------------------------------------------
'''SOM data extraction from here on------------------------------------------------------------------------------------'''
# ------------------------------------------------------------------------------------------------------------------------
'''Extract Self Organizing Maps of undirected weighted adj mats'''#change filename depending on labeled or numbered terms
nummedOrNot = ''#'nummed' are the labels numbers or text (leave blank)?
labelFormat = 'code' #switch terms by Wmatrix code or label?
df = pd.read_table(adjMatWritePath+'/'+nummedOrNot+'AdjMat'+years+lvl+'_'+periodIdx+'.txt', sep="\t", header=0,index_col=0)
dfmax = df.max()
dfmax[dfmax == 0] = 1
df = df / dfmax
originallabels = df.index.tolist()
nodes = df.index.tolist()
# print(originallabels[:5])
labels = originallabels
wmxcodelabels = [termLabelDict[" ".join(re.findall("[a-z]+", x))]['code'] for x in originallabels]
wmxtagslabels = [termLabelDict[" ".join(re.findall("[a-z]+", x))]['label'] for x in originallabels]
som.update_data(df.values)
if periodIdx == yearList[0]:
epochs = 10
radius0 = 0
scale0 = 0.1
else:
radius0 = n_rows//5
scale0 = 0.03
epochs = epochs2
som.train(epochs=epochs, radius0=radius0, scale0=scale0)
'''----------------------clustering params-----------'''
clusterAlgLabel = 'AffinityPropagation' # KMeans8 , SpectralClustering,AffinityPropagation, Birch
if clusterAlgLabel == 'Birch':
algorithm = clusterAlgs.Birch()
elif clusterAlgLabel == 'AffinityPropagation':
original_shape = som.codebook.shape
som.codebook.shape = (som._n_columns*som._n_rows, som.n_dim)
init = -np.max(distance.pdist(som.codebook, 'euclidean'))
som.codebook.shape = original_shape
algorithm = clusterAlgs.AffinityPropagation(preference = init,damping = 0.9)
elif clusterAlgLabel == 'KMeans8':
algorithm = None
# elif clusterAlgLabel == 'XMeans':
# algorithm =
print('Clustering algorithm employed: %s' %clusterAlgLabel)
som.cluster(algorithm=algorithm)
'''----------------------clustering params-----------'''
if savefig:
if not os.path.exists(figWritePath+'/'+clusterAlgLabel+'Clusters/SOMs/'+SOMdimensionsString):
os.makedirs(figWritePath+'/'+clusterAlgLabel+'Clusters/SOMs/'+SOMdimensionsString)
SOMfilename = figWritePath+'/'+clusterAlgLabel+'Clusters/SOMs/'+SOMdimensionsString+'/SOM_'+nummedOrNot+'AdjMat'+years+lvl+'_'+periodIdx+'.png'
SOMfilenameWmxCode = figWritePath+'/'+clusterAlgLabel+'Clusters/SOMs/'+SOMdimensionsString+'/wmxCodeSOM_AdjMat'+years+lvl+'_'+periodIdx+'.png'
SOMfilenameWmxTags = figWritePath+'/'+clusterAlgLabel+'Clusters/SOMs/'+SOMdimensionsString+'/wmxTagsSOM_AdjMat'+years+lvl+'_'+periodIdx+'.png'
# SOMfilenameNoBMUs = figWritePath+'/'+clusterAlgLabel+'Clusters/'+SOMdimensionsString+'/noBMUsSOM_AdjMat'+years+lvl+'_'+periodIdx+'.png'
else:
SOMfilename = None
rc('font', **{'size': 11}); figsize = (20, 20/float(n_columns/n_rows))
# som.view_umatrix(figsize = figsize, colormap="Spectral_r", bestmatches=True, labels=labels, filename=SOMfilename)
# plt.close()
# som.view_umatrix(figsize = figsize, colormap="Spectral_r", bestmatches=True, labels=wmxcodelabels, filename=SOMfilenameWmxCode)
# plt.close()
# som.view_umatrix(figsize = figsize, colormap="Spectral_r", bestmatches=True, labels=wmxtagslabels, filename=SOMfilenameWmxTags)
# plt.close()
dataDict[periodIdx]['somCoords'] = {SOMdimensionsString:som.bmus}
colors = []
for bm in som.bmus:
colors.append(som.clusters[bm[1], bm[0]])
areas = [200]*len(som.bmus)
# areas = [x*70 for x in minmaxnormPageRank]
#-----------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------
'''write clustered BMUs, create piecharts of their WMX and WordNet categories and estimate cluster internal average similarity'''#------------------------
#-----------------------------------------------------------------------------------------------
# if not os.path.exists(figWritePath+'/'+clusterAlgLabel+'Clusters/piecharts/'+SOMdimensionsString):
# os.makedirs(figWritePath+'/'+clusterAlgLabel+'Clusters/piecharts/'+SOMdimensionsString)
# if not os.path.exists(figWritePath+'/'+clusterAlgLabel+'Clusters/bagsOfClusteredTerms/'+SOMdimensionsString):
# os.makedirs(figWritePath+'/'+clusterAlgLabel+'Clusters/bagsOfClusteredTerms/'+SOMdimensionsString)
# if not os.path.exists(figWritePath+'/bagsOfBMUclusteredTerms/'+SOMdimensionsString):
# os.makedirs(figWritePath+'/bagsOfBMUclusteredTerms/'+SOMdimensionsString)
# dataDict[periodIdx]['clusterDict'] = {}
# dataDict[periodIdx]['bmuClusterDict'] = {}
# for idx,bm in enumerate(som.bmus):
# clusterName = som.clusters[bm[1], bm[0]]
# bmuClusterName = ','.join([str(x) for x in [bm[1], bm[0]]])
# if clusterName in dataDict[periodIdx]['clusterDict']:
# dataDict[periodIdx]['clusterDict'][clusterName]['term'].append(nodes[idx])
# dataDict[periodIdx]['clusterDict'][clusterName]['WMXcode'].append(termLabelDict[" ".join(re.findall("[a-z]+", nodes[idx]))][labelFormat])
# dataDict[periodIdx]['clusterDict'][clusterName]['WNhypernyms'].extend(wordnetHypernymLemmaExtractor(" ".join(re.findall("[a-z]+", nodes[idx]))))
# else:
# dataDict[periodIdx]['clusterDict'][clusterName] = {'term':[nodes[idx]],'WNhypernyms': wordnetHypernymLemmaExtractor(nodes[idx]),'WMXcode':[termLabelDict[" ".join(re.findall("[a-z]+", nodes[idx]))][labelFormat]]}
# if bmuClusterName in dataDict[periodIdx]['bmuClusterDict']:
# dataDict[periodIdx]['bmuClusterDict'][bmuClusterName]['term'].append(nodes[idx])
# dataDict[periodIdx]['bmuClusterDict'][bmuClusterName]['WMXcode'].append(termLabelDict[" ".join(re.findall("[a-z]+", nodes[idx]))][labelFormat])
# dataDict[periodIdx]['bmuClusterDict'][bmuClusterName]['WNhypernyms'].extend(wordnetHypernymLemmaExtractor(" ".join(re.findall("[a-z]+", nodes[idx]))))
# else:
# dataDict[periodIdx]['bmuClusterDict'][bmuClusterName] = {'term':[nodes[idx]],'WNhypernyms': wordnetHypernymLemmaExtractor(nodes[idx]),'WMXcode':[termLabelDict[" ".join(re.findall("[a-z]+", nodes[idx]))][labelFormat]]}
# clusterNames = list(dataDict[periodIdx]['clusterDict'].keys())
# bmuClusterNames = list(dataDict[periodIdx]['bmuClusterDict'].keys())
# #write bags of clustered terms by BMU coordinate
# bmuRankedClusterNames = sorted(dataDict[periodIdx]['bmuClusterDict'],key = lambda k: len(dataDict[periodIdx]['bmuClusterDict'][k]['term']),reverse = True)
# with open(figWritePath+'/bagsOfBMUclusteredTerms/'+SOMdimensionsString+'/boct'+years+lvl+'_'+periodIdx+'.tsv','w') as f:
# for cN in bmuRankedClusterNames:
# f.write(str(cN)+'\t'+','.join(dataDict[periodIdx]['bmuClusterDict'][cN]['term'])+'\n')
# #write bags of clustered terms based on external clustering method e.g. affinity prop
# rankedClusterNames = sorted(dataDict[periodIdx]['clusterDict'],key = lambda k: len(dataDict[periodIdx]['clusterDict'][k]['term']),reverse = True)
# countWMXcodes,countWNhypernyms = {}, {}
# clusterNames = []
# with open(figWritePath+'/'+clusterAlgLabel+'Clusters/bagsOfClusteredTerms/'+SOMdimensionsString+'/boct'+years+lvl+'_'+periodIdx+'.tsv','w') as f:
# for cN in rankedClusterNames:
# f.write(str(cN)+'\t'+','.join(dataDict[periodIdx]['clusterDict'][cN]['term'])+'\n')
# if len(dataDict[periodIdx]['clusterDict'][cN]['WMXcode']) > 1:
# countWMXcodes[cN] = collections.Counter(dataDict[periodIdx]['clusterDict'][cN]['WMXcode'])
# countWNhypernyms[cN] = collections.Counter(dataDict[periodIdx]['clusterDict'][cN]['WNhypernyms'])
# clusterNames.append(cN)
# clusterNum = len(countWMXcodes)
# '''make pie charts of each cluster'''#----------------------------------------------------------
# pieclmns = 10
# if pieclmns>clusterNum:
# pieclmns = clusterNum
# if clusterNum == 0:
# pieclmns = clusterNum = 1
# clusterNames = ['no rich clusters']
# countWMXcodes = {'no rich clusters':{'no rich clusters':1}}
# countWNhypernyms = {'no rich clusters':{'no rich clusters':1}}
# if pieclmns == 1:
# pieclmns = 2
# # elif clusterNum == 1:
# pierows = math.ceil(clusterNum/pieclmns)
# possibleAxes = list(itertools.product(range(pierows),range(pieclmns)))
# fig, axarr = plt.subplots(pierows,pieclmns)
# axarr.shape = (pierows,pieclmns)
# fig.suptitle('Clustered WMX category vizualization (Level '+lvl+' terms | 5 year period prior to '+str(int(periodIdx)*5+trueYearsIni[idy])+')')
# idx=0
# for cN in clusterNames:
# umxLabels = list(countWMXcodes[cN].keys())
# umxSizes = list(countWMXcodes[cN].values())
# totalumxSizes = sum(umxSizes)
# tmpcoords = (int(idx%pierows),int(idx//pierows))
# axarr[tmpcoords].pie(umxSizes, labels=umxLabels, startangle=90,radius=1,autopct=lambda p:'{:.0f}'.format(p*totalumxSizes/100))#autopct='%1.1f%%', , frame=True)#radius=1/clusterNum,
# axarr[tmpcoords].set_aspect('equal')
# axarr[tmpcoords].set_title('cluster %s'%cN)
# possibleAxes.remove((int(idx%math.ceil(clusterNum/pieclmns)),int(idx//math.ceil(clusterNum/pieclmns))))
# idx+=1
# for x in possibleAxes:
# fig.delaxes(axarr[x])
# mng = plt.get_current_fig_manager()
# mng.window.state('zoomed')
# interactive(True)
# plt.show()
# fig.savefig(figWritePath+'/'+clusterAlgLabel+'Clusters/piecharts/'+SOMdimensionsString+'/WMXpie'+years+lvl+'_'+periodIdx+'.png',bbox_inches='tight')
# plt.close()
# interactive(False)
# #---------------------WNhypernym pies below-------------
# possibleAxes = list(itertools.product(range(pierows),range(pieclmns)))
# fig, axarr = plt.subplots(pierows,pieclmns)
# axarr.shape = (pierows,pieclmns)
# fig.suptitle('Clustered WNhypernym vizualization (Level '+lvl+' terms | 5 year period prior to '+str(int(periodIdx)*5+trueYearsIni[idy])+')')
# idx=0
# for cN in clusterNames:
# WNLabels = list(countWNhypernyms[cN].keys())
# WNSizes = list(countWNhypernyms[cN].values())
# totalumxSizes = sum(WNSizes)
# tmpcoords = (int(idx%pierows),int(idx//pierows))
# axarr[tmpcoords].pie(WNSizes, labels=WNLabels, startangle=90,radius=1,autopct=lambda p:'{:.0f}'.format(p*totalumxSizes/100))#autopct='%1.1f%%', , frame=True)#radius=1/clusterNum,
# axarr[tmpcoords].set_aspect('equal')
# axarr[tmpcoords].set_title('cluster %s'%cN)
# possibleAxes.remove((int(idx%math.ceil(clusterNum/pieclmns)),int(idx//math.ceil(clusterNum/pieclmns))))
# idx+=1
# for x in possibleAxes:
# fig.delaxes(axarr[x])
# mng = plt.get_current_fig_manager()
# mng.window.state('zoomed')
# interactive(True)
# plt.show()
# fig.savefig(figWritePath+'/'+clusterAlgLabel+'Clusters/piecharts/'+SOMdimensionsString+'/wordNetPie'+years+lvl+'_'+periodIdx+'.png',bbox_inches='tight')
# plt.close()
# interactive(False)
#-----------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------
'''write and show the umatrix (umx)'''#---------------------------------------------------------
#-----------------------------------------------------------------------------------------------
# somUmatrix = som.umatrix
# print('writing umatrix to file')
# np.savetxt(umatrixWritePath+'/umx'+years+lvl+'_'+periodIdx+'.umx',somUmatrix,delimiter='\t', newline='\n',header='% '+ '%s %s'%(n_rows,n_columns))
# print('writing BMU coords to file')
# with open(umatrixWritePath+'/umx'+years+lvl+'_'+periodIdx+'.bm','w') as f:
# with open(umatrixWritePath+'/umx'+years+lvl+'_'+periodIdx+'.names','w') as fn:
# f.write('% '+'%s %s\n' %(n_rows,n_columns))
# fn.write('% '+str(len(nodes))+'\n')
# for idx,coos in enumerate(som.bmus):
# f.write('%s %s %s\n' %(idx,coos[1],coos[0]))
# fn.write('%s %s %s\n' %(idx,nodes[idx],nodes[idx]))
# print('plotting umatrix 3D surface')
# fig = plt.figure()
# ax = fig.gca(projection='3d')
# X = np.arange(0, n_columns, 1)
# Y = np.arange(0, n_rows, 1)
# X, Y = np.meshgrid(X, Y)
# N=somUmatrix/somUmatrix.max()
# surf = ax.plot_surface(X, Y, somUmatrix, facecolors=cm.jet(N),rstride=1, cstride=1)#,facecolors=cm.jet(somUmatrix) cmap=cm.coolwarm, linewidth=0, antialiased=False)
# m = cm.ScalarMappable(cmap=cm.jet)
# m.set_array(somUmatrix)
# plt.colorbar(m, shrink=0.5, aspect=5)
# plt.title('SOM umatrix 3D surface vizualization (Level '+lvl+' terms | 5 year period prior to '+str(int(periodIdx)*5+trueYearsIni[idy])+')')
# mng = plt.get_current_fig_manager()
# mng.window.state('zoomed')
# interactive(True)
# plt.show()
# fig.savefig(figWritePath+'/SOM Umatrices/umxSurf'+years+lvl+'_'+periodIdx+'.png',bbox_inches='tight')
# plt.close()
# interactive(False)
# #-----------------------------------------------------------------------------------------------
# '''Plotting BMU coordinates with labels'''#-----------------------------------------------------
# #-----------------------------------------------------------------------------------------------
# if not os.path.exists(figWritePath+'/'+clusterAlgLabel+'Clusters/bmuCoordsWlabels/'+SOMdimensionsString):
# os.makedirs(figWritePath+'/'+clusterAlgLabel+'Clusters/bmuCoordsWlabels/'+SOMdimensionsString)
# labelFormat = 'code'
# fig, ax = plt.subplots()
# xDimension = [x[0] for x in som.bmus]#[:10]]
# yDimension = [x[1] for x in som.bmus]#[:10]]
# plt.scatter(xDimension,yDimension, c=colors, s = areas, alpha = 0.7)
# labels = [str(colors[x])+'_'+termLabelDict[" ".join(re.findall("[a-z]+", nodes[x]))][labelFormat] for x in range(len(xDimension))]
# doneLabs = set([''])
# for label, x, y in zip(labels, xDimension, yDimension):
# lblshiftRatio = 2
# labFinshift = ''
# while labFinshift in doneLabs:
# potentialPositions = [(x, y+lablshift), (x+lblshiftRatio*lablshift, y), (x-lblshiftRatio*lablshift, y), (x+lblshiftRatio*lablshift, y+lblshiftRatio*lablshift),
# (x-lblshiftRatio*lablshift, y+lblshiftRatio*lablshift), (x+lblshiftRatio*lablshift, y-lblshiftRatio*lablshift), (x+lblshiftRatio*lablshift, y+lblshiftRatio*lablshift),
# (x-lblshiftRatio*lablshift, y+lblshiftRatio*lablshift)]
# for pP in potentialPositions:
# labFinshift = pP
# if labFinshift not in doneLabs:
# break
# lblshiftRatio+=1
# doneLabs.add(labFinshift)
# plt.annotate(label, xy = (x, y), xytext = labFinshift, textcoords = 'data', ha = 'center', va = 'center',bbox = dict(boxstyle = 'round,pad=0.1', fc = 'white', alpha = 0.4))
# lIdx+=1
# xCc,yCc = [] ,[]
# for x in som.centroidBMcoords:
# if som.clusters[x[0], x[1]] in colors:
# xCc.append(x[1])
# yCc.append(x[0])
# plt.scatter(xCc,yCc, c= range(len(xCc)), s= [1000]*len(xCc), alpha = 0.4)
# plt.xlim(0,n_columns)
# plt.ylim(0,n_rows)
# # ax.invert_yaxis()
# plt.title('Labeled SOM. Level '+lvl+' terms, timeslot '+periodIdx+' (5 year period prior to '+str(int(periodIdx)*5+trueYearsIni[idy])+')')
# mng = plt.get_current_fig_manager()
# mng.window.state('zoomed')
# interactive(True)
# plt.show()
# fig.savefig(figWritePath+'/'+clusterAlgLabel+'Clusters/bmuCoordsWlabels/'+SOMdimensionsString+'/SOM_Wmatrix'+labelFormat+years+lvl+'_'+periodIdx+'.png',bbox_inches='tight')
# plt.close()
# interactive(False)
#-----------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------
'''SOMdistance matrix quiver plots between timeslots'''#----------------------------------------
#-----------------------------------------------------------------------------------------------
# if int(periodIdx)>0:
# # X,Y = np.meshgrid( np.arange(len(nodes)),np.arange(len(nodes)) )
# X1 = [x[0] for x in dataDict[str(int(periodIdx)-1)]['somCoords'][SOMdimensionsString][:10]]
# Y1 = [x[1] for x in dataDict[str(int(periodIdx)-1)]['somCoords'][SOMdimensionsString][:10]]
# X2 = [x[0] for x in dataDict[periodIdx]['somCoords'][SOMdimensionsString][:10]]
# Y2 = [x[1] for x in dataDict[periodIdx]['somCoords'][SOMdimensionsString][:10]]
# PRcolors = [dataDict[periodIdx]['term']['pageRank'][x] for x in nodes[:10]]
# pprint.pprint([X1,Y1])
# pprint.pprint([X2,Y2])
# fig, ax = plt.subplots()
# # X,Y = np.meshgrid(X1,Y1)
# Q = plt.quiver(X1,Y1,X2,Y2, PRcolors, cmap=cm.seismic)#,headlength=5)#
# plt.xlim(0,n_columns)
# plt.ylim(0,n_rows)
# # ax.set_xticks(range(0, len(nodes)))#, minor=False)
# # ax.xaxis.tick_top()
# # ax.set_yticklabels(list(reversed(nodes)), minor=False, fontsize = heatMapFont)
# plt.yticks(rotation=0)
# # ax.set_xticklabels(nodes, minor=False, fontsize = heatMapFont, rotation = 90)
# ax.invert_yaxis()
# plt.colorbar()
# plt.title('SOM movement quiver plot. Level '+lvl+' terms, timeslot '+periodIdx+' (5 year period prior to '+str(int(periodIdx)*5+trueYearsIni[idy])+')')
# mng = plt.get_current_fig_manager()
# mng.window.state('zoomed')
# interactive(True)
# plt.show()
# fig.savefig(figWritePath+'/SOM_distanceMatQuiver_'+years+lvl+'_'+periodIdx+'.png',bbox_inches='tight')
# plt.close()
# interactive(False)
#-----------------------------------------------------------------------------------------------
#------------------------------------------------------------------------------------------------------------
'''SOM distance matrix extraction'''#-----------------------------------------------------------
#------------------------------------------------------------------------------------------------------------
# print('estimate SOM distance matrix')
# distMatSOM = toroidDistance(som.bmus,n_columns,n_rows)
# '''Write the SOM distance matrix to a file'''
# print('writing SOM distance matrix to file')
# with open(distMatWritePath+'/distMatSOM'+years+lvl+'_'+periodIdx+'.tsv', 'w') as d:
# d.write('Term\t'+'\t'.join(nodes)+'\n')
# for s in nodes:
# distLine = [str(float(x)) for x in distMatSOM[nodes.index(s)].tolist()]
# d.write(s+'\t'+'\t'.join(distLine)+'\n')
# '''plotting heatmap of distance matrix using som'''
# print('plotting heatmap of distance matrix using som')
# if not os.path.exists(figWritePath+'/SOM euclidean distance Heatmaps'):
# os.makedirs(figWritePath+'/SOM euclidean distance Heatmaps')
# sns.set(style="darkgrid")
# fig, ax = plt.subplots()
# ax = sns.heatmap(distMatSOM, square = True)#,xticklabels=2,ax=ax)
# # ax.set_xticks(range(0, len(nodes), 4))#, minor=False)
# ax.set_yticklabels(list(reversed(nodes)), minor=False, fontsize = heatMapFont)
# plt.yticks(rotation=0)
# ax.xaxis.tick_top()
# ax.set_xticklabels(nodes, minor=False, fontsize = heatMapFont, rotation = 90)
# plt.xlabel('SOM distance matrix heatmap (Level '+lvl+' terms | 5 year period prior to '+str(int(periodIdx)*5+trueYearsIni[idy])+')')
# mng = plt.get_current_fig_manager()
# mng.window.state('zoomed')
# interactive(True)
# plt.show()
# fig.savefig(figWritePath+'/SOM euclidean distance Heatmaps/SOM_distMatHeatmap_'+years+lvl+'_'+periodIdx+'.png')#,bbox_inches='tight')
# plt.close()
# interactive(False)
# #-----------------------------------------------------------------------------------------------
# #------------------------------------------------------------------------------------------------------------
# '''potential and gravity extraction'''#----------------------------------------------------------------------
# #------------------------------------------------------------------------------------------------------------
# '''estimate the potential -(PR_userN * PR_userN+1)/distance matrix'''
# normMethod = 'SUM'#normalization method of distance matrix
# if normMethod == 'MAX':
# PRversion = 'maxnormPageRank'
# elif normMethod == 'SUM':
# PRversion = 'pageRank'
# elif normMethod == 'MIN':
# PRversion = 'minnormPageRank'
# print('estimate potential')
# potentMat = np.zeros(distMatSOM.shape)
# pgrMat = np.zeros(distMatSOM.shape)
# for n in nodes:
# potentMat[nodes.index(n)] = dataDict[periodIdx]['term'][PRversion][n]
# pgrMat[:,nodes.index(n)] = dataDict[periodIdx]['term'][PRversion][n]
# potentMat = np.multiply(potentMat,pgrMat)
# PRprodArray = potentMat.reshape(-1)
# potentMat = (-potentMat)#*1000)
# distMatPot = distMatSOM + 1
# distMatPot = distMatPot/distMatPot.sum()#make sure this complies with the normMethod
# potentMat = np.divide(potentMat,distMatPot)#e-8)
# potentMat = np.multiply(potentMat,abs(np.identity(potentMat.shape[0])-1))
# '''estimate the gravity G*(PR_userN * PR_userN+1)/distance^2 matrix'''
# print('estimate gravity')
# gravMat = np.zeros(distMatSOM.shape)
# for n in nodes:
# gravMat[nodes.index(n)] = dataDict[periodIdx]['term'][PRversion][n]
# gravMat = np.multiply(gravMat,pgrMat)
# PRprodArray = gravMat.reshape(-1)
# distMat2 = np.multiply(distMatSOM,distMatSOM)+1
# distMat2 = distMat2/distMat2.sum()#make sure this complies with the normMethod
# gravMat = np.divide(gravMat,distMat2)#e-8)
# gravMat = np.multiply(gravMat,abs(np.identity(gravMat.shape[0])-1))
# print('Max potential is %s and min potential is %s' %(potentMat.max(),potentMat.min()))
# print('Max distance is %s and min distance is %s' %(distMatSOM.max(),distMatSOM.min()))
# print('writing potential matrix to file')
# with open(potentMatWritePath+'/SOM_potentMat_'+normMethod+'normed_'+years+lvl+'_'+periodIdx+'.tsv', 'w') as d:
# d.write('Term\t'+'\t'.join(nodes)+'\n')
# for s in nodes:
# distLine = [str(float(x)) for x in potentMat[nodes.index(s)].tolist()]
# d.write(s+'\t'+'\t'.join(distLine)+'\n')
# print('Max grav is %s and min grav is %s' %(gravMat.max(),gravMat.min()))
# print('writing gravity matrix to file')
# with open(gravMatWritePath+'/SOM_gravMat_'+normMethod+'normed'+years+lvl+'_'+periodIdx+'.tsv', 'w') as d:
# d.write('Term\t'+'\t'.join(nodes)+'\n')
# for s in nodes:
# distLine = [str(float(x)) for x in gravMat[nodes.index(s)].tolist()]
# d.write(s+'\t'+'\t'.join(distLine)+'\n')
# # # #------------------------------------------------------------------------------------------------------------
# # # #-----------------------------------------------------------------------------------------------------------
# # # '''potential and gravity plots'''#--------------------------------------------------------------------------
# # # #-----------------------------------------------------------------------------------------------------------
# '''plotting heatmap of potential matrix using som'''
# print('plotting heatmap of potential matrix using som')
# if not os.path.exists(figWritePath+'/SOM potential Heatmaps'):
# os.makedirs(figWritePath+'/SOM potential Heatmaps')
# os.makedirs(figWritePath+'/SOM gravity Heatmaps')
# sns.set(style="darkgrid")
# fig, ax = plt.subplots()
# ax = sns.heatmap(potentMat, square = True)#,xticklabels=2,ax=ax)
# # ax.set_xticks(range(0, len(nodes), 4))#, minor=False)
# ax.xaxis.tick_top()
# ax.set_yticklabels(list(reversed(nodes)), minor=False, fontsize = heatMapFont)
# plt.yticks(rotation=0)
# ax.set_xticklabels(nodes, minor=False, fontsize = heatMapFont, rotation = 90)
# plt.xlabel('SOM potential matrix heatmap (Level '+lvl+' terms | 5 year period prior to '+str(int(periodIdx)*5+trueYearsIni[idy])+')')
# mng = plt.get_current_fig_manager()
# mng.window.state('zoomed')
# interactive(True)
# plt.show()
# fig.savefig(figWritePath+'/SOM potential Heatmaps/SOM_potentMatHeatmap_'+normMethod+'normed_'+years+lvl+'_'+periodIdx+'.png',bbox_inches='tight')
# plt.close()
# interactive(False)
# '''plotting heatmap of gravity matrix'''
# print('plotting heatmap of gravity matrix')
# sns.set(style="darkgrid")
# fig, ax = plt.subplots()
# ax = sns.heatmap(gravMat, square = True)#,xticklabels=2,ax=ax)
# # ax.set_xticks(range(0, len(nodes), 4))#, minor=False)
# ax.xaxis.tick_top()
# ax.set_yticklabels(list(reversed(nodes)), minor=False, fontsize = heatMapFont)
# plt.yticks(rotation=0)
# ax.set_xticklabels(nodes, minor=False, fontsize = heatMapFont, rotation = 90)
# plt.xlabel('SOM gravity matrix heatmap (Level '+lvl+' terms | 5 year period prior to '+str(int(periodIdx)*5+trueYearsIni[idy])+')')
# mng = plt.get_current_fig_manager()
# mng.window.state('zoomed')
# interactive(True)
# plt.show()
# fig.savefig(figWritePath+'/SOM gravity Heatmaps/SOM_gravMatHeatmap_'+normMethod+'normed_'+years+lvl+'_'+periodIdx+'.png',bbox_inches='tight')
# plt.close()
# interactive(False)
# '''Show as image'''
# fig = plt.figure()
# im = plt.imshow(np.log(gravMat), cmap='hot')
# plt.colorbar(im, orientation='vertical')
# mng = plt.get_current_fig_manager()
# mng.window.state('zoomed')
# plt.title('log10 SOM gravity image (Level '+lvl+' terms | 5 year period prior to '+str(int(periodIdx)*5+trueYearsIni[idy])+')')
# interactive(True)
# plt.show()
# plt.savefig(figWritePath+'/SOM gravity Heatmaps/SOM_gravityImage_'+normMethod+'normed_'+years+lvl+'_'+periodIdx+'.png',bbox_inches='tight')
# plt.close()
# interactive(False)
#------------------------------------------------------------------------------------------------------------
# #-----------------------------------------------------------------------------------------------------------
'''GREENWICH based matrix shift'''#---------------------------------------------------------------------------
# #-----------------------------------------------------------------------------------------------------------
greenSomUmatrix = som.umatrix.copy()
topTerm = dataDict['topTermsByPR'][0]
print('-----------------------------------'+topTerm)
topTermCoords = som.bmus[nodes.index(topTerm)]
# print(topTermCoords)
clmnShift = (n_columns/2) - topTermCoords[0]
# print('topTermCoords[0]: %s clmnShift: %s' %(topTermCoords[0],clmnShift))
rowShift = (n_rows/2) - topTermCoords[1]
# print('topTermCoords[1]: %s rowShift: %s' %(topTermCoords[1],rowShift))
greenSomUmatrix = np.roll(greenSomUmatrix, int(rowShift), axis=0)#int(rowShift)
greenSomUmatrix = np.roll(greenSomUmatrix, int(clmnShift), axis=1)#int(clmnShift)
'''moving centroids according to greenwich'''
xCcG,yCcG,centrCluster = [] ,[], []
for x in som.centroidBMcoords:
if som.clusters[x[0], x[1]] in colors:
dimTemp = toroidCoordinateFinder(x[1],clmnShift,x[0],rowShift,n_columns,n_rows)
xCcG.append(int(dimTemp[0]))
yCcG.append(int(dimTemp[1]))
centrCluster.append(som.clusters[x[0], x[1]])
'''Inserting BMUs by greenwich'''
xDimension, yDimension = [], []
for x in som.bmus:
dimTemp = toroidCoordinateFinder(x[0],clmnShift,x[1],rowShift,n_columns,n_rows)
xDimension.append(int(dimTemp[0]))
yDimension.append(int(dimTemp[1]))
greenBmus = zip(xDimension,yDimension)
# # print('writing BMU coords and names to file')
# # print('writing greenwich umatrix to file')
# # np.savetxt(greenwichUmatrixWritePath+'/umx'+years+lvl+'_'+periodIdx+'.umx',greenSomUmatrix,delimiter='\t', newline='\n',header='% '+ '%s %s'%(n_rows,n_columns))
# # with open(greenwichUmatrixWritePath+'/umx'+years+lvl+'_'+periodIdx+'.bm','w') as f:
# # with open(greenwichUmatrixWritePath+'/umx'+years+lvl+'_'+periodIdx+'.names','w') as fn:
# # f.write('% '+'%s %s\n' %(n_rows,n_columns))
# # fn.write('% '+str(len(nodes))+'\n')
# # for idx,coox in enumerate(xDimension):
# # f.write('%s %s %s\n' %(idx,int(yDimension[idx]),int(coox)))
# # fn.write('%s %s %s\n' %(idx,nodes[idx],nodes[idx]))
print('plotting greenwich shifted soms')
if not os.path.exists(greenwichFigWritePath+'/'+clusterAlgLabel+'Clusters/'+SOMdimensionsString):
os.makedirs(greenwichFigWritePath+'/'+clusterAlgLabel+'Clusters/'+SOMdimensionsString)
fig, ax = plt.subplots()
plt.imshow(greenSomUmatrix,cmap = 'Spectral_r', aspect = 'auto')
ax.scatter(xDimension,yDimension,s=areas,c=colors)
doneLabs = set([''])
lIdx = 0
for label, x, y in zip(originallabels, xDimension, yDimension):
if label == topTerm:
plt.annotate(label, xy = (x, y), xytext = (x, y-lablshift), textcoords = 'data', ha = 'center', va = 'center',bbox = dict(boxstyle = 'round,pad=0.1', fc = 'red'))
topXcoor,topYcoor = x,y
topIdx = lIdx
else:
lblshiftRatio = 2
labFinshift = ''
while labFinshift in doneLabs:
potentialPositions = [(x, y+lablshift), (x+lblshiftRatio*lablshift, y), (x-lblshiftRatio*lablshift, y), (x+lblshiftRatio*lablshift, y+lblshiftRatio*lablshift),
(x-lblshiftRatio*lablshift, y+lblshiftRatio*lablshift), (x+lblshiftRatio*lablshift, y-lblshiftRatio*lablshift), (x+lblshiftRatio*lablshift, y+lblshiftRatio*lablshift),
(x-lblshiftRatio*lablshift, y+lblshiftRatio*lablshift)]
for pP in potentialPositions:
labFinshift = pP
if labFinshift not in doneLabs:
break
lblshiftRatio+=1
doneLabs.add(labFinshift)
plt.annotate(label, xy = (x, y), xytext = labFinshift, textcoords = 'data', ha = 'center', va = 'center',bbox = dict(boxstyle = 'round,pad=0.1', fc = 'white', alpha = 0.4))#,arrowprops = dict(arrowstyle = '-', connectionstyle = 'arc3,rad=0'))
lIdx+=1
# plt.scatter(xCcG,yCcG, c= centrCluster, s= [1000]*len(xCcG), alpha = 0.4)#insert centroids
plt.xlim(0,n_columns)
plt.ylim(0,n_rows)
plt.gca().invert_yaxis()
plt.xlabel('SOM with "'+topTerm.upper()+'" serving as Greenwich. Level '+lvl+' terms, timeslot '+periodIdx+' (5 year period prior to '+str(int(periodIdx)*5+trueYearsIni[idy])+')')
mng = plt.get_current_fig_manager()
mng.window.state('zoomed')
interactive(True)
plt.show()
fig.savefig(greenwichFigWritePath+'/'+clusterAlgLabel+'Clusters/'+SOMdimensionsString+'/SOM_AdjMat'+years+lvl+'_'+periodIdx+'.jpg',bbox_inches='tight')
plt.close()
interactive(False)
# print(greenSomUmatrix.shape())
'''Inserting BMUs with WMX code'''
xDimension, yDimension = [], []
for x in som.bmus:
dimTemp = toroidCoordinateFinder(x[0],clmnShift,x[1],rowShift,n_columns,n_rows)
xDimension.append(int(dimTemp[0]))
yDimension.append(int(dimTemp[1]))
fig, ax = plt.subplots()
plt.imshow(greenSomUmatrix,cmap = 'Spectral_r', aspect = 'auto')
plt.scatter(xDimension,yDimension,s=areas,c=colors)
labels = [termLabelDict[" ".join(re.findall("[a-z]+", nodes[x]))][labelFormat] for x in range(len(xDimension))]
doneLabs = set([''])
lIdx = 0
for label, x, y in zip(labels, xDimension, yDimension):
if lIdx == topIdx:
lIdx+=1
continue
lblshiftRatio = 2
labFinshift = ''
while labFinshift in doneLabs:
potentialPositions = [(x, y+lablshift), (x+lblshiftRatio*lablshift, y), (x-lblshiftRatio*lablshift, y), (x+lblshiftRatio*lablshift, y+lblshiftRatio*lablshift),
(x-lblshiftRatio*lablshift, y+lblshiftRatio*lablshift), (x+lblshiftRatio*lablshift, y-lblshiftRatio*lablshift), (x+lblshiftRatio*lablshift, y+lblshiftRatio*lablshift),
(x-lblshiftRatio*lablshift, y+lblshiftRatio*lablshift)]
for pP in potentialPositions:
labFinshift = pP
if labFinshift not in doneLabs:
break
lblshiftRatio+=1
doneLabs.add(labFinshift)
plt.annotate(label, xy = (x, y), xytext = labFinshift, textcoords = 'data', ha = 'center', va = 'center',bbox = dict(boxstyle = 'round,pad=0.1', fc = 'white', alpha = 0.4))#,arrowprops = dict(arrowstyle = '-', connectionstyle = 'arc3,rad=0'))
lIdx+=1
plt.annotate(termLabelDict[" ".join(re.findall("[a-z]+", topTerm))][labelFormat] , xy = (topXcoor, topYcoor), xytext = (topXcoor, topYcoor-lablshift), textcoords = 'data', ha = 'center', va = 'center',bbox = dict(boxstyle = 'round,pad=0.1', fc = 'red'))
# plt.scatter(xCcG,yCcG, c= centrCluster, s= [1000]*len(xCcG), alpha = 0.4)#insert centroids
plt.xlim(0,n_columns)
plt.ylim(0,n_rows)
plt.gca().invert_yaxis()
plt.xlabel('SOM with "'+topTerm.upper()+'" serving as Greenwich. Level '+lvl+' terms, timeslot '+periodIdx+' (5 year period prior to '+str(int(periodIdx)*5+trueYearsIni[idy])+')')
mng = plt.get_current_fig_manager()
mng.window.state('zoomed')
interactive(True)
plt.show()
fig.savefig(greenwichFigWritePath+'/'+clusterAlgLabel+'Clusters/'+SOMdimensionsString+'/SOM_Wmatrix'+labelFormat+'LabeledAdjMat'+years+lvl+'_'+periodIdx+'.jpg',bbox_inches='tight')
plt.close()
interactive(False)
'''Inserting BMUs with WMX labels'''
labelFormat = 'label'
xDimension, yDimension = [], []
for x in som.bmus:
dimTemp = toroidCoordinateFinder(x[0],clmnShift,x[1],rowShift,n_columns,n_rows)
xDimension.append(int(dimTemp[0]))
yDimension.append(int(dimTemp[1]))
fig, ax = plt.subplots()
plt.imshow(greenSomUmatrix,cmap = 'Spectral_r', aspect = 'auto')
plt.scatter(xDimension,yDimension,s=areas,c=colors)
labels = [termLabelDict[" ".join(re.findall("[a-z]+", nodes[x]))][labelFormat] for x in range(len(xDimension))]
doneLabs = set([''])
lIdx = 0
for label, x, y in zip(labels, xDimension, yDimension):
if lIdx == topIdx:
lIdx+=1
continue
lblshiftRatio = 2
labFinshift = ''
while labFinshift in doneLabs:
potentialPositions = [(x, y+lablshift), (x+lblshiftRatio*lablshift, y), (x-lblshiftRatio*lablshift, y), (x+lblshiftRatio*lablshift, y+lblshiftRatio*lablshift),
(x-lblshiftRatio*lablshift, y+lblshiftRatio*lablshift), (x+lblshiftRatio*lablshift, y-lblshiftRatio*lablshift), (x+lblshiftRatio*lablshift, y+lblshiftRatio*lablshift),
(x-lblshiftRatio*lablshift, y+lblshiftRatio*lablshift)]
for pP in potentialPositions:
labFinshift = pP
if labFinshift not in doneLabs:
break
lblshiftRatio+=1
doneLabs.add(labFinshift)
plt.annotate(label, xy = (x, y), xytext = labFinshift, textcoords = 'data', ha = 'center', va = 'center',bbox = dict(boxstyle = 'round,pad=0.1', fc = 'white', alpha = 0.4))#,arrowprops = dict(arrowstyle = '-', connectionstyle = 'arc3,rad=0'))
lIdx+=1
plt.annotate(termLabelDict[" ".join(re.findall("[a-z]+", topTerm))][labelFormat] , xy = (topXcoor, topYcoor), xytext = (topXcoor, topYcoor-lablshift), textcoords = 'data', ha = 'center', va = 'center',bbox = dict(boxstyle = 'round,pad=0.1', fc = 'red'))
# plt.scatter(xCcG,yCcG, c= centrCluster, s= [1000]*len(xCcG), alpha = 0.4)#insert centroids
plt.xlim(0,n_columns)
plt.ylim(0,n_rows)
plt.gca().invert_yaxis()
plt.xlabel('SOM with "'+topTerm.upper()+'" serving as Greenwich. Level '+lvl+' terms, timeslot '+periodIdx+' (5 year period prior to '+str(int(periodIdx)*5+trueYearsIni[idy])+')')
mng = plt.get_current_fig_manager()
mng.window.state('zoomed')
interactive(True)
plt.show()
fig.savefig(greenwichFigWritePath+'/'+clusterAlgLabel+'Clusters/'+SOMdimensionsString+'/SOM_Wmatrix'+labelFormat+'LabeledAdjMat'+years+lvl+'_'+periodIdx+'.jpg',bbox_inches='tight')
plt.close()
interactive(False)
# # print('plotting Greenwich umatrix 3D surface')
# # if not os.path.exists(greenwichFigWritePath+'/SOM 3D Umatrices/'+SOMdimensionsString):
# # os.makedirs(greenwichFigWritePath+'/SOM 3D Umatrices/'+SOMdimensionsString)
# # fig = plt.figure()
# # ax = fig.gca(projection='3d')
# # X = np.arange(0, n_columns, 1)
# # Y = np.arange(0, n_rows, 1)
# # X, Y = np.meshgrid(X, Y)
# # N=greenSomUmatrix/greenSomUmatrix.max()
# # surf = ax.plot_surface(X, Y, greenSomUmatrix, facecolors=cm.jet(N),rstride=1, cstride=1)
# # # ax.set_zlim(-1.01, 1.01)
# # # ax.zaxis.set_major_locator(LinearLocator(10))
# # # ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
# # m = cm.ScalarMappable(cmap=cm.jet)
# # m.set_array(greenSomUmatrix)
# # plt.colorbar(m, shrink=0.5, aspect=5)
# # plt.title('Greenwich SOM umatrix 3D surface vizualization (Level '+lvl+' terms | 5 year period prior to '+str(int(periodIdx)*5+trueYearsIni[idy])+')')
# # mng = plt.get_current_fig_manager()
# # mng.window.state('zoomed')
# # interactive(True)
# # plt.show()
# # fig.savefig(greenwichFigWritePath+'/SOM 3D Umatrices/'+SOMdimensionsString+'/umxSurf'+years+lvl+'_'+periodIdx+'.png',bbox_inches='tight')
# # plt.close()
# # interactive(False)
#------------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------
'''Check for merges and splits in Greenwich files'''#-------------------------------------------
#-----------------------------------------------------------------------------------------------
# if int(periodIdx)>0:
# if not os.path.exists(splitsMergesWritePath+'/'+SOMdimensionsString):
# os.makedirs(splitsMergesWritePath+'/'+SOMdimensionsString)
# tmpStrClusters = [','.join([str(y) for y in x]) for x in greenBmus]
# strClustDict[periodIdx] = {}
# for idx, sC in enumerate(tmpStrClusters):
# if sC in strClustDict[periodIdx]:
# strClustDict[periodIdx][sC].append(nodes[idx])
# else:
# strClustDict[periodIdx][sC] = [nodes[idx]]
# tmpSameBMUsNodes = list(strClustDict[periodIdx].values())
# invStrClustDict[periodIdx] = {','.join(v):k for k,v in strClustDict[periodIdx].items()}
# dataDict[periodIdx]['bmuNodes'] = tmpSameBMUsNodes
# tmpsplits,tmpmerges = 0, 0
# with open(splitsMergesWritePath+'/'+SOMdimensionsString+'/changes'+years+lvl+'_'+periodIdx+'.txt','w') as f:
# for tsbn in tmpSameBMUsNodes:
# if tsbn not in dataDict[str(int(periodIdx)-1)]['bmuNodes']:
# oldbmucoords = []
# for ts in tsbn:
# for ots in dataDict[str(int(periodIdx)-1)]['bmuNodes']:
# if ts in ots:
# oldbmucoords.append(invStrClustDict[str(int(periodIdx)-1)][','.join(ots)])
# if len(set(oldbmucoords)) < 2:
# f.write('Terms %s at %s were split from %s \n' %(','.join(tsbn),invStrClustDict[periodIdx][','.join(tsbn)],'|'.join(oldbmucoords)))
# if len(tsbn) <= len(strClustDict[str(int(periodIdx)-1)][oldbmucoords[0]])/2:
# tmpsplits+=len(tsbn)
# termDislocation['splits'].extend(tsbn)
# termDislocation['both'].extend(tsbn)
# else:
# f.write('Terms %s at %s were merged from %s \n' %(','.join(tsbn),invStrClustDict[periodIdx][','.join(tsbn)],'|'.join(oldbmucoords)))
# for tmpclusts in [strClustDict[str(int(periodIdx)-1)][x] for x in set(oldbmucoords)]:
# tmpclustIntersect = set(tmpclusts).intersection(set(tsbn))
# if len(tmpclustIntersect) <= len(tsbn)/2:
# tmpmerges+=len(tmpclustIntersect)
# termDislocation['merges'].extend(tmpclustIntersect)
# termDislocation['both'].extend(tmpclustIntersect)
# # termDislocation['both'].extend(tsbn)
# dislocationDict['merges'].append(100*tmpmerges/len(dataDict['uniquePersistentTerms']))
# dislocationDict['splits'].append(100*tmpsplits/len(dataDict['uniquePersistentTerms']))
# dislocationDict['both'].append(100*(tmpmerges+tmpsplits)/len(dataDict['uniquePersistentTerms']))
# else:
# tmpStrClusters = [','.join([str(y) for y in x]) for x in greenBmus]
# strClustDict = {periodIdx:{}}
# for idx, sC in enumerate(tmpStrClusters):
# if sC in strClustDict[periodIdx]:
# strClustDict[periodIdx][sC].append(nodes[idx])
# else:
# strClustDict[periodIdx][sC] = [nodes[idx]]
# dataDict[periodIdx]['bmuNodes'] = list(strClustDict[periodIdx].values())
# invStrClustDict = {periodIdx:{','.join(v):k for k,v in strClustDict[periodIdx].items()}}
# dislocationDict = {'merges':[],'splits':[],'both':[]}
# termDislocation = {'merges':[],'splits':[],'both':[]}
#-------------------------------------------------------------------------------------------------------------------------------------
#-------------------------------------------------------------------------------------------------------------------------------------
# allPeriods = list(dataDict.keys())
# allPeriods.remove('uniquePersistentTerms')
# allPeriods.remove('allTerms')
# try:
# allPeriods.remove('topTermsByPR')
# except:
# pass
# allPeriods.sort()
# marker, color = ['*', '+', 'o','d','h','p','s','v','^','d'], ['g','r','m','c','y','k']#line, ["-","--","-.",":"] #list(colors.cnames.keys())
# marker.sort()
# color.sort()
# asmarker = itertools.cycle(marker)
# ascolor = itertools.cycle(color)
# # asline = itertools.cycle(line)
#----------------------------------------------------------------------------------------------------
'''Plot merges and splits distribution over time'''#-------------------------------------------------
#----------------------------------------------------------------------------------------------------
# fig, ax = plt.subplots()
# plt.plot(dislocationDict['splits'], marker='+', color='b',label='Splits')
# plt.plot(dislocationDict['merges'], marker='o', color='r',label='Merges')
# plt.plot(dislocationDict['both'], marker='*', color='g',label='Splits+Merges')
# ax.set_xticklabels([int(x)*5+trueYearsIni[idy] for x in allPeriods[1:]], minor=False)
# ax.legend(loc='upper left',ncol=5)
# ax.set_ylabel('Frequency Percentage %')
# plt.title('Population of terms splitting and merging over time')
# mng = plt.get_current_fig_manager()
# mng.window.state('zoomed')
# interactive(True)
# plt.show()
# fig.savefig(splitsMergesWritePath+'/'+SOMdimensionsString+'/driftPercentage_'+years+lvl+'.png',bbox_inches='tight')
# plt.close()
# interactive(False)
#----------------------------------------------------------------------------------------------------
'''Write high risk terms for splitting and merging over time'''#-------------------------------------
#----------------------------------------------------------------------------------------------------
# termSplitsCount = collections.Counter(termDislocation['splits'])
# termMergesCount = collections.Counter(termDislocation['merges'])
# termBothCount = collections.Counter(termDislocation['both'])
# with open(splitsMergesWritePath+'/'+SOMdimensionsString+'/highRiskSplits_'+years+lvl+'.txt','w') as f1:
# f1.write('\n'.join(['\t'.join([str(x) for x in y]) for y in termSplitsCount.most_common(50)]))
# with open(splitsMergesWritePath+'/'+SOMdimensionsString+'/highRiskMerges_'+years+lvl+'.txt','w') as f2:
# f2.write('\n'.join(['\t'.join([str(x) for x in y]) for y in termMergesCount.most_common(50)]))
# with open(splitsMergesWritePath+'/'+SOMdimensionsString+'/highRiskBoth_'+years+lvl+'.txt','w') as f3:
# f3.write('\n'.join(['\t'.join([str(x) for x in y]) for y in termBothCount.most_common(50)]))
#-------------------------------------------------------------------------------------------------
'''pageRank and HITS term fluctuation'''
# numOfPlots = [5, 10, 20]
# if not os.path.exists(figWritePath+'/centrality fluctuations over time/PageRank'):
# os.makedirs(figWritePath+'/centrality fluctuations over time/PageRank')
# os.makedirs(figWritePath+'/centrality fluctuations over time/HITS')
# os.makedirs(figWritePath+'/centrality fluctuations over time/Betweenness')
# termPRRankDict = {}
# termPRSequences = {}
# termAuthRankDict = {}
# termAuthSequences = {}
# termHubRankDict = {}
# termHubSequences = {}
# termBetweenRankDict = {}
# termBetweenSequences = {}
# for x in nodes:
# prSequence, authSequence, hubSequence, betweenSequence = [], [] ,[], []
# for p in allPeriods:
# prSequence.append(dataDict[p]['term']['pageRank'][x])
# authSequence.append(dataDict[p]['term']['authority'][x])
# hubSequence.append(dataDict[p]['term']['hub'][x])
# betweenSequence.append(dataDict[p]['term']['betweenness'][x])
# termPRSequences[x] = prSequence
# termPRRankDict[x] = recRank(termPrRanks[x])
# termAuthSequences[x] = authSequence
# termAuthRankDict[x] = recRank(termAuthRanks[x])
# termHubSequences[x] = hubSequence
# termHubRankDict[x] = recRank(termHubRanks[x])
# termBetweenSequences[x] = betweenSequence
# termBetweenRankDict[x] = recRank(termBetweenRanks[x])
# termPRRanked = sorted(termPRRankDict, key=termPRRankDict.get, reverse=True)
# termAuthRanked = sorted(termAuthRankDict, key=termAuthRankDict.get, reverse=True)
# termHubRanked = sorted(termHubRankDict, key=termHubRankDict.get, reverse=True)
# termBetweenRanked = sorted(termBetweenRankDict, key=termBetweenRankDict.get, reverse=True)
# dataDict['topTermsByPR'] = termPRRanked
# # print(termPRRanked)
# # dataDict['termPRRankDict'] = termPRRankDict
# # print(termPRRankDict)
# for nop in numOfPlots:
# fig, ax = plt.subplots()
# for x in termPRRanked[:nop]:
# plt.plot(termPRSequences[x], marker=next(asmarker), color=next(ascolor),label=x)
# ax.set_xticklabels([int(x)*5+trueYearsIni[idy] for x in allPeriods], minor=False)
# ax.legend(loc='upper left',ncol=5)
# plt.title('Term PageRank fluctuation over time')
# mng = plt.get_current_fig_manager()
# mng.window.state('zoomed')
# interactive(True)
# plt.show()
# fig.savefig(figWritePath+'/centrality fluctuations over time/PageRank/top'+str(nop)+'pagerankFlux_'+years+lvl+'.png',bbox_inches='tight')
# plt.close()
# interactive(False)
# fig, ax = plt.subplots()
# for x in termAuthRanked[:nop]:
# plt.plot(termAuthSequences[x], marker=next(asmarker), color=next(ascolor),label=x)
# ax.set_xticklabels([int(x)*5+trueYearsIni[idy] for x in allPeriods], minor=False)
# plt.ylim(0, 1.1)
# ax.legend(loc='upper left',ncol=5)
# plt.title('Term Authority fluctuation over time')
# mng = plt.get_current_fig_manager()
# mng.window.state('zoomed')
# interactive(True)
# plt.show()
# fig.savefig(figWritePath+'/centrality fluctuations over time/HITS/top'+str(nop)+'authorityFlux_'+years+lvl+'.png',bbox_inches='tight')
# plt.close()
# interactive(False)
# fig, ax = plt.subplots()
# for x in termHubRanked[:nop]:
# plt.plot(termHubSequences[x], marker=next(asmarker), color=next(ascolor),label=x)
# ax.set_xticklabels([int(x)*5+trueYearsIni[idy] for x in allPeriods], minor=False)
# plt.ylim(0, 1.1)
# ax.legend(loc='upper left',ncol=5)
# plt.title('Term Hub fluctuation over time')
# mng = plt.get_current_fig_manager()
# mng.window.state('zoomed')
# interactive(True)
# plt.show()
# fig.savefig(figWritePath+'/centrality fluctuations over time/HITS/top'+str(nop)+'hubFlux_'+years+lvl+'.png',bbox_inches='tight')
# plt.close()
# interactive(False)
# fig, ax = plt.subplots()
# for x in termBetweenRanked[:nop]:
# plt.plot(termBetweenSequences[x], marker=next(asmarker), color=next(ascolor),label=x)
# ax.set_xticklabels([int(x)*5+trueYearsIni[idy] for x in allPeriods], minor=False)
# ax.legend(loc='upper left',ncol=5)
# plt.title('Term betweenness fluctuation over time')
# mng = plt.get_current_fig_manager()
# mng.window.state('zoomed')
# interactive(True)
# plt.show()
# fig.savefig(figWritePath+'/centrality fluctuations over time/betweenness/top'+str(nop)+'BetweenFlux_'+years+lvl+'.png',bbox_inches='tight')
# plt.close()
# interactive(False)
# # ------------------------------------------------------------------------------------------------
'''make histograms of WMX codes overall'''#------------------------------------------------------------
# if not os.path.exists(figWritePath+'/histograms'):
# os.makedirs(figWritePath+'/histograms')
# os.makedirs(figWritePath+'/histograms/normalized')
# labelFormat = 'code' #switch terms by Wmatrix code or label?
# allWMXcodes = [termLabelDict[" ".join(re.findall("[a-z]+", x))][labelFormat] for x in nodes]
# countAllWMXcodes = collections.Counter(allWMXcodes)
# allWMXhistLabels = sorted(list(countAllWMXcodes.keys()))
# allWMXhistVals = [countAllWMXcodes[x] for x in allWMXhistLabels]
# fig, ax = plt.subplots()
# ind = np.arange(len(allWMXhistVals))
# ax.bar(ind,allWMXhistVals,color = 'b')
# ax.set_ylabel('Frequency')
# ax.set_xlabel('WMX codes')
# ax.set_xticks(ind+0.45)
# ax.set_xticklabels(allWMXhistLabels,rotation = 90, fontsize=heatMapFont+2)
# plt.title('WMX category appearance histogram (Level '+lvl+' terms)')
# mng = plt.get_current_fig_manager()
# mng.window.state('zoomed')
# interactive(True)
# plt.show()
# fig.savefig(figWritePath+'/histograms/appearanceWMXcodeDistribution'+years+lvl+'.png',bbox_inches='tight')
# plt.close()
# interactive(False)
# # check correlation between WMX code histograms----------------------------------------------------
# allWMXhistLabels = sorted(list(set([termLabelDict[" ".join(re.findall("[a-z]+", x))][labelFormat] for x in nodes])))
# fig,ax = plt.subplots()
# for idx, h in enumerate(histoNormAggList):
# plt.plot(h, label = yearList[idx])
# ax.set_xticklabels(allWMXhistLabels)
# ax.set_ylabel('Frequency')
# ax.set_xlabel('WMX codes')
# ax.set_xticks(np.arange(len(allWMXhistVals)))
# ax.set_xticklabels(allWMXhistLabels,rotation = 90, fontsize=heatMapFont+2)
# ax.legend(loc='upper left',ncol=5)
# plt.title('Histogram WMXcode fluctuation correlation for the'+years+' era and '+lvl)
# mng = plt.get_current_fig_manager()
# mng.window.state('zoomed')
# interactive(True)
# plt.show()
# fig.savefig(figWritePath+'/histograms/WMXcodeDistributionCorrelation'+years+lvl+'.png',bbox_inches='tight')
# plt.close()
# interactive(False)
# pickle.dump(dataDict,open('./data/artworks_tmp/edgeDictDynamic'+years+lvl+'.pck','wb'), protocol = 2)
elapsed = time.time() - t
print('Total time Elapsed: %.2f seconds' % elapsed)
| apache-2.0 | 770,716,912,347,527,700 | 60.118451 | 265 | 0.517387 | false |
Som-Energia/somenergia-generationkwh | generationkwh/sharescurve_test.py | 1 | 10351 | # -*- coding: utf-8 -*-
from .sharescurve import (
MemberSharesCurve,
PlantSharesCurve,
MixTotalSharesCurve,
LayeredShareCurve,
)
import unittest
from yamlns import namespace as ns
from .isodates import isodate
class ItemProvider_MockUp(object):
def __init__(self, items):
self._items = [
ns(
myattribute=itemattribute,
firstEffectiveDate=isodate(start),
lastEffectiveDate=isodate(end),
shares=shares,
)
for itemattribute, start, end, shares in items
]
def items(self):
return self._items
class InvestmentProvider_MockUp(object):
def __init__(self, items):
self._contracts = [
ns(
member=member,
firstEffectiveDate=isodate(start),
lastEffectiveDate=isodate(end),
shares=shares,
)
for member, start, end, shares in items
]
def items(self):
return self._contracts
class PlantProvider_MockUp(object):
def __init__(self, items):
self._plants = [
ns(
mix=mix,
firstEffectiveDate=isodate(start),
lastEffectiveDate=isodate(end),
shares=shares,
)
for mix, start, end, shares in items
]
def items(self):
return self._plants
class LayeredShareCurve_Test(unittest.TestCase):
def assert_atDay_equal(self, filterValue, day, items, expectation):
provider = ItemProvider_MockUp(items)
curve = LayeredShareCurve(
items = provider,
filterAttribute = 'myattribute',
)
self.assertEqual(expectation, curve.atDay(isodate(day),filterValue))
def assertActiveSharesEqual(self, filterValue, start, end, items, expected):
provider = ItemProvider_MockUp(items)
curve = LayeredShareCurve(
items = provider,
filterAttribute = 'myattribute',
)
result = curve.hourly(isodate(start), isodate(end),filterValue)
self.assertEqual(list(result), expected)
def test_atDay_noShares(self):
self.assert_atDay_equal(
'member', '2015-02-21',
[],
0
)
def test_atDay_singleShare(self):
self.assert_atDay_equal(
'member', '2015-02-21',
[
('member', '2015-02-21', '2015-02-21', 3),
],
3
)
def test_atDay_multipleShares_getAdded(self):
self.assert_atDay_equal(
'member', '2015-02-21',
[
('member', '2015-02-21', '2015-02-21', 3),
('member', '2015-02-21', '2015-02-21', 5),
],
8
)
def test_atDay_otherMembersIgnored(self):
self.assert_atDay_equal(
'member', '2015-02-21',
[
('member', '2015-02-21', '2015-02-21', 3),
('other', '2015-02-21', '2015-02-21', 5),
],
3
)
def test_atDay_allMembersCountedIfNoneSelected(self):
self.assert_atDay_equal(
None, '2015-02-21',
[
('member', '2015-02-21', '2015-02-21', 3),
('other', '2015-02-21', '2015-02-21', 5),
],
8
)
def test_atDay_expiredActionsNotCounted(self):
self.assert_atDay_equal(
'member', '2015-02-21',
[
('member', '2015-02-21', '2015-02-21', 3),
('member', '2014-02-21', '2015-02-20', 5),
],
3
)
def test_atDay_unactivatedActions(self):
self.assert_atDay_equal(
'member', '2015-02-21',
[
('member', '2015-02-21', '2015-02-21', 3),
('member', '2015-02-22', '2016-02-20', 5),
],
3
)
def test_hourly_singleDay_noShares(self):
self.assertActiveSharesEqual(
'member', '2015-02-21', '2015-02-21',
[],
+25*[0]
)
def test_hourly_singleDay_singleShare(self):
self.assertActiveSharesEqual(
'member', '2015-02-21', '2015-02-21',
[
('member', '2015-02-21', '2015-02-21', 3),
],
+25*[3]
)
def test_hourly_singleDay_multipleShare(self):
self.assertActiveSharesEqual(
'member', '2015-02-21', '2015-02-21',
[
('member', '2015-02-21', '2015-02-21', 3),
('member', '2015-02-21', '2015-02-21', 5),
],
+25*[8]
)
def test_hourly_otherMembersIgnored(self):
self.assertActiveSharesEqual(
'member', '2015-02-21', '2015-02-21',
[
('member', '2015-02-21', '2015-02-21', 3),
('other', '2015-02-21', '2015-02-21', 5),
],
+25*[3]
)
def test_hourly_allMembersCountedIfNoneSelected(self):
self.assertActiveSharesEqual(
None, '2015-02-21', '2015-02-21',
[
('member', '2015-02-21', '2015-02-21', 3),
('other', '2015-02-21', '2015-02-21', 5),
],
+25*[8]
)
def test_hourly_expiredActionsNotCounted(self):
self.assertActiveSharesEqual(
'member', '2015-02-21', '2015-02-21',
[
('member', '2015-02-21', '2015-02-21', 3),
('member', '2014-02-21', '2015-02-20', 5),
],
+25*[3]
)
def test_hourly_notYetActivatedActions(self):
self.assertActiveSharesEqual(
'member', '2015-02-21', '2015-02-21',
[
('member', '2015-02-21', '2015-02-21', 3),
('member', '2015-02-22', '2016-02-20', 5),
],
+25*[3]
)
def test_hourly_unactivatedActions(self):
self.assertActiveSharesEqual(
'member', '2015-02-21', '2015-02-21',
[
('member', False, False, 3),
('member', '2015-02-21', '2016-02-20', 5),
],
+25*[5]
)
def test_hourly_undeactivatedActions(self):
self.assertActiveSharesEqual(
'member', '2015-02-21', '2015-02-21',
[
('member', '2015-02-21', False, 3),
('member', '2015-02-21', '2016-02-20', 5),
],
+25*[8]
)
def test_hourly_twoDays(self):
self.assertActiveSharesEqual(
'member', '2015-02-21', '2015-02-22',
[
('member', '2015-02-21', '2015-02-22', 3),
],
+25*[3]
+25*[3]
)
def test_hourly_lastDaysNotActive(self):
self.assertActiveSharesEqual(
'member', '2015-02-21', '2015-02-22',
[
('member', '2015-02-21', '2015-02-21', 3),
],
+25*[3]
+25*[0]
)
def test_hourly_firstDaysNotActive(self):
self.assertActiveSharesEqual(
'member', '2015-02-21', '2015-02-22',
[
('member', '2015-02-22', '2015-02-26', 3),
],
+25*[0]
+25*[3]
)
def test_hourly_swappedDatesReturnsEmpty(self):
self.assertActiveSharesEqual(
'member', '2016-02-21', '2015-02-22',
[
],
[]
)
def test_hourly_fullCase(self):
self.assertActiveSharesEqual(
'member', '2015-02-11', '2016-03-11',
[
('member', '2015-02-10', '2015-02-22', 3),
('member', '2015-01-11', '2015-02-11', 5),
('member', '2015-02-13', '2015-02-14', 7),
('member', '2015-02-16', '2015-02-24', 11),
# ignored
('member', '2014-02-12', '2014-02-22', 13), # early
('member', '2017-02-12', '2017-02-22', 17), # late
('other', '2015-02-12', '2015-02-22', 21), # other
],
+25*[8] # 11
+25*[3] # 12
+25*[10] # 13
+25*[10] # 14
+25*[3] # 15
+25*[14] # 16
+25*[14] # 17
+25*[14] # 18
+25*[14] # 19
+25*[14] # 20
+25*[14] # 21
+25*[14] # 22
+25*[11] # 23
+25*[11] # 24
+25*381*[0] # 25 and so
)
class MemberSharesCurve_Test(LayeredShareCurve_Test):
def assert_atDay_equal(self, member, day, investments, expectation):
provider = InvestmentProvider_MockUp(investments)
curve = MemberSharesCurve(investments = provider)
self.assertEqual(expectation, curve.atDay(isodate(day),member))
def assertActiveSharesEqual(self, member, start, end, investments, expected):
provider = InvestmentProvider_MockUp(investments)
curve = MemberSharesCurve(investments = provider)
result = curve.hourly(isodate(start), isodate(end),member)
self.assertEqual(list(result), expected)
class TotalMixShareCurve_Test(LayeredShareCurve_Test):
def assert_atDay_equal(self, member, day, plants, expectation):
provider = PlantProvider_MockUp(plants)
curve = MixTotalSharesCurve(plants = provider)
self.assertEqual(expectation, curve.atDay(isodate(day),member))
def assertActiveSharesEqual(self, mix, start, end, plants, expected):
provider = PlantProvider_MockUp(plants)
curve = MixTotalSharesCurve(plants = provider)
result = curve.hourly(isodate(start), isodate(end),mix)
self.assertEqual(list(result), expected)
class PlantSharesCurve_Test(unittest.TestCase):
def test_atDay(self):
curve = PlantSharesCurve(5000)
self.assertEqual(5000, curve.atDay(isodate('2016-05-01')))
def test_hourly(self):
curve = PlantSharesCurve(5000)
self.assertEqual(25*[5000], list(curve.hourly(isodate('2016-05-01'), isodate('2016-05-01'))))
# vim: ts=4 sw=4 et
| agpl-3.0 | 8,626,699,505,108,588,000 | 29.444118 | 101 | 0.487006 | false |
google-research/google-research | cluster_gcn/partition_utils.py | 1 | 2321 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Collections of partitioning functions."""
import time
import metis
import scipy.sparse as sp
import tensorflow.compat.v1 as tf
def partition_graph(adj, idx_nodes, num_clusters):
"""partition a graph by METIS."""
start_time = time.time()
num_nodes = len(idx_nodes)
num_all_nodes = adj.shape[0]
neighbor_intervals = []
neighbors = []
edge_cnt = 0
neighbor_intervals.append(0)
train_adj_lil = adj[idx_nodes, :][:, idx_nodes].tolil()
train_ord_map = dict()
train_adj_lists = [[] for _ in range(num_nodes)]
for i in range(num_nodes):
rows = train_adj_lil[i].rows[0]
# self-edge needs to be removed for valid format of METIS
if i in rows:
rows.remove(i)
train_adj_lists[i] = rows
neighbors += rows
edge_cnt += len(rows)
neighbor_intervals.append(edge_cnt)
train_ord_map[idx_nodes[i]] = i
if num_clusters > 1:
_, groups = metis.part_graph(train_adj_lists, num_clusters, seed=1)
else:
groups = [0] * num_nodes
part_row = []
part_col = []
part_data = []
parts = [[] for _ in range(num_clusters)]
for nd_idx in range(num_nodes):
gp_idx = groups[nd_idx]
nd_orig_idx = idx_nodes[nd_idx]
parts[gp_idx].append(nd_orig_idx)
for nb_orig_idx in adj[nd_orig_idx].indices:
nb_idx = train_ord_map[nb_orig_idx]
if groups[nb_idx] == gp_idx:
part_data.append(1)
part_row.append(nd_orig_idx)
part_col.append(nb_orig_idx)
part_data.append(0)
part_row.append(num_all_nodes - 1)
part_col.append(num_all_nodes - 1)
part_adj = sp.coo_matrix((part_data, (part_row, part_col))).tocsr()
tf.logging.info('Partitioning done. %f seconds.', time.time() - start_time)
return part_adj, parts
| apache-2.0 | -5,812,103,375,373,815,000 | 30.364865 | 77 | 0.670832 | false |
HybridF5/jacket | jacket/tests/compute/unit/virt/hyperv/test_pathutils.py | 1 | 3157 | # Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import mock
from six.moves import builtins
from jacket.compute import exception
from jacket.tests.compute.unit.virt.hyperv import test_base
from jacket.compute.virt.hyperv import constants
from jacket.compute.virt.hyperv import pathutils
class PathUtilsTestCase(test_base.HyperVBaseTestCase):
"""Unit tests for the Hyper-V PathUtils class."""
def setUp(self):
super(PathUtilsTestCase, self).setUp()
self.fake_instance_dir = os.path.join('C:', 'fake_instance_dir')
self.fake_instance_name = 'fake_instance_name'
self._pathutils = pathutils.PathUtils()
def _mock_lookup_configdrive_path(self, ext):
self._pathutils.get_instance_dir = mock.MagicMock(
return_value=self.fake_instance_dir)
def mock_exists(*args, **kwargs):
path = args[0]
return True if path[(path.rfind('.') + 1):] == ext else False
self._pathutils.exists = mock_exists
configdrive_path = self._pathutils.lookup_configdrive_path(
self.fake_instance_name)
return configdrive_path
def test_lookup_configdrive_path(self):
for format_ext in constants.DISK_FORMAT_MAP:
configdrive_path = self._mock_lookup_configdrive_path(format_ext)
fake_path = os.path.join(self.fake_instance_dir,
'configdrive.' + format_ext)
self.assertEqual(configdrive_path, fake_path)
def test_lookup_configdrive_path_non_exist(self):
self._pathutils.get_instance_dir = mock.MagicMock(
return_value=self.fake_instance_dir)
self._pathutils.exists = mock.MagicMock(return_value=False)
configdrive_path = self._pathutils.lookup_configdrive_path(
self.fake_instance_name)
self.assertIsNone(configdrive_path)
@mock.patch('os.path.join')
def test_get_instances_sub_dir(self, fake_path_join):
class WindowsError(Exception):
def __init__(self, winerror=None):
self.winerror = winerror
fake_dir_name = "fake_dir_name"
fake_windows_error = WindowsError
self._pathutils.check_create_dir = mock.MagicMock(
side_effect=WindowsError(pathutils.ERROR_INVALID_NAME))
with mock.patch.object(builtins, 'WindowsError',
fake_windows_error, create=True):
self.assertRaises(exception.AdminRequired,
self._pathutils._get_instances_sub_dir,
fake_dir_name)
| apache-2.0 | 2,915,794,038,659,720,000 | 39.474359 | 78 | 0.653468 | false |
redhataccess/redhat-support-tool | src/redhat_support_tool/plugins/config.py | 1 | 12079 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from optparse import Option
from redhat_support_tool.helpers.confighelper import EmptyValueError, _
from redhat_support_tool.plugins import Plugin
import logging
import redhat_support_tool.helpers.common as common
import redhat_support_tool.helpers.confighelper as confighelper
import os
__author__ = 'Keith Robertson <[email protected]>'
__author__ = 'Rex White <[email protected]>'
logger = logging.getLogger("redhat_support_tool.plugins.config")
class Config(Plugin):
plugin_name = 'config'
@classmethod
def get_usage(cls):
'''
The usage statement that will be printed by OptionParser.
Example:
- %prog -c CASENUMBER [options] <comment text here>
Important: %prog is a OptionParser built-in. Use it!
'''
return _('%prog [options] config.option <new option value>')
@classmethod
def get_desc(cls):
'''
The description statement that will be printed by OptionParser.
Example:
- 'Use the \'%s\' command to add a comment to a case.'\
% cls.plugin_name
'''
return _('Use the \'%s\' command to set or get configuration '
'file values.') % cls.plugin_name
@classmethod
def get_epilog(cls):
'''
The epilog string that will be printed by OptionParser. Usually
used to print an example of how to use the program.
Example:
Examples:
- %s -c 12345678 Lorem ipsum dolor sit amet, consectetur adipisicing
- %s -c 12345678
'''
options = _('\nThe configuration file options which can be set are:\n')
# query plugins for list of options
plugins = common.get_plugin_dict()
for p_name, p_class in plugins.items():
func = getattr(p_class, 'config_help')
options = options + func()
return _('%s\n'
'Examples:\n'
'- %s user\n'
'- %s user my-rhn-username\n'
'- %s --unset user\n') % \
(options, cls.plugin_name, cls.plugin_name, cls.plugin_name)
@classmethod
def get_options(cls):
'''
Subclasses that need command line options should override this method
and return an array of optparse.Option(s) to be used by the
OptionParser.
Example:
return [Option("-f", "--file", action="store",
dest="filename", help='Some file'),
Option("-c", "--case",
action="store", dest="casenumber",
help='A case')]
Would produce the following:
Command (? for help): help mycommand
Usage: mycommand [options]
Use the 'mycommand' command to find a knowledge base solution by ID
Options:
-h, --help show this help message and exit
-f, --file Some file
-c, --case A case
Example:
- mycommand -c 12345 -f abc.txt
'''
return [Option("-g", "--global", dest="global",
help=_('Save configuration option in %s.' %
confighelper.ConfigHelper.GLOBAL_CONFIG_FILE),
action="store_true",
default=False),
Option("-u", "--unset", dest="unset",
help=_('Unset configuration option.'),
action="store_true", default=False)]
#
# Methods related to intrinsic configuration options
#
@classmethod
def config_help(self):
'''
Return descriptions for all the intrinsic configuration options
'''
options = " %-10s: %-67s\n" % \
('user', 'The Red Hat Customer Portal user.')
options = options + " %-10s: %-67s\n" % \
('password', 'The Red Hat Customer Portal password.')
options = options + " %-10s: %-67s\n" % \
('debug', 'CRITICAL, ERROR, WARNING, INFO, or DEBUG')
options = options + " %-10s: %-67s\n" % \
('url', _('The support services URL. Default=%s') % \
confighelper.ConfigHelper.DEFAULT_URL)
options = options + " %-10s: %-67s\n" % \
('proxy_url', _('A proxy server URL.'))
options = options + " %-10s: %-67s\n" % \
('proxy_user', _('A proxy server user.'))
options = options + " %-10s: %-67s\n" % \
('proxy_password', _('A password for the proxy server user.'))
options += " %-10s: %-67s\n" % ('ssl_ca',
_('Path to certificate authorities to trust during communication.'))
options += " %-10s: %-67s\n" % ('kern_debug_dir',
_('Path to the directory where kernel debug symbols should be '
'downloaded and cached. Default=%s') %
confighelper.ConfigHelper.DEFAULT_KERN_DEBUG_DIR)
return options
@classmethod
def config_get_user(cls):
cfg = confighelper.get_config_helper()
return cfg.get(section='RHHelp', option='user')
@classmethod
def config_set_user(cls, user, global_config=False):
cfg = confighelper.get_config_helper()
cfg.set(section='RHHelp', option='user', value=user,
persist=True, global_config=global_config)
@classmethod
def config_set_password(cls, global_config=False):
cfg = confighelper.get_config_helper()
cfg.prompt_for_password(prompt=False, global_config=global_config)
@classmethod
def config_get_debug(cls):
cfg = confighelper.get_config_helper()
return cfg.get(section='RHHelp', option='debug')
@classmethod
def config_set_debug(cls, debug, global_config=False):
if debug in logging._levelNames:
cfg = confighelper.get_config_helper()
cfg.set(section='RHHelp', option='debug', value=debug,
persist=True, global_config=global_config)
else:
raise EmptyValueError(_('%s is not a valid logging level.') %
debug)
@classmethod
def config_get_url(cls):
cfg = confighelper.get_config_helper()
return cfg.get(section='RHHelp', option='url')
@classmethod
def config_set_url(cls, url, global_config=False):
cfg = confighelper.get_config_helper()
cfg.set(section='RHHelp', option='url', value=url, persist=True,
global_config=global_config)
@classmethod
def config_get_proxy_url(cls):
cfg = confighelper.get_config_helper()
return cfg.get(section='RHHelp', option='proxy_url')
@classmethod
def config_set_proxy_url(cls, url, global_config=False):
cfg = confighelper.get_config_helper()
cfg.set(section='RHHelp', option='proxy_url', value=url, persist=True,
global_config=global_config)
@classmethod
def config_get_proxy_user(cls):
cfg = confighelper.get_config_helper()
return cfg.get(section='RHHelp', option='proxy_user')
@classmethod
def config_set_proxy_user(cls, user, global_config=False):
cfg = confighelper.get_config_helper()
cfg.set(section='RHHelp', option='proxy_user', value=user,
persist=True, global_config=global_config)
@classmethod
def config_set_proxy_password(cls, global_config=False):
cfg = confighelper.get_config_helper()
cfg.prompt_for_proxy_password(prompt=False,
global_config=global_config)
@classmethod
def config_get_ssl_ca(cls):
cfg = confighelper.get_config_helper()
return cfg.get(section='RHHelp', option='ssl_ca')
@classmethod
def config_set_ssl_ca(cls, ssl_ca, global_config=False):
cfg = confighelper.get_config_helper()
if not os.access(ssl_ca, os.R_OK):
msg = _('Unable to read certificate at %s') % (ssl_ca)
print msg
raise Exception(msg)
cfg.set(section='RHHelp', option='ssl_ca', value=ssl_ca,
persist=True, global_config=global_config)
@classmethod
def config_get_kern_debug_dir(cls):
cfg = confighelper.get_config_helper()
return cfg.get(section='RHHelp', option='kern_debug_dir')
@classmethod
def config_set_kern_debug_dir(cls, kern_debug_dir, global_config=False):
cfg = confighelper.get_config_helper()
cfg.set(section='RHHelp', option='kern_debug_dir',
value=kern_debug_dir, persist=True,
global_config=global_config)
#
# Main logic
#
def validate_args(self):
if not self._args:
msg = _('ERROR: %s requires the name of an option to be '
'set in the config file.') % self.plugin_name
print msg
raise Exception(msg)
def non_interactive_action(self):
if self._options['global']:
global_config = True
else:
global_config = False
# check for display mode
if len(self._args) == 0:
# TODO: maybe implement __repr__ on confighelper and print that?
# get list of global config options
# get list of local config options
pass
# get / set config option...
else:
# determine section and option.
items = self._args[0].split('.')
if len(items) == 1:
section = 'RHHelp'
option = items[0]
else:
section = items[0]
option = items[1]
# get option's owning class
if section == 'RHHelp':
opt_class = self.__class__
else:
opt_class = common.get_plugin_dict()[section]
# process command...
try:
# handle 'unset' command
if self._options['unset']:
cfg = confighelper.get_config_helper()
cfg.remove_option(section, option, global_config)
# 'password' is a special case: a one-arg set...
elif option == 'password':
self.config_set_password(global_config=global_config)
# 'proxy_password' is the other special case: a one-arg set...
elif option == 'proxy_password':
self.config_set_proxy_password(global_config=global_config)
# is this a 'set' or a 'get'?
# 'gets' have one arg...
elif len(self._args) == 1:
func = getattr(opt_class, 'config_get_' + option)
print func()
# ... 'sets' will have two args
elif len(self._args) == 2:
func = getattr(opt_class, 'config_set_' + option)
func(self._args[1], global_config=global_config)
except AttributeError:
msg = _('ERROR: %s is not a valid configuration file option.')\
% self._args[0]
print msg
logger.log(logging.WARNING, msg)
raise
except EmptyValueError, eve:
print eve
logger.log(logging.WARNING, eve)
raise
except Exception, e:
logger.log(logging.WARNING, e)
raise
return
| apache-2.0 | 1,848,984,380,404,141,300 | 34.949405 | 79 | 0.559815 | false |
Forage/Gramps | gramps/gen/filters/rules/citation/_hasgallery.py | 1 | 1838 | #
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2008 Brian G. Matherly
# Copyright (C) 2008 Jerome Rapinat
# Copyright (C) 2008 Benny Malengier
# Copyright (C) 2011 Tim G L Lyons
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from ....const import GRAMPS_LOCALE as glocale
_ = glocale.get_translation().gettext
#-------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------
from .._hasgallerybase import HasGalleryBase
#-------------------------------------------------------------------------
# "Sources who have media object reference"
#-------------------------------------------------------------------------
class HasGallery(HasGalleryBase):
"""Rule that checks for citation who has media object reference"""
name = _('Citations with <count> media')
description = _("Matches citations with a certain number of items in the gallery")
| gpl-2.0 | -9,192,749,210,201,180,000 | 37.291667 | 86 | 0.557127 | false |
Kraymer/beets | beets/library.py | 1 | 53386 | # -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""The core data store and collection logic for beets.
"""
from __future__ import division, absolute_import, print_function
import os
import sys
import unicodedata
import time
import re
import six
from beets import logging
from beets.mediafile import MediaFile, UnreadableFileError
from beets import plugins
from beets import util
from beets.util import bytestring_path, syspath, normpath, samefile
from beets.util.functemplate import Template
from beets import dbcore
from beets.dbcore import types
import beets
# To use the SQLite "blob" type, it doesn't suffice to provide a byte
# string; SQLite treats that as encoded text. Wrapping it in a `buffer` or a
# `memoryview`, depending on the Python version, tells it that we
# actually mean non-text data.
if six.PY2:
BLOB_TYPE = buffer # noqa: F821
else:
BLOB_TYPE = memoryview
log = logging.getLogger('beets')
# Library-specific query types.
class PathQuery(dbcore.FieldQuery):
"""A query that matches all items under a given path.
Matching can either be case-insensitive or case-sensitive. By
default, the behavior depends on the OS: case-insensitive on Windows
and case-sensitive otherwise.
"""
def __init__(self, field, pattern, fast=True, case_sensitive=None):
"""Create a path query. `pattern` must be a path, either to a
file or a directory.
`case_sensitive` can be a bool or `None`, indicating that the
behavior should depend on the filesystem.
"""
super(PathQuery, self).__init__(field, pattern, fast)
# By default, the case sensitivity depends on the filesystem
# that the query path is located on.
if case_sensitive is None:
path = util.bytestring_path(util.normpath(pattern))
case_sensitive = beets.util.case_sensitive(path)
self.case_sensitive = case_sensitive
# Use a normalized-case pattern for case-insensitive matches.
if not case_sensitive:
pattern = pattern.lower()
# Match the path as a single file.
self.file_path = util.bytestring_path(util.normpath(pattern))
# As a directory (prefix).
self.dir_path = util.bytestring_path(os.path.join(self.file_path, b''))
@classmethod
def is_path_query(cls, query_part):
"""Try to guess whether a unicode query part is a path query.
Condition: separator precedes colon and the file exists.
"""
colon = query_part.find(':')
if colon != -1:
query_part = query_part[:colon]
# Test both `sep` and `altsep` (i.e., both slash and backslash on
# Windows).
return (
(os.sep in query_part or
(os.altsep and os.altsep in query_part)) and
os.path.exists(syspath(normpath(query_part)))
)
def match(self, item):
path = item.path if self.case_sensitive else item.path.lower()
return (path == self.file_path) or path.startswith(self.dir_path)
def col_clause(self):
file_blob = BLOB_TYPE(self.file_path)
dir_blob = BLOB_TYPE(self.dir_path)
if self.case_sensitive:
query_part = '({0} = ?) || (substr({0}, 1, ?) = ?)'
else:
query_part = '(BYTELOWER({0}) = BYTELOWER(?)) || \
(substr(BYTELOWER({0}), 1, ?) = BYTELOWER(?))'
return query_part.format(self.field), \
(file_blob, len(dir_blob), dir_blob)
# Library-specific field types.
class DateType(types.Float):
# TODO representation should be `datetime` object
# TODO distinguish between date and time types
query = dbcore.query.DateQuery
def format(self, value):
return time.strftime(beets.config['time_format'].as_str(),
time.localtime(value or 0))
def parse(self, string):
try:
# Try a formatted date string.
return time.mktime(
time.strptime(string,
beets.config['time_format'].as_str())
)
except ValueError:
# Fall back to a plain timestamp number.
try:
return float(string)
except ValueError:
return self.null
class PathType(types.Type):
sql = u'BLOB'
query = PathQuery
model_type = bytes
def format(self, value):
return util.displayable_path(value)
def parse(self, string):
return normpath(bytestring_path(string))
def normalize(self, value):
if isinstance(value, six.text_type):
# Paths stored internally as encoded bytes.
return bytestring_path(value)
elif isinstance(value, BLOB_TYPE):
# We unwrap buffers to bytes.
return bytes(value)
else:
return value
def from_sql(self, sql_value):
return self.normalize(sql_value)
def to_sql(self, value):
if isinstance(value, bytes):
value = BLOB_TYPE(value)
return value
class MusicalKey(types.String):
"""String representing the musical key of a song.
The standard format is C, Cm, C#, C#m, etc.
"""
ENHARMONIC = {
r'db': 'c#',
r'eb': 'd#',
r'gb': 'f#',
r'ab': 'g#',
r'bb': 'a#',
}
def parse(self, key):
key = key.lower()
for flat, sharp in self.ENHARMONIC.items():
key = re.sub(flat, sharp, key)
key = re.sub(r'[\W\s]+minor', 'm', key)
key = re.sub(r'[\W\s]+major', '', key)
return key.capitalize()
def normalize(self, key):
if key is None:
return None
else:
return self.parse(key)
class DurationType(types.Float):
"""Human-friendly (M:SS) representation of a time interval."""
query = dbcore.query.DurationQuery
def format(self, value):
if not beets.config['format_raw_length'].get(bool):
return beets.ui.human_seconds_short(value or 0.0)
else:
return value
def parse(self, string):
try:
# Try to format back hh:ss to seconds.
return util.raw_seconds_short(string)
except ValueError:
# Fall back to a plain float.
try:
return float(string)
except ValueError:
return self.null
# Library-specific sort types.
class SmartArtistSort(dbcore.query.Sort):
"""Sort by artist (either album artist or track artist),
prioritizing the sort field over the raw field.
"""
def __init__(self, model_cls, ascending=True, case_insensitive=True):
self.album = model_cls is Album
self.ascending = ascending
self.case_insensitive = case_insensitive
def order_clause(self):
order = "ASC" if self.ascending else "DESC"
field = 'albumartist' if self.album else 'artist'
collate = 'COLLATE NOCASE' if self.case_insensitive else ''
return ('(CASE {0}_sort WHEN NULL THEN {0} '
'WHEN "" THEN {0} '
'ELSE {0}_sort END) {1} {2}').format(field, collate, order)
def sort(self, objs):
if self.album:
field = lambda a: a.albumartist_sort or a.albumartist
else:
field = lambda i: i.artist_sort or i.artist
if self.case_insensitive:
key = lambda x: field(x).lower()
else:
key = field
return sorted(objs, key=key, reverse=not self.ascending)
# Special path format key.
PF_KEY_DEFAULT = 'default'
# Exceptions.
@six.python_2_unicode_compatible
class FileOperationError(Exception):
"""Indicates an error when interacting with a file on disk.
Possibilities include an unsupported media type, a permissions
error, and an unhandled Mutagen exception.
"""
def __init__(self, path, reason):
"""Create an exception describing an operation on the file at
`path` with the underlying (chained) exception `reason`.
"""
super(FileOperationError, self).__init__(path, reason)
self.path = path
self.reason = reason
def text(self):
"""Get a string representing the error. Describes both the
underlying reason and the file path in question.
"""
return u'{0}: {1}'.format(
util.displayable_path(self.path),
six.text_type(self.reason)
)
# define __str__ as text to avoid infinite loop on super() calls
# with @six.python_2_unicode_compatible
__str__ = text
@six.python_2_unicode_compatible
class ReadError(FileOperationError):
"""An error while reading a file (i.e. in `Item.read`).
"""
def __str__(self):
return u'error reading ' + super(ReadError, self).text()
@six.python_2_unicode_compatible
class WriteError(FileOperationError):
"""An error while writing a file (i.e. in `Item.write`).
"""
def __str__(self):
return u'error writing ' + super(WriteError, self).text()
# Item and Album model classes.
@six.python_2_unicode_compatible
class LibModel(dbcore.Model):
"""Shared concrete functionality for Items and Albums.
"""
_format_config_key = None
"""Config key that specifies how an instance should be formatted.
"""
def _template_funcs(self):
funcs = DefaultTemplateFunctions(self, self._db).functions()
funcs.update(plugins.template_funcs())
return funcs
def store(self, fields=None):
super(LibModel, self).store(fields)
plugins.send('database_change', lib=self._db, model=self)
def remove(self):
super(LibModel, self).remove()
plugins.send('database_change', lib=self._db, model=self)
def add(self, lib=None):
super(LibModel, self).add(lib)
plugins.send('database_change', lib=self._db, model=self)
def __format__(self, spec):
if not spec:
spec = beets.config[self._format_config_key].as_str()
assert isinstance(spec, six.text_type)
return self.evaluate_template(spec)
def __str__(self):
return format(self)
def __bytes__(self):
return self.__str__().encode('utf-8')
class FormattedItemMapping(dbcore.db.FormattedMapping):
"""Add lookup for album-level fields.
Album-level fields take precedence if `for_path` is true.
"""
def __init__(self, item, for_path=False):
super(FormattedItemMapping, self).__init__(item, for_path)
self.album = item.get_album()
self.album_keys = []
if self.album:
for key in self.album.keys(True):
if key in Album.item_keys or key not in item._fields.keys():
self.album_keys.append(key)
self.all_keys = set(self.model_keys).union(self.album_keys)
def _get(self, key):
"""Get the value for a key, either from the album or the item.
Raise a KeyError for invalid keys.
"""
if self.for_path and key in self.album_keys:
return self._get_formatted(self.album, key)
elif key in self.model_keys:
return self._get_formatted(self.model, key)
elif key in self.album_keys:
return self._get_formatted(self.album, key)
else:
raise KeyError(key)
def __getitem__(self, key):
"""Get the value for a key. Certain unset values are remapped.
"""
value = self._get(key)
# `artist` and `albumartist` fields fall back to one another.
# This is helpful in path formats when the album artist is unset
# on as-is imports.
if key == 'artist' and not value:
return self._get('albumartist')
elif key == 'albumartist' and not value:
return self._get('artist')
else:
return value
def __iter__(self):
return iter(self.all_keys)
def __len__(self):
return len(self.all_keys)
class Item(LibModel):
_table = 'items'
_flex_table = 'item_attributes'
_fields = {
'id': types.PRIMARY_ID,
'path': PathType(),
'album_id': types.FOREIGN_ID,
'title': types.STRING,
'artist': types.STRING,
'artist_sort': types.STRING,
'artist_credit': types.STRING,
'album': types.STRING,
'albumartist': types.STRING,
'albumartist_sort': types.STRING,
'albumartist_credit': types.STRING,
'genre': types.STRING,
'lyricist': types.STRING,
'composer': types.STRING,
'arranger': types.STRING,
'grouping': types.STRING,
'year': types.PaddedInt(4),
'month': types.PaddedInt(2),
'day': types.PaddedInt(2),
'track': types.PaddedInt(2),
'tracktotal': types.PaddedInt(2),
'disc': types.PaddedInt(2),
'disctotal': types.PaddedInt(2),
'lyrics': types.STRING,
'comments': types.STRING,
'bpm': types.INTEGER,
'comp': types.BOOLEAN,
'mb_trackid': types.STRING,
'mb_albumid': types.STRING,
'mb_artistid': types.STRING,
'mb_albumartistid': types.STRING,
'albumtype': types.STRING,
'label': types.STRING,
'acoustid_fingerprint': types.STRING,
'acoustid_id': types.STRING,
'mb_releasegroupid': types.STRING,
'asin': types.STRING,
'catalognum': types.STRING,
'script': types.STRING,
'language': types.STRING,
'country': types.STRING,
'albumstatus': types.STRING,
'media': types.STRING,
'albumdisambig': types.STRING,
'disctitle': types.STRING,
'encoder': types.STRING,
'rg_track_gain': types.NULL_FLOAT,
'rg_track_peak': types.NULL_FLOAT,
'rg_album_gain': types.NULL_FLOAT,
'rg_album_peak': types.NULL_FLOAT,
'original_year': types.PaddedInt(4),
'original_month': types.PaddedInt(2),
'original_day': types.PaddedInt(2),
'initial_key': MusicalKey(),
'length': DurationType(),
'bitrate': types.ScaledInt(1000, u'kbps'),
'format': types.STRING,
'samplerate': types.ScaledInt(1000, u'kHz'),
'bitdepth': types.INTEGER,
'channels': types.INTEGER,
'mtime': DateType(),
'added': DateType(),
}
_search_fields = ('artist', 'title', 'comments',
'album', 'albumartist', 'genre')
_types = {
'data_source': types.STRING,
}
_media_fields = set(MediaFile.readable_fields()) \
.intersection(_fields.keys())
"""Set of item fields that are backed by `MediaFile` fields.
Any kind of field (fixed, flexible, and computed) may be a media
field. Only these fields are read from disk in `read` and written in
`write`.
"""
_media_tag_fields = set(MediaFile.fields()).intersection(_fields.keys())
"""Set of item fields that are backed by *writable* `MediaFile` tag
fields.
This excludes fields that represent audio data, such as `bitrate` or
`length`.
"""
_formatter = FormattedItemMapping
_sorts = {'artist': SmartArtistSort}
_format_config_key = 'format_item'
@classmethod
def _getters(cls):
getters = plugins.item_field_getters()
getters['singleton'] = lambda i: i.album_id is None
getters['filesize'] = Item.try_filesize # In bytes.
return getters
@classmethod
def from_path(cls, path):
"""Creates a new item from the media file at the specified path.
"""
# Initiate with values that aren't read from files.
i = cls(album_id=None)
i.read(path)
i.mtime = i.current_mtime() # Initial mtime.
return i
def __setitem__(self, key, value):
"""Set the item's value for a standard field or a flexattr.
"""
# Encode unicode paths and read buffers.
if key == 'path':
if isinstance(value, six.text_type):
value = bytestring_path(value)
elif isinstance(value, BLOB_TYPE):
value = bytes(value)
if key in MediaFile.fields():
self.mtime = 0 # Reset mtime on dirty.
super(Item, self).__setitem__(key, value)
def update(self, values):
"""Set all key/value pairs in the mapping. If mtime is
specified, it is not reset (as it might otherwise be).
"""
super(Item, self).update(values)
if self.mtime == 0 and 'mtime' in values:
self.mtime = values['mtime']
def get_album(self):
"""Get the Album object that this item belongs to, if any, or
None if the item is a singleton or is not associated with a
library.
"""
if not self._db:
return None
return self._db.get_album(self)
# Interaction with file metadata.
def read(self, read_path=None):
"""Read the metadata from the associated file.
If `read_path` is specified, read metadata from that file
instead. Updates all the properties in `_media_fields`
from the media file.
Raises a `ReadError` if the file could not be read.
"""
if read_path is None:
read_path = self.path
else:
read_path = normpath(read_path)
try:
mediafile = MediaFile(syspath(read_path))
except UnreadableFileError as exc:
raise ReadError(read_path, exc)
for key in self._media_fields:
value = getattr(mediafile, key)
if isinstance(value, six.integer_types):
if value.bit_length() > 63:
value = 0
self[key] = value
# Database's mtime should now reflect the on-disk value.
if read_path == self.path:
self.mtime = self.current_mtime()
self.path = read_path
def write(self, path=None, tags=None):
"""Write the item's metadata to a media file.
All fields in `_media_fields` are written to disk according to
the values on this object.
`path` is the path of the mediafile to write the data to. It
defaults to the item's path.
`tags` is a dictionary of additional metadata the should be
written to the file. (These tags need not be in `_media_fields`.)
Can raise either a `ReadError` or a `WriteError`.
"""
if path is None:
path = self.path
else:
path = normpath(path)
# Get the data to write to the file.
item_tags = dict(self)
item_tags = {k: v for k, v in item_tags.items()
if k in self._media_fields} # Only write media fields.
if tags is not None:
item_tags.update(tags)
plugins.send('write', item=self, path=path, tags=item_tags)
# Open the file.
try:
mediafile = MediaFile(syspath(path),
id3v23=beets.config['id3v23'].get(bool))
except UnreadableFileError as exc:
raise ReadError(self.path, exc)
# Write the tags to the file.
mediafile.update(item_tags)
try:
mediafile.save()
except UnreadableFileError as exc:
raise WriteError(self.path, exc)
# The file has a new mtime.
if path == self.path:
self.mtime = self.current_mtime()
plugins.send('after_write', item=self, path=path)
def try_write(self, path=None, tags=None):
"""Calls `write()` but catches and logs `FileOperationError`
exceptions.
Returns `False` an exception was caught and `True` otherwise.
"""
try:
self.write(path, tags)
return True
except FileOperationError as exc:
log.error(u"{0}", exc)
return False
def try_sync(self, write, move, with_album=True):
"""Synchronize the item with the database and, possibly, updates its
tags on disk and its path (by moving the file).
`write` indicates whether to write new tags into the file. Similarly,
`move` controls whether the path should be updated. In the
latter case, files are *only* moved when they are inside their
library's directory (if any).
Similar to calling :meth:`write`, :meth:`move`, and :meth:`store`
(conditionally).
"""
if write:
self.try_write()
if move:
# Check whether this file is inside the library directory.
if self._db and self._db.directory in util.ancestry(self.path):
log.debug(u'moving {0} to synchronize path',
util.displayable_path(self.path))
self.move(with_album=with_album)
self.store()
# Files themselves.
def move_file(self, dest, copy=False, link=False, hardlink=False):
"""Moves or copies the item's file, updating the path value if
the move succeeds. If a file exists at ``dest``, then it is
slightly modified to be unique.
"""
if not util.samefile(self.path, dest):
dest = util.unique_path(dest)
if copy:
util.copy(self.path, dest)
plugins.send("item_copied", item=self, source=self.path,
destination=dest)
elif link:
util.link(self.path, dest)
plugins.send("item_linked", item=self, source=self.path,
destination=dest)
elif hardlink:
util.hardlink(self.path, dest)
plugins.send("item_hardlinked", item=self, source=self.path,
destination=dest)
else:
plugins.send("before_item_moved", item=self, source=self.path,
destination=dest)
util.move(self.path, dest)
plugins.send("item_moved", item=self, source=self.path,
destination=dest)
# Either copying or moving succeeded, so update the stored path.
self.path = dest
def current_mtime(self):
"""Returns the current mtime of the file, rounded to the nearest
integer.
"""
return int(os.path.getmtime(syspath(self.path)))
def try_filesize(self):
"""Get the size of the underlying file in bytes.
If the file is missing, return 0 (and log a warning).
"""
try:
return os.path.getsize(syspath(self.path))
except (OSError, Exception) as exc:
log.warning(u'could not get filesize: {0}', exc)
return 0
# Model methods.
def remove(self, delete=False, with_album=True):
"""Removes the item. If `delete`, then the associated file is
removed from disk. If `with_album`, then the item's album (if
any) is removed if it the item was the last in the album.
"""
super(Item, self).remove()
# Remove the album if it is empty.
if with_album:
album = self.get_album()
if album and not album.items():
album.remove(delete, False)
# Send a 'item_removed' signal to plugins
plugins.send('item_removed', item=self)
# Delete the associated file.
if delete:
util.remove(self.path)
util.prune_dirs(os.path.dirname(self.path), self._db.directory)
self._db._memotable = {}
def move(self, copy=False, link=False, hardlink=False, basedir=None,
with_album=True, store=True):
"""Move the item to its designated location within the library
directory (provided by destination()). Subdirectories are
created as needed. If the operation succeeds, the item's path
field is updated to reflect the new location.
If `copy` is true, moving the file is copied rather than moved.
Similarly, `link` creates a symlink instead, and `hardlink`
creates a hardlink.
basedir overrides the library base directory for the
destination.
If the item is in an album, the album is given an opportunity to
move its art. (This can be disabled by passing
with_album=False.)
By default, the item is stored to the database if it is in the
database, so any dirty fields prior to the move() call will be written
as a side effect. You probably want to call save() to commit the DB
transaction. If `store` is true however, the item won't be stored, and
you'll have to manually store it after invoking this method.
"""
self._check_db()
dest = self.destination(basedir=basedir)
# Create necessary ancestry for the move.
util.mkdirall(dest)
# Perform the move and store the change.
old_path = self.path
self.move_file(dest, copy, link, hardlink)
if store:
self.store()
# If this item is in an album, move its art.
if with_album:
album = self.get_album()
if album:
album.move_art(copy)
if store:
album.store()
# Prune vacated directory.
if not copy:
util.prune_dirs(os.path.dirname(old_path), self._db.directory)
# Templating.
def destination(self, fragment=False, basedir=None, platform=None,
path_formats=None):
"""Returns the path in the library directory designated for the
item (i.e., where the file ought to be). fragment makes this
method return just the path fragment underneath the root library
directory; the path is also returned as Unicode instead of
encoded as a bytestring. basedir can override the library's base
directory for the destination.
"""
self._check_db()
platform = platform or sys.platform
basedir = basedir or self._db.directory
path_formats = path_formats or self._db.path_formats
# Use a path format based on a query, falling back on the
# default.
for query, path_format in path_formats:
if query == PF_KEY_DEFAULT:
continue
query, _ = parse_query_string(query, type(self))
if query.match(self):
# The query matches the item! Use the corresponding path
# format.
break
else:
# No query matched; fall back to default.
for query, path_format in path_formats:
if query == PF_KEY_DEFAULT:
break
else:
assert False, u"no default path format"
if isinstance(path_format, Template):
subpath_tmpl = path_format
else:
subpath_tmpl = Template(path_format)
# Evaluate the selected template.
subpath = self.evaluate_template(subpath_tmpl, True)
# Prepare path for output: normalize Unicode characters.
if platform == 'darwin':
subpath = unicodedata.normalize('NFD', subpath)
else:
subpath = unicodedata.normalize('NFC', subpath)
if beets.config['asciify_paths']:
subpath = util.asciify_path(
subpath,
beets.config['path_sep_replace'].as_str()
)
maxlen = beets.config['max_filename_length'].get(int)
if not maxlen:
# When zero, try to determine from filesystem.
maxlen = util.max_filename_length(self._db.directory)
subpath, fellback = util.legalize_path(
subpath, self._db.replacements, maxlen,
os.path.splitext(self.path)[1], fragment
)
if fellback:
# Print an error message if legalization fell back to
# default replacements because of the maximum length.
log.warning(
u'Fell back to default replacements when naming '
u'file {}. Configure replacements to avoid lengthening '
u'the filename.',
subpath
)
if fragment:
return util.as_string(subpath)
else:
return normpath(os.path.join(basedir, subpath))
class Album(LibModel):
"""Provides access to information about albums stored in a
library. Reflects the library's "albums" table, including album
art.
"""
_table = 'albums'
_flex_table = 'album_attributes'
_always_dirty = True
_fields = {
'id': types.PRIMARY_ID,
'artpath': PathType(),
'added': DateType(),
'albumartist': types.STRING,
'albumartist_sort': types.STRING,
'albumartist_credit': types.STRING,
'album': types.STRING,
'genre': types.STRING,
'year': types.PaddedInt(4),
'month': types.PaddedInt(2),
'day': types.PaddedInt(2),
'disctotal': types.PaddedInt(2),
'comp': types.BOOLEAN,
'mb_albumid': types.STRING,
'mb_albumartistid': types.STRING,
'albumtype': types.STRING,
'label': types.STRING,
'mb_releasegroupid': types.STRING,
'asin': types.STRING,
'catalognum': types.STRING,
'script': types.STRING,
'language': types.STRING,
'country': types.STRING,
'albumstatus': types.STRING,
'albumdisambig': types.STRING,
'rg_album_gain': types.NULL_FLOAT,
'rg_album_peak': types.NULL_FLOAT,
'original_year': types.PaddedInt(4),
'original_month': types.PaddedInt(2),
'original_day': types.PaddedInt(2),
}
_search_fields = ('album', 'albumartist', 'genre')
_types = {
'path': PathType(),
'data_source': types.STRING,
}
_sorts = {
'albumartist': SmartArtistSort,
'artist': SmartArtistSort,
}
item_keys = [
'added',
'albumartist',
'albumartist_sort',
'albumartist_credit',
'album',
'genre',
'year',
'month',
'day',
'disctotal',
'comp',
'mb_albumid',
'mb_albumartistid',
'albumtype',
'label',
'mb_releasegroupid',
'asin',
'catalognum',
'script',
'language',
'country',
'albumstatus',
'albumdisambig',
'rg_album_gain',
'rg_album_peak',
'original_year',
'original_month',
'original_day',
]
"""List of keys that are set on an album's items.
"""
_format_config_key = 'format_album'
@classmethod
def _getters(cls):
# In addition to plugin-provided computed fields, also expose
# the album's directory as `path`.
getters = plugins.album_field_getters()
getters['path'] = Album.item_dir
getters['albumtotal'] = Album._albumtotal
return getters
def items(self):
"""Returns an iterable over the items associated with this
album.
"""
return self._db.items(dbcore.MatchQuery('album_id', self.id))
def remove(self, delete=False, with_items=True):
"""Removes this album and all its associated items from the
library. If delete, then the items' files are also deleted
from disk, along with any album art. The directories
containing the album are also removed (recursively) if empty.
Set with_items to False to avoid removing the album's items.
"""
super(Album, self).remove()
# Delete art file.
if delete:
artpath = self.artpath
if artpath:
util.remove(artpath)
# Remove (and possibly delete) the constituent items.
if with_items:
for item in self.items():
item.remove(delete, False)
def move_art(self, copy=False, link=False, hardlink=False):
"""Move or copy any existing album art so that it remains in the
same directory as the items.
"""
old_art = self.artpath
if not old_art:
return
new_art = self.art_destination(old_art)
if new_art == old_art:
return
new_art = util.unique_path(new_art)
log.debug(u'moving album art {0} to {1}',
util.displayable_path(old_art),
util.displayable_path(new_art))
if copy:
util.copy(old_art, new_art)
elif link:
util.link(old_art, new_art)
elif hardlink:
util.hardlink(old_art, new_art)
else:
util.move(old_art, new_art)
self.artpath = new_art
# Prune old path when moving.
if not copy:
util.prune_dirs(os.path.dirname(old_art),
self._db.directory)
def move(self, copy=False, link=False, hardlink=False, basedir=None,
store=True):
"""Moves (or copies) all items to their destination. Any album
art moves along with them. basedir overrides the library base
directory for the destination. By default, the album is stored to the
database, persisting any modifications to its metadata. If `store` is
true however, the album is not stored automatically, and you'll have
to manually store it after invoking this method.
"""
basedir = basedir or self._db.directory
# Ensure new metadata is available to items for destination
# computation.
if store:
self.store()
# Move items.
items = list(self.items())
for item in items:
item.move(copy, link, hardlink, basedir=basedir, with_album=False,
store=store)
# Move art.
self.move_art(copy, link, hardlink)
if store:
self.store()
def item_dir(self):
"""Returns the directory containing the album's first item,
provided that such an item exists.
"""
item = self.items().get()
if not item:
raise ValueError(u'empty album')
return os.path.dirname(item.path)
def _albumtotal(self):
"""Return the total number of tracks on all discs on the album
"""
if self.disctotal == 1 or not beets.config['per_disc_numbering']:
return self.items()[0].tracktotal
counted = []
total = 0
for item in self.items():
if item.disc in counted:
continue
total += item.tracktotal
counted.append(item.disc)
if len(counted) == self.disctotal:
break
return total
def art_destination(self, image, item_dir=None):
"""Returns a path to the destination for the album art image
for the album. `image` is the path of the image that will be
moved there (used for its extension).
The path construction uses the existing path of the album's
items, so the album must contain at least one item or
item_dir must be provided.
"""
image = bytestring_path(image)
item_dir = item_dir or self.item_dir()
filename_tmpl = Template(
beets.config['art_filename'].as_str())
subpath = self.evaluate_template(filename_tmpl, True)
if beets.config['asciify_paths']:
subpath = util.asciify_path(
subpath,
beets.config['path_sep_replace'].as_str()
)
subpath = util.sanitize_path(subpath,
replacements=self._db.replacements)
subpath = bytestring_path(subpath)
_, ext = os.path.splitext(image)
dest = os.path.join(item_dir, subpath + ext)
return bytestring_path(dest)
def set_art(self, path, copy=True):
"""Sets the album's cover art to the image at the given path.
The image is copied (or moved) into place, replacing any
existing art.
Sends an 'art_set' event with `self` as the sole argument.
"""
path = bytestring_path(path)
oldart = self.artpath
artdest = self.art_destination(path)
if oldart and samefile(path, oldart):
# Art already set.
return
elif samefile(path, artdest):
# Art already in place.
self.artpath = path
return
# Normal operation.
if oldart == artdest:
util.remove(oldart)
artdest = util.unique_path(artdest)
if copy:
util.copy(path, artdest)
else:
util.move(path, artdest)
self.artpath = artdest
plugins.send('art_set', album=self)
def store(self, fields=None):
"""Update the database with the album information. The album's
tracks are also updated.
:param fields: The fields to be stored. If not specified, all fields
will be.
"""
# Get modified track fields.
track_updates = {}
for key in self.item_keys:
if key in self._dirty:
track_updates[key] = self[key]
with self._db.transaction():
super(Album, self).store(fields)
if track_updates:
for item in self.items():
for key, value in track_updates.items():
item[key] = value
item.store()
def try_sync(self, write, move):
"""Synchronize the album and its items with the database.
Optionally, also write any new tags into the files and update
their paths.
`write` indicates whether to write tags to the item files, and
`move` controls whether files (both audio and album art) are
moved.
"""
self.store()
for item in self.items():
item.try_sync(write, move)
# Query construction helpers.
def parse_query_parts(parts, model_cls):
"""Given a beets query string as a list of components, return the
`Query` and `Sort` they represent.
Like `dbcore.parse_sorted_query`, with beets query prefixes and
special path query detection.
"""
# Get query types and their prefix characters.
prefixes = {':': dbcore.query.RegexpQuery}
prefixes.update(plugins.queries())
# Special-case path-like queries, which are non-field queries
# containing path separators (/).
path_parts = []
non_path_parts = []
for s in parts:
if PathQuery.is_path_query(s):
path_parts.append(s)
else:
non_path_parts.append(s)
query, sort = dbcore.parse_sorted_query(
model_cls, non_path_parts, prefixes
)
# Add path queries to aggregate query.
# Match field / flexattr depending on whether the model has the path field
fast_path_query = 'path' in model_cls._fields
query.subqueries += [PathQuery('path', s, fast_path_query)
for s in path_parts]
return query, sort
def parse_query_string(s, model_cls):
"""Given a beets query string, return the `Query` and `Sort` they
represent.
The string is split into components using shell-like syntax.
"""
message = u"Query is not unicode: {0!r}".format(s)
assert isinstance(s, six.text_type), message
try:
parts = util.shlex_split(s)
except ValueError as exc:
raise dbcore.InvalidQueryError(s, exc)
return parse_query_parts(parts, model_cls)
def _sqlite_bytelower(bytestring):
""" A custom ``bytelower`` sqlite function so we can compare
bytestrings in a semi case insensitive fashion. This is to work
around sqlite builds are that compiled with
``-DSQLITE_LIKE_DOESNT_MATCH_BLOBS``. See
``https://github.com/beetbox/beets/issues/2172`` for details.
"""
if not six.PY2:
return bytestring.lower()
return buffer(bytes(bytestring).lower()) # noqa: F821
# The Library: interface to the database.
class Library(dbcore.Database):
"""A database of music containing songs and albums.
"""
_models = (Item, Album)
def __init__(self, path='library.blb',
directory='~/Music',
path_formats=((PF_KEY_DEFAULT,
'$artist/$album/$track $title'),),
replacements=None):
timeout = beets.config['timeout'].as_number()
super(Library, self).__init__(path, timeout=timeout)
self.directory = bytestring_path(normpath(directory))
self.path_formats = path_formats
self.replacements = replacements
self._memotable = {} # Used for template substitution performance.
def _create_connection(self):
conn = super(Library, self)._create_connection()
conn.create_function('bytelower', 1, _sqlite_bytelower)
return conn
# Adding objects to the database.
def add(self, obj):
"""Add the :class:`Item` or :class:`Album` object to the library
database. Return the object's new id.
"""
obj.add(self)
self._memotable = {}
return obj.id
def add_album(self, items):
"""Create a new album consisting of a list of items.
The items are added to the database if they don't yet have an
ID. Return a new :class:`Album` object. The list items must not
be empty.
"""
if not items:
raise ValueError(u'need at least one item')
# Create the album structure using metadata from the first item.
values = dict((key, items[0][key]) for key in Album.item_keys)
album = Album(self, **values)
# Add the album structure and set the items' album_id fields.
# Store or add the items.
with self.transaction():
album.add(self)
for item in items:
item.album_id = album.id
if item.id is None:
item.add(self)
else:
item.store()
return album
# Querying.
def _fetch(self, model_cls, query, sort=None):
"""Parse a query and fetch. If a order specification is present
in the query string the `sort` argument is ignored.
"""
# Parse the query, if necessary.
try:
parsed_sort = None
if isinstance(query, six.string_types):
query, parsed_sort = parse_query_string(query, model_cls)
elif isinstance(query, (list, tuple)):
query, parsed_sort = parse_query_parts(query, model_cls)
except dbcore.query.InvalidQueryArgumentValueError as exc:
raise dbcore.InvalidQueryError(query, exc)
# Any non-null sort specified by the parsed query overrides the
# provided sort.
if parsed_sort and not isinstance(parsed_sort, dbcore.query.NullSort):
sort = parsed_sort
return super(Library, self)._fetch(
model_cls, query, sort
)
@staticmethod
def get_default_album_sort():
"""Get a :class:`Sort` object for albums from the config option.
"""
return dbcore.sort_from_strings(
Album, beets.config['sort_album'].as_str_seq())
@staticmethod
def get_default_item_sort():
"""Get a :class:`Sort` object for items from the config option.
"""
return dbcore.sort_from_strings(
Item, beets.config['sort_item'].as_str_seq())
def albums(self, query=None, sort=None):
"""Get :class:`Album` objects matching the query.
"""
return self._fetch(Album, query, sort or self.get_default_album_sort())
def items(self, query=None, sort=None):
"""Get :class:`Item` objects matching the query.
"""
return self._fetch(Item, query, sort or self.get_default_item_sort())
# Convenience accessors.
def get_item(self, id):
"""Fetch an :class:`Item` by its ID. Returns `None` if no match is
found.
"""
return self._get(Item, id)
def get_album(self, item_or_id):
"""Given an album ID or an item associated with an album, return
an :class:`Album` object for the album. If no such album exists,
returns `None`.
"""
if isinstance(item_or_id, int):
album_id = item_or_id
else:
album_id = item_or_id.album_id
if album_id is None:
return None
return self._get(Album, album_id)
# Default path template resources.
def _int_arg(s):
"""Convert a string argument to an integer for use in a template
function. May raise a ValueError.
"""
return int(s.strip())
class DefaultTemplateFunctions(object):
"""A container class for the default functions provided to path
templates. These functions are contained in an object to provide
additional context to the functions -- specifically, the Item being
evaluated.
"""
_prefix = 'tmpl_'
def __init__(self, item=None, lib=None):
"""Parametrize the functions. If `item` or `lib` is None, then
some functions (namely, ``aunique``) will always evaluate to the
empty string.
"""
self.item = item
self.lib = lib
def functions(self):
"""Returns a dictionary containing the functions defined in this
object. The keys are function names (as exposed in templates)
and the values are Python functions.
"""
out = {}
for key in self._func_names:
out[key[len(self._prefix):]] = getattr(self, key)
return out
@staticmethod
def tmpl_lower(s):
"""Convert a string to lower case."""
return s.lower()
@staticmethod
def tmpl_upper(s):
"""Covert a string to upper case."""
return s.upper()
@staticmethod
def tmpl_title(s):
"""Convert a string to title case."""
return s.title()
@staticmethod
def tmpl_left(s, chars):
"""Get the leftmost characters of a string."""
return s[0:_int_arg(chars)]
@staticmethod
def tmpl_right(s, chars):
"""Get the rightmost characters of a string."""
return s[-_int_arg(chars):]
@staticmethod
def tmpl_if(condition, trueval, falseval=u''):
"""If ``condition`` is nonempty and nonzero, emit ``trueval``;
otherwise, emit ``falseval`` (if provided).
"""
try:
int_condition = _int_arg(condition)
except ValueError:
if condition.lower() == "false":
return falseval
else:
condition = int_condition
if condition:
return trueval
else:
return falseval
@staticmethod
def tmpl_asciify(s):
"""Translate non-ASCII characters to their ASCII equivalents.
"""
return util.asciify_path(s, beets.config['path_sep_replace'].as_str())
@staticmethod
def tmpl_time(s, fmt):
"""Format a time value using `strftime`.
"""
cur_fmt = beets.config['time_format'].as_str()
return time.strftime(fmt, time.strptime(s, cur_fmt))
def tmpl_aunique(self, keys=None, disam=None, bracket=None):
"""Generate a string that is guaranteed to be unique among all
albums in the library who share the same set of keys. A fields
from "disam" is used in the string if one is sufficient to
disambiguate the albums. Otherwise, a fallback opaque value is
used. Both "keys" and "disam" should be given as
whitespace-separated lists of field names, while "bracket" is a
pair of characters to be used as brackets surrounding the
disambiguator or empty to have no brackets.
"""
# Fast paths: no album, no item or library, or memoized value.
if not self.item or not self.lib:
return u''
if self.item.album_id is None:
return u''
memokey = ('aunique', keys, disam, self.item.album_id)
memoval = self.lib._memotable.get(memokey)
if memoval is not None:
return memoval
keys = keys or 'albumartist album'
disam = disam or 'albumtype year label catalognum albumdisambig'
if bracket is None:
bracket = '[]'
keys = keys.split()
disam = disam.split()
# Assign a left and right bracket or leave blank if argument is empty.
if len(bracket) == 2:
bracket_l = bracket[0]
bracket_r = bracket[1]
else:
bracket_l = u''
bracket_r = u''
album = self.lib.get_album(self.item)
if not album:
# Do nothing for singletons.
self.lib._memotable[memokey] = u''
return u''
# Find matching albums to disambiguate with.
subqueries = []
for key in keys:
value = album.get(key, '')
subqueries.append(dbcore.MatchQuery(key, value))
albums = self.lib.albums(dbcore.AndQuery(subqueries))
# If there's only one album to matching these details, then do
# nothing.
if len(albums) == 1:
self.lib._memotable[memokey] = u''
return u''
# Find the first disambiguator that distinguishes the albums.
for disambiguator in disam:
# Get the value for each album for the current field.
disam_values = set([a.get(disambiguator, '') for a in albums])
# If the set of unique values is equal to the number of
# albums in the disambiguation set, we're done -- this is
# sufficient disambiguation.
if len(disam_values) == len(albums):
break
else:
# No disambiguator distinguished all fields.
res = u' {1}{0}{2}'.format(album.id, bracket_l, bracket_r)
self.lib._memotable[memokey] = res
return res
# Flatten disambiguation value into a string.
disam_value = album.formatted(True).get(disambiguator)
# Return empty string if disambiguator is empty.
if disam_value:
res = u' {1}{0}{2}'.format(disam_value, bracket_l, bracket_r)
else:
res = u''
self.lib._memotable[memokey] = res
return res
@staticmethod
def tmpl_first(s, count=1, skip=0, sep=u'; ', join_str=u'; '):
""" Gets the item(s) from x to y in a string separated by something
and join then with something
:param s: the string
:param count: The number of items included
:param skip: The number of items skipped
:param sep: the separator. Usually is '; ' (default) or '/ '
:param join_str: the string which will join the items, default '; '.
"""
skip = int(skip)
count = skip + int(count)
return join_str.join(s.split(sep)[skip:count])
def tmpl_ifdef(self, field, trueval=u'', falseval=u''):
""" If field exists return trueval or the field (default)
otherwise, emit return falseval (if provided).
:param field: The name of the field
:param trueval: The string if the condition is true
:param falseval: The string if the condition is false
:return: The string, based on condition
"""
if self.item.formatted().get(field):
return trueval if trueval else self.item.formatted().get(field)
else:
return falseval
# Get the name of tmpl_* functions in the above class.
DefaultTemplateFunctions._func_names = \
[s for s in dir(DefaultTemplateFunctions)
if s.startswith(DefaultTemplateFunctions._prefix)]
| mit | -958,155,747,688,783,600 | 32.917408 | 79 | 0.577324 | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.